Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
Malthe Kjær Bisbo
GOFEE
Commits
ba29303e
Commit
ba29303e
authored
Dec 23, 2019
by
Malthe Kjær Bisbo
Browse files
gpr done
parent
cb9ee645
Changes
9
Hide whitespace changes
Inline
Side-by-side
surrogate/__pycache__/custom_calculators.cpython-36.pyc
0 → 100644
View file @
ba29303e
File added
surrogate/__pycache__/gpr.cpython-36.pyc
View file @
ba29303e
No preview for this file type
surrogate/__pycache__/kernel.cpython-36.pyc
View file @
ba29303e
No preview for this file type
surrogate/custom_calculators.py
0 → 100644
View file @
ba29303e
import
numpy
as
np
from
scipy.spatial.distance
import
euclidean
from
ase.calculators.calculator
import
Calculator
from
time
import
time
from
ase.calculators.singlepoint
import
SinglePointCalculator
class krr_calculator(Calculator):
    """ASE calculator that delegates energy/force evaluation to an ML surrogate.

    Predictions come from ``MLmodel``. When ``kappa`` is given, the model's
    predictive uncertainty is folded into the result (acquisition-function
    style): E = E_pred - kappa * E_err and F = F_pred + kappa * F_err.

    NOTE(review): ``MLmodel`` is assumed to expose ``predict_energy`` /
    ``predict_force`` with a ``return_error`` keyword — confirm against the
    surrogate model's interface.
    """
    implemented_properties = ['energy', 'forces']
    default_parameters = {}

    def __init__(self, MLmodel, label='MLmodel', kappa=None, **kwargs):
        self.MLmodel = MLmodel
        self.kappa = kappa
        Calculator.__init__(self, **kwargs)

    def calculate(self, atoms=None,
                  properties=['energy', 'forces'],
                  system_changes=['positions']):
        """Compute requested properties and store them in ``self.results``."""
        Calculator.calculate(self, atoms, properties, system_changes)

        if 'energy' in properties:
            if self.kappa is None:
                energy = self.MLmodel.predict_energy(atoms, return_error=False)
            else:
                # With uncertainty: predict_energy returns (energy, error, _).
                energy_pred, error, _ = self.MLmodel.predict_energy(
                    atoms, return_error=True)
                energy = energy_pred - self.kappa * error
            self.results['energy'] = energy

        if 'forces' in properties:
            if self.kappa is None:
                forces = self.MLmodel.predict_force(atoms).reshape((-1, 3))
            else:
                forces_pred, forces_err = self.MLmodel.predict_force(
                    atoms, return_error=True)
                forces = (forces_pred + self.kappa * forces_err).reshape((-1, 3))
            self.results['forces'] = forces
class doubleLJ_calculator(Calculator):
    """ASE calculator for a modified Lennard-Jones pair potential.

    Pair energy for interatomic distance r:

        E(r) = 1/r**12 - 2/r**6 - eps * exp(-(r - r0)**2 / (2*sigma**2))

    i.e. a standard LJ term plus a Gaussian well of depth ``eps`` centered
    at ``r0`` with width ``sigma``.

    Parameters
    ----------
    eps : float
        Depth of the Gaussian attraction term.
    r0 : float
        Center of the Gaussian attraction term.
    sigma : float
        Width of the Gaussian attraction term.
    noZ : bool
        If True, the z-components of the forces are zeroed (2D systems).
    """
    implemented_properties = ['energy', 'forces']
    default_parameters = {}

    def __init__(self, eps=1.8, r0=1.1, sigma=np.sqrt(0.02),
                 label='doubleLJ', noZ=False, **kwargs):
        self.eps = eps
        self.r0 = r0
        self.sigma = sigma
        self.noZ = noZ
        Calculator.__init__(self, **kwargs)

    def calculate(self, atoms=None,
                  properties=['energy', 'forces'],
                  system_changes=['positions']):
        """Compute requested properties and store them in ``self.results``."""
        Calculator.calculate(self, atoms, properties, system_changes)
        if 'energy' in properties:
            self.results['energy'] = self.energy(atoms)
        if 'forces' in properties:
            F = self.forces(atoms)
            if self.noZ:
                # Constrain motion to the xy-plane.
                F[:, -1] = 0
            self.results['forces'] = F

    def energy(self, a):
        """Total potential energy, summed over unique pairs (j > i)."""
        x = a.get_positions()
        E = 0
        for i, xi in enumerate(x):
            for j, xj in enumerate(x):
                if j > i:
                    r = euclidean(xi, xj)
                    E1 = 1 / r**12 - 2 / r**6
                    E2 = -self.eps * np.exp(-(r - self.r0)**2 /
                                            (2 * self.sigma**2))
                    E += E1 + E2
        return E

    def forces(self, a):
        """Analytic forces, F = -dE/dx, shape (Natoms, dim)."""
        x = a.get_positions()
        Natoms, dim = x.shape
        dE = np.zeros((Natoms, dim))
        for i, xi in enumerate(x):
            for j, xj in enumerate(x):
                if j != i:
                    # Fix: distance is computed only for distinct pairs;
                    # the original also evaluated euclidean(xi, xi) == 0
                    # for every i == j, which was wasted work.
                    r = euclidean(xi, xj)
                    rijVec = xi - xj
                    dE1 = 12 * rijVec * (-1 / r**14 + 1 / r**8)
                    dE2 = (self.eps * (r - self.r0) * rijVec /
                           (r * self.sigma**2) *
                           np.exp(-(r - self.r0)**2 / (2 * self.sigma**2)))
                    dE[i] += dE1 + dE2
        return -dE
surrogate/gpr.py
View file @
ba29303e
...
...
@@ -166,10 +166,11 @@ class gpr():
K0
=
self
.
kernel
.
kernel_value
(
x
,
x
)
g
=
K0
-
np
.
dot
(
k
.
T
,
vk
)
assert
g
>=
0
F_std
=
-
1
/
np
.
sqrt
(
g
)
*
np
.
dot
(
k_ddr
.
T
,
vk
)
return
F
,
F_std
F_std
=
1
/
np
.
sqrt
(
g
)
*
np
.
dot
(
k_ddr
.
T
,
vk
)
#F_std = -1/np.sqrt(g) * np.dot(k_ddr.T, vk)
return
F
.
reshape
((
-
1
,
3
)),
F_std
.
reshape
(
-
1
,
3
)
else
:
return
F
return
F
.
reshape
(
-
1
,
3
)
def
set_bias
(
self
):
self
.
bias
=
np
.
mean
(
self
.
memory
.
energies
-
self
.
memory
.
prior_values
)
...
...
@@ -232,7 +233,7 @@ class gpr():
lml
=
-
0.5
*
np
.
dot
(
self
.
Y
,
alpha
)
lml
-=
np
.
sum
(
np
.
log
(
np
.
diag
(
L
)))
lml
-=
K
.
shape
[
0
]
/
2
*
np
.
log
(
2
*
np
.
pi
)
if
eval_gradient
:
# Equation (5.9) in GPML
K_inv
=
cho_solve
((
L
,
True
),
np
.
eye
(
K
.
shape
[
0
]))
...
...
@@ -252,6 +253,7 @@ class gpr():
options
=
{
'gtol'
:
1e-2
,
'ftol'
:
1e-4
})
if
not
result
.
success
:
warnings
.
warn
(
f
"L_BFGS_B terminated with state:
{
result
.
message
}
"
)
print
(
'lml:'
,
result
.
fun
)
return
result
.
x
,
result
.
fun
def
numerical_neg_lml
(
self
,
dx
=
1e-4
):
...
...
@@ -265,13 +267,41 @@ class gpr():
theta_up
[
i
]
+=
0.5
*
dx
theta_down
[
i
]
-=
0.5
*
dx
d_theta
=
np
.
exp
(
theta_up
[
i
])
-
np
.
exp
(
theta_down
[
i
])
lml_up
=
self
.
neg_log_marginal_likelihood
(
theta_up
,
eval_gradient
=
False
)
lml_down
=
self
.
neg_log_marginal_likelihood
(
theta_down
,
eval_gradient
=
False
)
#lml_ddTheta[i] = (lml_up - lml_down)/dx
lml_ddTheta
[
i
]
=
(
lml_up
-
lml_down
)
/
d_theta
lml_ddTheta
[
i
]
=
(
lml_up
-
lml_down
)
/
dx
return
lml_ddTheta
def
numerical_forces
(
self
,
a
,
dx
=
1e-4
,
eval_std
=
False
):
Na
,
Nd
=
a
.
positions
.
shape
if
not
eval_std
:
F
=
np
.
zeros
((
Na
,
Nd
))
for
ia
in
range
(
Na
):
for
idim
in
range
(
Nd
):
a_up
=
a
.
copy
()
a_down
=
a
.
copy
()
a_up
.
positions
[
ia
,
idim
]
+=
0.5
*
dx
a_down
.
positions
[
ia
,
idim
]
-=
0.5
*
dx
E_up
=
self
.
predict_energy
(
a_up
)
E_down
=
self
.
predict_energy
(
a_down
)
F
[
ia
,
idim
]
=
-
(
E_up
-
E_down
)
/
dx
return
F
else
:
F
=
np
.
zeros
((
Na
,
Nd
))
Fstd
=
np
.
zeros
((
Na
,
Nd
))
for
ia
in
range
(
Na
):
for
idim
in
range
(
Nd
):
a_up
=
a
.
copy
()
a_down
=
a
.
copy
()
a_up
.
positions
[
ia
,
idim
]
+=
0.5
*
dx
a_down
.
positions
[
ia
,
idim
]
-=
0.5
*
dx
E_up
,
Estd_up
=
self
.
predict_energy
(
a_up
,
eval_std
=
True
)
E_down
,
Estd_down
=
self
.
predict_energy
(
a_down
,
eval_std
=
True
)
F
[
ia
,
idim
]
=
-
(
E_up
-
E_down
)
/
dx
Fstd
[
ia
,
idim
]
=
(
Estd_up
-
Estd_down
)
/
dx
return
F
,
Fstd
def
get_calculator
():
pass
...
...
surrogate/kernel.py
View file @
ba29303e
...
...
@@ -54,6 +54,9 @@ class kernel(ABC):
return
f_jac
def
numerical_hyperparameter_gradient
(
self
,
X
,
dx
=
1.e-5
):
"""Calculates the numerical derivative of the kernel with respect to the
log transformed hyperparameters.
"""
N_data
=
X
.
shape
[
0
]
theta
=
np
.
copy
(
self
.
theta
)
N_hyper
=
len
(
theta
)
...
...
@@ -63,6 +66,12 @@ class kernel(ABC):
theta_down
=
np
.
copy
(
theta
)
theta_up
[
i
]
+=
0.5
*
dx
theta_down
[
i
]
-=
0.5
*
dx
#theta_up[i] = np.log(np.exp(theta_up[i]) + 0.5*dx)
#theta_down[i] = np.log(np.exp(theta_down[i]) - 0.5*dx)
#dTheta = theta_up[i] - theta_down[i]
#dTheta = np.log(np.exp(theta_up[i]) + 0.5*dx) - np.log(np.exp(theta_down[i]) - 0.5*dx)
#print('dTheta:', dTheta)
self
.
theta
=
theta_up
K_up
=
self
(
X
,
eval_gradient
=
False
)
...
...
@@ -76,12 +85,24 @@ class gauss_kernel(kernel):
def
__init__
(
self
,
amplitude
=
10.0
,
length_scale
=
10.0
,
amplitude_bounds
=
(
1e0
,
1e3
),
length_scale_bounds
=
(
1e-1
,
1e1
)):
self
.
amplitude
=
amplitude
self
.
length_scale
=
length_scale
self
.
amplitude_bounds
=
amplitude_bounds
self
.
length_scale_bounds
=
length_scale_bounds
self
.
_theta_bounds
=
[
amplitude_bounds
,
length_scale_bounds
]
def
__call__
(
self
,
X
,
Y
,
eval_gradient
=
False
):
pass
self
.
theta_bounds
=
np
.
array
([
amplitude_bounds
,
length_scale_bounds
])
def
__call__
(
self
,
X
,
eval_gradient
=
False
):
if
np
.
ndim
(
X
)
==
1
:
X
=
X
.
reshape
((
1
,
-
1
))
d
=
cdist
(
X
/
self
.
length_scale
,
X
/
self
.
length_scale
,
metric
=
'sqeuclidean'
)
K
=
self
.
amplitude
*
np
.
exp
(
-
0.5
*
d
)
if
eval_gradient
:
K_gradient
=
self
.
kernel_hyperparameter_gradient
(
X
)
return
K
,
K_gradient
else
:
return
K
def
kernel
(
self
,
X
,
Y
):
if
np
.
ndim
(
X
)
==
1
:
...
...
@@ -92,17 +113,13 @@ class gauss_kernel(kernel):
Y
/
self
.
length_scale
,
metric
=
'sqeuclidean'
)
K
=
self
.
amplitude
*
np
.
exp
(
-
0.5
*
d
)
return
K
def
kernel_value
(
self
,
x
,
y
):
d
=
cdist
(
x
.
reshape
(
1
,
-
1
)
/
self
.
length_scale
,
y
.
reshape
(
1
,
-
1
)
/
self
.
length_scale
,
metric
=
'sqeuclidean'
)
K
=
self
.
amplitude
*
np
.
exp
(
-
0.5
*
d
)
return
K
def
kernel_value
(
self
,
x
,
y
):
K
=
self
.
kernel
(
x
,
y
)
return
np
.
asscalar
(
K
)
def
kernel_vector
(
self
,
x
,
Y
):
d
=
cdist
(
x
.
reshape
(
1
,
-
1
)
/
self
.
length_scale
,
Y
/
self
.
length_scale
,
metric
=
'sqeuclidean'
)
K
=
self
.
amplitude
*
np
.
exp
(
-
0.5
*
d
)
K
=
self
.
kernel
(
x
,
Y
).
reshape
(
-
1
)
return
K
def
kernel_matrix
(
self
,
X
,
Y
=
None
):
...
...
@@ -127,21 +144,29 @@ class gauss_kernel(kernel):
@
property
def
theta
(
self
):
self
.
_theta
=
[
self
.
amplitude
,
self
.
length_scale
]
return
self
.
_theta
"""Returns the log-transformed hyperparameters of the kernel.
"""
self
.
_theta
=
np
.
array
([
self
.
amplitude
,
self
.
length_scale
])
return
np
.
log
(
self
.
_theta
)
#return self._theta
@
theta
.
setter
def
theta
(
self
,
theta
):
self
.
_theta
=
theta
"""Sets the hyperparameters of the kernel.
theta: log-transformed hyperparameters
"""
self
.
_theta
=
np
.
exp
(
theta
)
#self._theta = theta
self
.
amplitude
=
self
.
_theta
[
0
]
self
.
length_scale
=
self
.
_theta
[
1
]
def
dK_da
(
self
,
X
):
if
np
.
ndim
(
X
)
==
1
:
X
=
X
.
reshape
((
1
,
-
1
))
d
=
cdist
(
X
/
self
.
length_scale
,
X
/
self
.
length_scale
,
metric
=
'sqeuclidean'
)
dK_da
=
np
.
exp
(
-
0.5
*
d
)
dK_da
=
self
.
amplitude
*
np
.
exp
(
-
0.5
*
d
)
return
dK_da
def
dK_dl
(
self
,
X
):
...
...
@@ -149,10 +174,13 @@ class gauss_kernel(kernel):
X
=
X
.
reshape
((
1
,
-
1
))
d
=
cdist
(
X
/
self
.
length_scale
,
X
/
self
.
length_scale
,
metric
=
'sqeuclidean'
)
dK_dl
=
self
.
amplitude
*
d
/
self
.
length_scale
*
np
.
exp
(
-
0.5
*
d
)
dK_dl
=
self
.
amplitude
*
d
*
np
.
exp
(
-
0.5
*
d
)
return
dK_dl
def
kernel_hyperparameter_gradient
(
self
,
X
):
"""Calculates the derivative of the kernel with respect to the
log transformed hyperparameters.
"""
return
np
.
array
([
self
.
dK_da
(
X
),
self
.
dK_dl
(
X
)])
...
...
@@ -265,30 +293,33 @@ class double_gauss_kernel(kernel):
X
/
self
.
length_scale1
,
metric
=
'sqeuclidean'
)
d2
=
cdist
(
X
/
self
.
length_scale2
,
X
/
self
.
length_scale2
,
metric
=
'sqeuclidean'
)
dK_da
=
np
.
exp
(
-
0.5
*
d1
)
+
self
.
weight
*
np
.
exp
(
-
0.5
*
d2
)
+
self
.
noise
*
np
.
eye
(
X
.
shape
[
0
])
dK_da
=
self
.
amplitude
*
(
np
.
exp
(
-
0.5
*
d1
)
+
self
.
weight
*
np
.
exp
(
-
0.5
*
d2
)
+
self
.
noise
*
np
.
eye
(
X
.
shape
[
0
])
)
return
dK_da
def
dK_dl1
(
self
,
X
):
d1
=
cdist
(
X
/
self
.
length_scale1
,
X
/
self
.
length_scale1
,
metric
=
'sqeuclidean'
)
dK_dl1
=
self
.
amplitude
*
d1
/
self
.
length_scale1
*
np
.
exp
(
-
0.5
*
d1
)
dK_dl1
=
self
.
amplitude
*
d1
*
np
.
exp
(
-
0.5
*
d1
)
return
dK_dl1
def
dK_dl2
(
self
,
X
):
d2
=
cdist
(
X
/
self
.
length_scale2
,
X
/
self
.
length_scale2
,
metric
=
'sqeuclidean'
)
dK_dl2
=
self
.
amplitude
*
self
.
weight
*
d2
/
self
.
length_scale2
*
np
.
exp
(
-
0.5
*
d2
)
dK_dl2
=
self
.
amplitude
*
self
.
weight
*
d2
*
np
.
exp
(
-
0.5
*
d2
)
return
dK_dl2
def
dK_dw
(
self
,
X
):
d2
=
cdist
(
X
/
self
.
length_scale2
,
X
/
self
.
length_scale2
,
metric
=
'sqeuclidean'
)
dK_dl2
=
self
.
amplitude
*
np
.
exp
(
-
0.5
*
d2
)
dK_dl2
=
self
.
amplitude
*
self
.
weight
*
np
.
exp
(
-
0.5
*
d2
)
return
dK_dl2
def
dK_dn
(
self
,
X
):
dK_dn
=
self
.
amplitude
*
np
.
eye
(
X
.
shape
[
0
])
dK_dn
=
self
.
amplitude
*
self
.
noise
*
np
.
eye
(
X
.
shape
[
0
])
return
dK_dn
def
kernel_hyperparameter_gradient
(
self
,
X
):
"""Calculates the derivative of the kernel with respect to the
log transformed hyperparameters.
"""
return
np
.
array
([
self
.
dK_da
(
X
),
self
.
dK_dl1
(
X
),
self
.
dK_dl2
(
X
),
self
.
dK_dw
(
X
),
self
.
dK_dn
(
X
)])
surrogate/test_descriptor.py
0 → 100644
View file @
ba29303e
import
numpy
as
np
import
matplotlib.pyplot
as
plt
import
unittest
from
gpr
import
gpr
from
ase.io
import
read
# old gpr
from
kernels
import
RBF
,
ConstantKernel
as
C
,
WhiteKernel
from
descriptor.fingerprint
import
Fingerprint
from
delta_functions_multi.delta
import
delta
as
deltaFunc
from
GPR
import
GPR
def finite_diff(descriptor, a, dx=1e-5):
    """Central finite-difference gradient of a descriptor's feature vector.

    Each Cartesian coordinate of *a* is displaced by +/- dx/2 and the
    resulting feature vectors are differenced.

    Returns an array of shape (Natoms*dim, Nf), one row per coordinate.
    """
    n_features = descriptor.get_feature(a).shape[0]
    n_atoms, n_dims = a.positions.shape
    gradient = np.zeros((n_atoms, n_dims, n_features))
    for atom_idx in range(n_atoms):
        for coord_idx in range(n_dims):
            displaced_up = a.copy()
            displaced_down = a.copy()
            displaced_up.positions[atom_idx, coord_idx] += 0.5 * dx
            displaced_down.positions[atom_idx, coord_idx] -= 0.5 * dx
            delta = (descriptor.get_feature(displaced_up) -
                     descriptor.get_feature(displaced_down))
            gradient[atom_idx, coord_idx, :] = delta / dx
    return gradient.reshape((-1, n_features))
def get_E_with_std(traj, gpr):
    """Evaluate the model energy for each structure in *traj*.

    NOTE(review): this helper looks unfinished — the E and F lists are
    never filled and nothing is returned; confirm intent before relying
    on it.
    """
    E = []
    F = []
    for atoms in traj:
        e = gpr.predict_energy(atoms,)
class test_gpr(unittest.TestCase):
    """Checks the fingerprint descriptor's analytic gradients."""

    @classmethod
    def setUpClass(cls):
        print('setupClass')

    @classmethod
    def tearDownClass(cls):
        print('teardownClass')

    def setUp(self):
        print('setUp')
        # Fresh descriptor (default parameters) for every test.
        self.descriptor = Fingerprint()

    def tearDown(self):
        print('tearDown\n')

    def test_forces(self):
        """Analytic feature gradient must agree with a finite difference."""
        atoms = read('structures.traj', index='0')
        grad_analytic = self.descriptor.get_featureGradient(atoms)
        grad_numeric = finite_diff(self.descriptor, atoms)
        _, n_features = grad_analytic.shape
        feature_idx = np.arange(n_features)
        # Visual comparison of the two gradients, feature by feature.
        fig, ax = plt.subplots(1, 1)
        ax.plot(feature_idx, grad_analytic.T)
        ax.plot(feature_idx, grad_numeric.T, 'k:')
        plt.show()
        print(grad_analytic - grad_numeric)
        np.testing.assert_almost_equal(grad_analytic, grad_numeric)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
surrogate/test_gpr.py
View file @
ba29303e
...
...
@@ -6,7 +6,7 @@ from ase.io import read
# old gpr
from
kernels
import
RBF
,
ConstantKernel
as
C
,
WhiteKernel
from
featureCalculators_multi.angular_fingerprintFeature_cy
import
Angular_
Fingerprint
from
descriptor.fingerprint
import
Fingerprint
from
delta_functions_multi.delta
import
delta
as
deltaFunc
from
GPR
import
GPR
...
...
@@ -29,9 +29,10 @@ def initialize_old_gpr(atoms):
use_angular
=
True
# Initialize feature
featureCalculator
=
Angular_
Fingerprint
(
atoms
,
Rc1
=
Rc1
,
Rc2
=
Rc2
,
binwidth1
=
binwidth1
,
Nbins2
=
Nbins2
,
sigma1
=
sigma1
,
sigma2
=
sigma2
,
gamma
=
gamma
,
eta
=
eta
,
use_angular
=
use_angular
)
featureCalculator
=
Fingerprint
(
Rc1
=
Rc1
,
Rc2
=
Rc2
,
binwidth1
=
binwidth1
,
Nbins2
=
Nbins2
,
sigma1
=
sigma1
,
sigma2
=
sigma2
,
gamma
=
gamma
,
eta
=
eta
,
use_angular
=
use_angular
)
kernel
=
C
(
10
,
(
1e1
,
1e6
))
*
(
C
(
1
,
(
1
,
1
))
*
RBF
(
10
,
(
1
,
1000
))
+
C
(
0.01
,
(
0.01
,
0.01
))
*
RBF
(
10
,
(
1
,
1000
))
+
WhiteKernel
(
1e-5
,
(
1e-6
,
1e-2
)))
kernel
=
C
(
10
,
(
1e1
,
1e6
))
*
RBF
(
10
,
(
1
,
1000
))
#kernel = C(10, (1e1, 1e6)) * (RBF(10, (1,1000)) + C(0.01, (0.01, 0.01)) * RBF(10, (1,1000)) + WhiteKernel(1e-5, (1e-6,1e-2)))
delta
=
deltaFunc
(
atoms
=
atoms
,
rcut
=
6
)
gpr
=
GPR
(
kernel
=
kernel
,
featureCalculator
=
featureCalculator
,
...
...
@@ -63,7 +64,8 @@ class test_gpr(unittest.TestCase):
#self.kernel = gauss_kernel()
a
=
read
(
'structures.traj'
,
index
=
'0'
)
self
.
gpr_old
=
initialize_old_gpr
(
a
)
self
.
gpr
=
gpr
()
#self.gpr = gpr()
self
.
gpr
=
gpr
(
kernel
=
'single'
)
def
tearDown
(
self
):
print
(
'tearDown
\n
'
)
...
...
@@ -82,11 +84,11 @@ class test_gpr(unittest.TestCase):
E
=
np
.
array
([
self
.
gpr
.
predict_energy
(
a
,
eval_std
=
True
)
for
a
in
traj_predict
])
np
.
testing
.
assert_almost_equal
(
E
,
E_old
)
F_old
=
np
.
array
([
self
.
gpr_old
.
predict_force
(
a
)
for
a
in
traj_predict
])
F_old
=
np
.
array
([
self
.
gpr_old
.
predict_force
(
a
)
.
reshape
((
-
1
,
3
))
for
a
in
traj_predict
])
F
=
np
.
array
([
self
.
gpr
.
predict_forces
(
a
)
for
a
in
traj_predict
])
np
.
testing
.
assert_almost_equal
(
F
,
F_old
)
Fstd_old
=
np
.
array
([
self
.
gpr_old
.
predict_force
(
a
,
return_error
=
True
)[
1
]
for
a
in
traj_predict
])
Fstd_old
=
np
.
array
([
self
.
gpr_old
.
predict_force
(
a
,
return_error
=
True
)[
1
]
.
reshape
((
-
1
,
3
))
for
a
in
traj_predict
])
Fstd
=
np
.
array
([
self
.
gpr
.
predict_forces
(
a
,
eval_std
=
True
)[
1
]
for
a
in
traj_predict
])
np
.
testing
.
assert_almost_equal
(
Fstd
,
Fstd_old
)
...
...
@@ -113,7 +115,7 @@ class test_gpr(unittest.TestCase):
_
,
lml_ddTheta_old
=
self
.
gpr_old
.
log_marginal_likelihood
(
self
.
gpr_old
.
kernel
.
theta
,
eval_gradient
=
True
)
_
,
lml_ddTheta
=
self
.
gpr
.
neg_log_marginal_likelihood
(
eval_gradient
=
True
)
np
.
testing
.
assert_almost_equal
(
lml_ddTheta
,
lml_ddTheta_old
)
np
.
testing
.
assert_almost_equal
(
-
lml_ddTheta
,
lml_ddTheta_old
)
def
test_lml_gradient
(
self
):
traj
=
read
(
'structures.traj'
,
index
=
':50'
)
...
...
@@ -128,7 +130,157 @@ class test_gpr(unittest.TestCase):
np
.
testing
.
assert_almost_equal
(
lml_ddTheta
,
lml_ddTheta_numeric
)
def
test_forces
(
self
):
pass
traj
=
read
(
'structures.traj'
,
index
=
':50'
)
traj_train
=
traj
[:
40
]
traj_predict
=
traj
[
40
:]
self
.
gpr_old
.
train
(
traj_train
)
self
.
gpr
.
train
(
traj_train
)
a
=
traj_predict
[
0
]
F
=
self
.
gpr
.
predict_forces
(
a
)
F_numeric
=
self
.
gpr
.
numerical_forces
(
a
)
np
.
testing
.
assert_almost_equal
(
F
,
F_numeric
)
def
test_forces_std
(
self
):
traj
=
read
(
'structures.traj'
,
index
=
':50'
)
traj_train
=
traj
[:
40
]
traj_predict
=
traj
[
40
:]
self
.
gpr_old
.
train
(
traj_train
)
self
.
gpr
.
train
(
traj_train
)
a
=
traj_predict
[
0
]
_
,
Fstd
=
self
.
gpr
.
predict_forces
(
a
,
eval_std
=
True
)
_
,
Fstd_numeric
=
self
.
gpr
.
numerical_forces
(
a
,
eval_std
=
True
)
np
.
testing
.
assert_almost_equal
(
Fstd
,
Fstd_numeric
)
def
test_optimize_hyperparameters
(
self
):
traj
=
read
(
'structures.traj'
,
index
=
':50'
)
traj_train
=
traj
[:
40
]
traj_predict
=
traj
[
40
:]
self
.
gpr_old
.
train
(
traj_train
)
self
.
gpr
.
train
(
traj_train
)
self
.
gpr
.
optimize_hyperparameters
()
if
__name__
==
'__main__'
:
unittest
.
main
()
#unittest.main()
import
matplotlib.pyplot
as
plt
from
ase
import
Atoms
from
ase.visualize
import
view
from
descriptor.fingerprint
import
Fingerprint
from
custom_calculators
import
doubleLJ_calculator
from
gpr
import
gpr
as
GPR
def
finite_diff
(
krr
,
a
,
dx
=
1e-5
,
eval_std
=
False
):
Natoms
,
dim
=
a
.
positions
.
shape
F
=
np
.
zeros
((
Natoms
,
dim
))
Fstd
=
np
.
zeros
((
Natoms
,
dim
))
for
ia
in
range
(
Natoms
):
for
idim
in
range
(
dim
):
a_up
=
a
.
copy
()
a_down
=
a
.
copy
()
a_up
.
positions
[
ia
,
idim
]
+=
dx
/
2
a_down
.
positions
[
ia
,
idim
]
-=
dx
/
2
if
not
eval_std
:
E_up
=
krr
.
predict_energy
(
a_up
,
eval_std
=
False
)
E_down
=
krr
.
predict_energy
(
a_down
,
eval_std
=
False
)
F
[
ia
,
idim
]
=
-
(
E_up
-
E_down
)
/
dx
else
:
E_up
,
err_up
=
krr
.
predict_energy
(
a_up
,
eval_std
=
True
)
E_down
,
err_down
=
krr
.
predict_energy
(
a_down
,
eval_std
=
True
)
F
[
ia
,
idim
]
=
-
(
E_up
-
E_down
)
/
dx
Fstd
[
ia
,
idim
]
=
-
(
err_up
-
err_down
)
/
dx
if
eval_std
:
return
F
[
1
,
0
],
Fstd
[
1
,
0
]
else
:
return
F
def
createData
(
r
):
positions
=
np
.
array
([[
0
,
0
,
0
],[
r
,
0
,
0
]])
a
=
Atoms
(
'2H'
,
positions
,
cell
=
[
3
,
3
,
1
],
pbc
=
[
0
,
0
,
0
])
calc
=
doubleLJ_calculator
()
a
.
set_calculator
(
calc
)
return
a
def
test1
():
a_train
=
[
createData
(
r
)
for
r
in
[
0.9
,
1
,
1.3
,
2
,
3
]]
view
(
a_train
[
3
])
E_train
=
np
.
array
([
a
.
get_potential_energy
()
for
a
in
a_train
])
Natoms
=
a_train
[
0
].
get_number_of_atoms
()
Rc1
=
5
binwidth1
=
0.2
sigma1
=
0.2
Rc2
=
4
Nbins2
=
30
sigma2
=
0.2
gamma
=
1
eta
=
30
use_angular
=
False
descriptor
=
Fingerprint
(
Rc1
=
Rc1
,
Rc2
=
Rc2
,
binwidth1
=
binwidth1
,
Nbins2
=
Nbins2
,
sigma1
=
sigma1
,
sigma2
=
sigma2
,
gamma
=
gamma
,
eta
=
eta
,
use_angular
=
use_angular
)
# Set up KRR-model
gpr
=
GPR
(
kernel
=
'single'
,
descriptor
=
descriptor
)
gpr
.
train
(
atoms_list
=
a_train
)
Ntest
=
500
r_test
=
np
.
linspace
(
0.87
,
3.5
,
Ntest
)
E_test
=
np
.
zeros
(
Ntest
)
err_test
=
np
.
zeros
(
Ntest
)
F_test
=
np
.
zeros
(
Ntest
)
Fstd_test
=
np
.
zeros
(
Ntest
)
E_true
=
np
.
zeros
(
Ntest
)
F_true
=
np
.
zeros
(
Ntest
)
F_num
=
np
.
zeros
(
Ntest
)
Fstd_num
=
np
.
zeros
(
Ntest
)
for
i
,
r
in
enumerate
(
r_test
):
ai
=
createData
(
r
)
E
,
err
=
gpr
.
predict_energy
(
ai
,
eval_std
=
True
)
E_test
[
i
]
=
E