import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import minimize
from scipy.optimize import fmin_l_bfgs_b

from surrogate.kernel import gauss_kernel, double_gauss_kernel
from surrogate.descriptor.fingerprint import Fingerprint
from surrogate.prior.prior import repulsive_prior
from surrogate.gpr_calculator import gpr_calculator

import warnings

class gpr_memory():
    """ Class for saving "expensive to calculate" data for
    the Gaussian Process Regression model.
    """
    def __init__(self, descriptor, prior):
        self.descriptor = descriptor
        self.prior = prior

        self.initialize_data()

    def initialize_data(self):
        self.energies = None
        self.features = None
        self.prior_values = None
        
    def get_data(self):
        return self.energies, self.features, self.prior_values
        
    def save_data(self, atoms_list, add_data=True):
        if not add_data:
            self.initialize_data()
        
        self.save_energies(atoms_list)
        self.save_features(atoms_list)
        self.save_prior_values(atoms_list)

    def save_energies(self, atoms_list):
        energies_save = np.array([a.get_potential_energy() for a in atoms_list])
        if self.energies is None:
            self.energies = energies_save
        else:
            self.energies = np.r_[self.energies, energies_save]
    
    def save_features(self, atoms_list):
        features_save = self.descriptor.get_featureMat(atoms_list)
        if self.features is None:
            self.features = features_save
        else:
            self.features = np.r_[self.features, features_save]

    def save_prior_values(self, atoms_list):
        if self.prior is not None:
            prior_values_save = np.array([self.prior.energy(a) for a in atoms_list])
            if self.prior_values is None:
                self.prior_values = prior_values_save
            else:
                self.prior_values = np.r_[self.prior_values, prior_values_save]
        else:
            self.prior_values = 0

class GPR():
    """Gaussian Process Regression

    Parameters:

    descriptor:
        Descriptor defining the representation of structures. The Gaussian
        Process operates on these representations.

    kernel:
        Kernel (or covariance) function used in the Gaussian Process.

    prior:
        Prior mean function used.

    n_restarts_optimizer: int
        Number of restarts of the gradient-based hyperparameter optimization
        (L-BFGS-B) performed by each compute process.
    """
    def __init__(self, descriptor=None, kernel='double', prior=None, n_restarts_optimizer=1, template_structure=None):
        if descriptor is None:
            self.descriptor = Fingerprint()
        else:
            self.descriptor = descriptor
        Nsplit_eta = None
        if template_structure is not None:
            self.descriptor.initialize_from_atoms(template_structure)
            if hasattr(self.descriptor, 'use_angular'):
                if self.descriptor.use_angular:
                    Nsplit_eta = self.descriptor.Nelements_2body

        if kernel == 'single':
            self.kernel = gauss_kernel(Nsplit_eta=Nsplit_eta)
        elif kernel == 'double':
            self.kernel = double_gauss_kernel(Nsplit_eta=Nsplit_eta)
        else:
            self.kernel = kernel

        if prior is None:
            self.prior = repulsive_prior()
        else:
            self.prior = prior

        self.n_restarts_optimizer = n_restarts_optimizer

        self.memory = gpr_memory(self.descriptor, self.prior)

    def predict_energy(self, a, eval_std=False):
        x = self.descriptor.get_feature(a)
        k = self.kernel.kernel_vector(x, self.X)

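        # Posterior mean: E(x) = k(x, X) alpha + bias + prior(x), with
        # alpha = K^{-1} (E_train - prior - bias) precomputed in train().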
        E = np.dot(k,self.alpha) + self.bias + self.prior.energy(a)

        if eval_std:
            # Lines 5 and 6 in GPML
            vk = np.dot(self.K_inv, k)
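            # Predictive standard deviation: sqrt(k(x, x) - k^T K^{-1} k), where
            # self.K0 is assumed to equal k(x, x) for the kernels used here.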
            E_std = np.sqrt(self.K0 - np.dot(k, vk))
            return E, E_std
        else:
            return E

    def predict_forces(self, a, eval_std=False):

        # Calculate descriptor and its gradient
        x = self.descriptor.get_feature(a)
        x_ddr = self.descriptor.get_featureGradient(a).T

        # Calculate kernel and its derivative
        k_ddx = self.kernel.kernel_jacobian(x, self.X)
        k_ddr = np.dot(k_ddx, x_ddr)

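        # Posterior mean forces via the chain rule:
        # F = -(dk/dx . dx/dr)^T alpha + prior forces.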
        F = -np.dot(k_ddr.T, self.alpha) + self.prior.forces(a)

        if eval_std:
            k = self.kernel.kernel_vector(x, self.X)
            vk = np.dot(self.K_inv, k)
            g = self.K0 - np.dot(k.T, vk)
            assert g >= 0
            F_std = 1/np.sqrt(g) * np.dot(k_ddr.T, vk)
            return F.reshape((-1,3)), F_std.reshape(-1,3)
        else:
            return F.reshape(-1,3)

    def update_bias(self):
        self.bias = np.mean(self.memory.energies - self.memory.prior_values)

    def train(self, atoms_list=None, add_data=True):
        if atoms_list is not None:
            self.memory.save_data(atoms_list, add_data)

        self.update_bias()
        self.E, self.X, self.prior_values = self.memory.get_data()
        self.Y = self.E - self.prior_values - self.bias
        
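        # Precompute alpha = K^{-1} y and K^{-1} itself from a Cholesky
        # factorization of the kernel matrix; both are reused for prediction.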
        K = self.kernel(self.X)
        L = cholesky(K, lower=True)
        
        self.alpha = cho_solve((L, True), self.Y)
        self.K_inv = cho_solve((L, True), np.eye(K.shape[0]))
        self.K0 = self.kernel.kernel_value(self.X[0], self.X[0])
    
    def optimize_hyperparameters(self, atoms_list=None, add_data=True, comm=None):
        if self.n_restarts_optimizer == 0:
            self.train(atoms_list)
            return

        if atoms_list is not None:
            self.memory.save_data(atoms_list, add_data)

        self.update_bias()
        self.E, self.X, self.prior_values = self.memory.get_data()
        self.Y = self.E - self.prior_values - self.bias

        results = []
        for i in range(self.n_restarts_optimizer):
            theta_initial = np.random.uniform(self.kernel.theta_bounds[:, 0],
                                              self.kernel.theta_bounds[:, 1])
            if i == 0:
                # Make sure that the currently chosen hyperparameters
                # are always tried as initial values.
                if comm is not None:
                    # But only on a single communicator, if multiple are present.
                    if comm.rank == 0:
                        theta_initial = self.kernel.theta
                else:
                    theta_initial = self.kernel.theta
                        
            res = self.constrained_optimization(theta_initial)
            results.append(res)
        index_min = np.argmin(np.array([r[1] for r in results]))
        result_min = results[index_min]
        
        if comm is not None:
            # Find best hyperparameters among all communicators and broadcast.
            results_all = comm.gather(result_min, root=0)
            if comm.rank == 0:
                index_all_min = np.argmin(np.array([r[1] for r in results_all]))
                result_min = results_all[index_all_min]
            else:
                result_min = None
            result_min = comm.bcast(result_min, root=0)
                
        self.kernel.theta = result_min[0]
        self.lml = -result_min[1]

        self.train()
    
    def neg_log_marginal_likelihood(self, theta=None, eval_gradient=True):
        if theta is not None:
            self.kernel.theta = theta

        if eval_gradient:
            K, K_gradient = self.kernel(self.X, eval_gradient)
        else:
            K = self.kernel(self.X)

        L = cholesky(K, lower=True)
        alpha = cho_solve((L, True), self.Y)

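        # Log marginal likelihood (Equation (5.8) in GPML):
        # log p(y|X, theta) = -1/2 y^T K^{-1} y - 1/2 log|K| - N/2 log(2 pi)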
        lml = -0.5 * np.dot(self.Y, alpha)
        lml -= np.sum(np.log(np.diag(L)))
        lml -= K.shape[0]/2 * np.log(2*np.pi)
        
        if eval_gradient:
            # Equation (5.9) in GPML
            K_inv = cho_solve((L, True), np.eye(K.shape[0]))
            tmp = np.einsum("i,j->ij", alpha, alpha) - K_inv

            lml_gradient = 0.5*np.einsum("ij,kij->k", tmp, K_gradient)
            return -lml, -lml_gradient
        else:
            return -lml

    def constrained_optimization(self, theta_initial):
        theta_opt, func_min, convergence_dict = \
            fmin_l_bfgs_b(self.neg_log_marginal_likelihood,
                          theta_initial,
                          bounds=self.kernel.theta_bounds)
        return theta_opt, func_min

    def numerical_neg_lml(self, dx=1e-4):
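        """Central finite-difference gradient of the negative log marginal
        likelihood with respect to the hyperparameters; useful for checking
        the analytic gradient from neg_log_marginal_likelihood()."""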
        N_data = self.X.shape[0]
        theta = np.copy(self.kernel.theta)
        N_hyper = len(theta)
        lml_ddTheta = np.zeros((N_hyper))
        for i in range(N_hyper):
            theta_up = np.copy(theta)
            theta_down = np.copy(theta)
            theta_up[i] += 0.5*dx
            theta_down[i] -= 0.5*dx

            lml_up = self.neg_log_marginal_likelihood(theta_up, eval_gradient=False)
            lml_down = self.neg_log_marginal_likelihood(theta_down, eval_gradient=False)
            lml_ddTheta[i] = (lml_up - lml_down)/dx
        return lml_ddTheta

    def numerical_forces(self, a, dx=1e-4, eval_std=False):
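        """Central finite-difference forces (and, for eval_std=True, the
        finite-difference derivative of the predicted uncertainty); useful
        for checking predict_forces()."""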
        Na, Nd = a.positions.shape
        if not eval_std:
            F = np.zeros((Na,Nd))
            for ia in range(Na):
                for idim in range(Nd):
                    a_up = a.copy()
                    a_down = a.copy()
                    a_up.positions[ia,idim] += 0.5*dx
                    a_down.positions[ia,idim] -= 0.5*dx
                    
                    E_up = self.predict_energy(a_up)
                    E_down = self.predict_energy(a_down)
                    F[ia,idim] = -(E_up - E_down)/dx
            return F
        else:
            F = np.zeros((Na,Nd))
            Fstd = np.zeros((Na,Nd))
            for ia in range(Na):
                for idim in range(Nd):
                    a_up = a.copy()
                    a_down = a.copy()
                    a_up.positions[ia,idim] += 0.5*dx
                    a_down.positions[ia,idim] -= 0.5*dx
                    
                    E_up, Estd_up = self.predict_energy(a_up, eval_std=True)
                    E_down, Estd_down = self.predict_energy(a_down, eval_std=True)
                    F[ia,idim] = -(E_up - E_down)/dx
                    Fstd[ia,idim] = -(Estd_up - Estd_down)/dx
            return F, Fstd

    def get_calculator(self, kappa):
        return gpr_calculator(self, kappa)
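
# Minimal usage sketch (illustrative only). It assumes a list of ASE Atoms
# objects, here called `structures`, with single-point energies attached;
# the variable names below are hypothetical and not part of this module:
#
#   gpr = GPR(template_structure=structures[0])
#   gpr.optimize_hyperparameters(structures)   # fit hyperparameters, then train
#   E, E_std = gpr.predict_energy(new_structure, eval_std=True)
#   F = gpr.predict_forces(new_structure)
#   calc = gpr.get_calculator(kappa=2)         # kappa is forwarded to gpr_calculator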