Coverage for /builds/ase/ase/ase/optimize/gpmin/kernel.py : 70.37%

import numpy as np
import numpy.linalg as la


class Kernel:
    def __init__(self):
        pass

    def set_params(self, params):
        pass

    def kernel(self, x1, x2):
        """Kernel function to be fed to the kernel matrix."""
        pass

    def K(self, X1, X2):
        """Compute the kernel matrix."""
        return np.block([[self.kernel(x1, x2) for x2 in X2] for x1 in X1])
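
# A minimal sketch (editor's illustration, not part of the original module)
# of how Kernel.K assembles the kernel matrix: np.block stitches the
# per-pair kernel values (scalars or blocks) into a single array.
# DotKernel is a hypothetical subclass used only for this example.
#
#     class DotKernel(Kernel):
#         def kernel(self, x1, x2):
#             return np.dot(x1, x2)
#
#     X = np.arange(6.0).reshape(3, 2)
#     DotKernel().K(X, X)        # 3 x 3 Gram matrix, equal to X @ X.T
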
class SE_kernel(Kernel):
    """Squared exponential kernel without derivatives."""

    def __init__(self):
        Kernel.__init__(self)

    def set_params(self, params):
        """Set the parameters of the squared exponential kernel.

        Parameters:

        params: [weight, l] Parameters of the kernel:
            weight: prefactor of the exponential
            l: length scale of the kernel
        """
        self.weight = params[0]
        self.l = params[1]
    def squared_distance(self, x1, x2):
        """Returns the squared norm of x1 - x2 scaled by the metric
        diag(1/l**2), i.e. ||x1 - x2||**2 / l**2."""
        return np.sum((x1 - x2) * (x1 - x2)) / self.l**2
    def kernel(self, x1, x2):
        """The squared exponential function."""
        return self.weight**2 * np.exp(-0.5 * self.squared_distance(x1, x2))
    def dK_dweight(self, x1, x2):
        """Derivative of the kernel with respect to the weight."""
        return 2 * self.weight * np.exp(-0.5 * self.squared_distance(x1, x2))
    def dK_dl(self, x1, x2):
        """Derivative of the kernel with respect to the scale l."""
        # self.kernel is a method and must be called, not multiplied.
        return self.kernel(x1, x2) * la.norm(x1 - x2)**2 / self.l**3
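
# A minimal usage sketch (editor's illustration; parameter values are
# arbitrary assumptions, not defaults of the module):
#
#     se = SE_kernel()
#     se.set_params([1.0, 0.5])      # weight = 1.0, length scale l = 0.5
#     x1, x2 = np.zeros(3), np.ones(3)
#     se.kernel(x1, x2)              # 1.0**2 * np.exp(-0.5 * 3 / 0.5**2)
#     se.K([x1, x2], [x1, x2])       # 2 x 2 Gram matrix built by np.block
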
class SquaredExponential(SE_kernel):
    """Squared exponential kernel with derivatives.

    For the formulas see Koistinen, Dagbjartsdottir, Asgeirsson, Vehtari,
    Jonsson: "Nudged elastic band calculations accelerated with Gaussian
    process regression", section 3.

    Before making any predictions, the parameters need to be set using the
    method SquaredExponential.set_params(params), where params is a list
    whose first entry is the weight (prefactor of the exponential) and
    whose second entry is the scale (l).

    Parameters:

    dimensionality: The dimensionality of the problem to optimize,
                    typically 3*N where N is the number of atoms. If
                    dimensionality is None, it is computed when the
                    kernel_matrix method is called.

    Attributes:
    ----------------
    D: int. Dimensionality of the problem to optimize.
    weight: float. Multiplicative constant of the exponential kernel.
    l: float. Length scale of the squared exponential kernel.

    Relevant Methods:
    ----------------
    set_params: Set the parameters of the kernel, i.e. change the
                attributes.
    kernel_function: Squared exponential covariance function.
    kernel: Covariance matrix between two points in the manifold.
            Note that the inputs are arrays of shape (D,).
    kernel_matrix: Kernel matrix of a data set to itself, K(X, X).
                   Note the input is an array of shape (nsamples, D).
    kernel_vector: Kernel matrix of a point x to a dataset X, K(x, X).
    gradient: Gradient of K(X, X) with respect to the parameters of
              the kernel, i.e. the hyperparameters of the Gaussian
              process.
    """
    def __init__(self, dimensionality=None):
        self.D = dimensionality
        SE_kernel.__init__(self)
    def kernel_function(self, x1, x2):
        """The squared exponential function."""
        return self.weight**2 * np.exp(-0.5 * self.squared_distance(x1, x2))
    def kernel_function_gradient(self, x1, x2):
        """Gradient of kernel_function with respect to the second entry,
        divided by the kernel function itself (the common exponential
        factor is multiplied back in by the caller, e.g. self.kernel).

        x1: first data point
        x2: second data point
        """
        prefactor = (x1 - x2) / self.l**2
        # Full gradient: prefactor * self.kernel_function(x1, x2)
        return prefactor
    def kernel_function_hessian(self, x1, x2):
        """Second-derivatives matrix of the kernel function,
        divided by the kernel function itself."""
        P = np.outer(x1 - x2, x1 - x2) / self.l**2
        return (np.identity(self.D) - P) / self.l**2
    def kernel(self, x1, x2):
        """Squared exponential kernel including derivatives.
        This function returns a (D+1) x (D+1) matrix, where D is the
        dimension of the manifold.
        """
        K = np.identity(self.D + 1)
        K[0, 1:] = self.kernel_function_gradient(x1, x2)
        K[1:, 0] = -K[0, 1:]
        # Inlined self.kernel_function_hessian(x1, x2):
        P = np.outer(x1 - x2, x1 - x2) / self.l**2
        K[1:, 1:] = (K[1:, 1:] - P) / self.l**2
        # Multiply the common exponential factor back in.
        return K * self.kernel_function(x1, x2)
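
    # Block structure of the matrix returned by kernel (editor's sketch,
    # not part of the original code).  With k = kernel_function(x1, x2),
    # g = dk/dx2 and H = d^2 k / (dx1 dx2):
    #
    #     K(x1, x2) = [[ k,  g.T ],
    #                  [ -g, H   ]]
    #
    # This is the joint covariance block for function values and first
    # derivatives, which lets the Gaussian process be trained on both
    # energies and forces.
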
    def kernel_matrix(self, X):
        """Same as self.K(X, X), but exploiting the symmetry of the
        resulting matrix.
        """
        n, D = np.atleast_2d(X).shape
        K = np.identity(n * (D + 1))
        self.D = D
        D1 = D + 1

        # Fill the upper triangular part and mirror it:
        for i in range(n):
            for j in range(i + 1, n):
                k = self.kernel(X[i], X[j])
                K[i * D1:(i + 1) * D1, j * D1:(j + 1) * D1] = k
                K[j * D1:(j + 1) * D1, i * D1:(i + 1) * D1] = k.T
            K[i * D1:(i + 1) * D1, i * D1:(i + 1) * D1] = self.kernel(
                X[i], X[i])
        return K
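
    # Shape sketch (editor's illustration; values are assumptions): for
    # n = 2 samples in D = 3 dimensions, kernel_matrix returns an
    # n*(D+1) x n*(D+1) = 8 x 8 array, ordered block-wise as
    # [f(x1), grad f(x1), f(x2), grad f(x2)] along both axes.
    #
    #     sq = SquaredExponential()
    #     sq.set_params([1.0, 0.5])
    #     X = np.arange(6.0).reshape(2, 3)
    #     sq.kernel_matrix(X).shape      # (8, 8)
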
    def kernel_vector(self, x, X, nsample):
        """Kernel matrix between a single point x and the dataset X,
        K(x, X).  (nsample is accepted but not used.)"""
        return np.hstack([self.kernel(x, x2) for x2 in X])
    # --------- Derivatives ---------
    def dK_dweight(self, X):
        """Return the derivative of K(X, X) with respect to the weight."""
        return self.K(X, X) * 2 / self.weight
    # --- Derivatives of the kernel function with respect to the scale ---
    def dK_dl_k(self, x1, x2):
        """Returns the derivative of the kernel function with respect
        to l, divided by the kernel function itself."""
        return self.squared_distance(x1, x2) / self.l
    def dK_dl_j(self, x1, x2):
        """Returns the derivative of the gradient of the kernel function
        with respect to l, divided by the kernel function itself.
        """
        prefactor = -2 * (1 - 0.5 * self.squared_distance(x1, x2)) / self.l
        return self.kernel_function_gradient(x1, x2) * prefactor
    def dK_dl_h(self, x1, x2):
        """Returns the derivative of the hessian of the kernel function
        with respect to l, divided by the kernel function itself.
        """
        I = np.identity(self.D)
        P = np.outer(x1 - x2, x1 - x2) / self.l**2
        prefactor = 1 - 0.5 * self.squared_distance(x1, x2)
        return -2 * (prefactor * (I - P) - P) / self.l**3
    def dK_dl_matrix(self, x1, x2):
        """Assemble the (D+1) x (D+1) block of the derivative of the
        kernel with respect to l for a pair of points, multiplying the
        common kernel-function factor back in."""
        k = np.asarray(self.dK_dl_k(x1, x2)).reshape((1, 1))
        j2 = self.dK_dl_j(x1, x2).reshape(1, -1)
        j1 = self.dK_dl_j(x2, x1).reshape(-1, 1)
        h = self.dK_dl_h(x1, x2)
        return np.block([[k, j2], [j1, h]]) * self.kernel_function(x1, x2)
    def dK_dl(self, X):
        """Return the derivative of K(X, X) with respect to l."""
        return np.block([[self.dK_dl_matrix(x1, x2) for x2 in X]
                         for x1 in X])
    def gradient(self, X):
        """Computes the gradient of the matrix K(X, X) with respect to
        the hyperparameters of the Gaussian process.
        Returns a 2-entry list of n(D+1) x n(D+1) matrices,
        [dK/dweight, dK/dl].
        """
        return [self.dK_dweight(X), self.dK_dl(X)]
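
# A short end-to-end sketch (editor's addition; sizes and hyperparameter
# values are illustrative assumptions): build the full kernel matrix and
# its hyperparameter gradient, then check shapes and symmetry.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = rng.standard_normal((4, 3))       # 4 samples, D = 3

    kern = SquaredExponential(dimensionality=3)
    kern.set_params([1.0, 0.7])           # [weight, l]

    K = kern.kernel_matrix(X)             # n(D+1) x n(D+1) = 16 x 16
    dK_dweight, dK_dl = kern.gradient(X)  # both 16 x 16

    assert K.shape == dK_dweight.shape == dK_dl.shape == (16, 16)
    assert np.allclose(K, K.T)            # kernel_matrix is symmetric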