# Coverage for /builds/ase/ase/ase/optimize/sciopt.py: 60.78%

import numpy as np
import scipy.optimize as opt
from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass


class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers

    Only the call to the optimizer is still needed
    """

    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, alpha=70.0, master=None,
                 force_consistent=None):
        """Initialize object

        Parameters:

        atoms: Atoms object
            The Atoms object to relax.

        trajectory: string
            Trajectory file used to store the atomic movement.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Whether the callback should be run after each force call (also
            within the line search).

        alpha: float
            Initial guess for the Hessian (curvature of the energy surface).
            A conservative value of 70.0 is the default; a lower value may
            reduce the number of steps needed to converge, but also carries
            a risk of instability.

        master: boolean
            Defaults to None, which causes only rank 0 to save files.  If
            set to true, this rank will save files.

        force_consistent: boolean or None
            Use force-consistent energy calls (as opposed to the energy
            extrapolated to 0 K).  By default (force_consistent=None) uses
            force-consistent energies if available in the calculator, but
            falls back to force_consistent=False if not.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory,
                           master, force_consistent=force_consistent)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        # Scale the problem as SciPy uses I as initial Hessian.
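        # Dividing by H0 (= alpha) means the identity matrix SciPy takes as
        # its initial Hessian corresponds to a curvature of ~alpha in the
        # original coordinates, so the first quasi-Newton step has a
        # reasonable size (see the alpha docstring above).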
        return (self.atoms.get_potential_energy(
            force_consistent=self.force_consistent) / self.H0)

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.atoms.get_forces().reshape(-1) / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.

        :meth:`callback`() can raise a :exc:`Converged` exception to signal
        that the optimization is complete.  The exception is silently caught
        by :meth:`run`().
        """
        f = self.atoms.get_forces()
        self.log(f)
        self.call_observers()
        if self.converged(f):
            raise Converged
        self.nsteps += 1

    def run(self, fmax=0.05, steps=100000000):
        if self.force_consistent is None:
            self.set_force_consistent()
        self.fmax = fmax
        try:
            # As SciPy does not log the zeroth iteration, we do that manually
            self.callback(None)
            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError
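

# The concrete optimizers below implement call_fmin() by delegating to the
# matching scipy.optimize routine, passing self.callback so that ASE logging
# and the convergence check run after every iteration.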


class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
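

# Usage sketch (EMT and molecule() are other ASE components, assumed here
# only for illustration; any calculator that provides forces will do):
#
#     from ase.build import molecule
#     from ase.calculators.emt import EMT
#
#     atoms = molecule('H2O')
#     atoms.set_calculator(EMT())
#     SciPyFminCG(atoms).run(fmax=0.05)
#
# SciPyFminBFGS below is a drop-in replacement with the same interface.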


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-less SciPy optimizers

    Only the call to the optimizer is still needed

    Note: If you redefine x0() and f(), you don't even need an atoms object.
    Redefining these also allows you to specify an arbitrary objective
    function (a sketch follows the SciPyFmin class below).

    XXX: This is still a work in progress
    """

    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, master=None,
                 force_consistent=None):
        """Initialize object

        Parameters:

        atoms: Atoms object
            The Atoms object to relax.

        trajectory: string
            Trajectory file used to store the atomic movement.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Whether the callback should be run after each function call (also
            within the line search).

        master: boolean
            Defaults to None, which causes only rank 0 to save files.  If
            set to true, this rank will save files.

        force_consistent: boolean or None
            Use force-consistent energy calls (as opposed to the energy
            extrapolated to 0 K).  By default (force_consistent=None) uses
            force-consistent energies if available in the calculator, but
            falls back to force_consistent=False if not.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory,
                           master, force_consistent=force_consistent)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        return self.atoms.get_potential_energy(
            force_consistent=self.force_consistent)

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.atoms.get_forces()
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        if self.force_consistent is None:
            self.set_force_consistent()
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin(self.f,
                 self.x0(),
                 # args=(),
                 xtol=xtol,
                 ftol=ftol,
                 maxiter=steps,
                 # maxfun=None,
                 # full_output=1,
                 disp=0,
                 # retall=0,
                 callback=self.callback)
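

# A minimal sketch of the pattern described in the SciPyGradientlessOptimizer
# docstring: redefine x0() and f() to minimize an arbitrary objective.  The
# Rosenbrock function used here is purely illustrative.
class _RosenbrockDemo(SciPyFmin):
    def x0(self):
        # Standard Rosenbrock starting point; no Atoms object is consulted.
        return np.array([-1.2, 1.0])

    def f(self, x):
        self.function_calls += 1
        return (1.0 - x[0]) ** 2 + 100.0 * (x[1] - x[0] ** 2) ** 2

# Hypothetical usage (assumes an empty ase.Atoms is accepted as a placeholder
# and that force_consistent=False keeps the calculator from being queried):
#     from ase import Atoms
#     demo = _RosenbrockDemo(Atoms(), logfile=None, force_consistent=False)
#     demo.run(xtol=1e-6, ftol=1e-6)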


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            How much to change x initially.  Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin_powell(self.f,
                        self.x0(),
                        # args=(),
                        xtol=xtol,
                        ftol=ftol,
                        maxiter=steps,
                        # maxfun=None,
                        # full_output=1,
                        disp=0,
                        # retall=0,
                        callback=self.callback,
                        direc=self.direc)
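

if __name__ == '__main__':
    # Smoke-test sketch: EMT is assumed as a cheap ASE calculator and H2O as
    # a small test system; the tolerances are illustrative, not recommended
    # production values.
    from ase.build import molecule
    from ase.calculators.emt import EMT

    atoms = molecule('H2O')
    atoms.set_calculator(EMT())
    SciPyFminPowell(atoms, logfile='-').run(xtol=0.01, ftol=0.01)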