comparison m-toolbox/classes/@ao/xfit.m @ 0:f0afece42f48

Import.
author Daniele Nicolodi <nicolodi@science.unitn.it>
date Wed, 23 Nov 2011 19:22:13 +0100
parents
children
comparison
equal deleted inserted replaced
-1:000000000000 0:f0afece42f48
1 % XFIT fit a function of x to data.
2 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3 %
4 % DESCRIPTION: XFIT performs a non-linear fit of a function of x to data.
5 % smodels fitting is also supported.
6 %
7 % ALGORITHM: XFIT does a chi-squared minimization by means of different
8 % algorithms (see details in the default plist). Covariance matrix is also
9 % computed from the Fisher's Information Matrix. In case the Information
10 % Matrix is not positive-definite, uncertainties will not be stored in the
11 % output.
12 %
13 % CALL: b = xfit(a, pl)
14 %
15 % INPUTS: a - input AO to fit to
16 % pl - parameter list (see below)
17 %
18 % OUTPUTs: b - a pest object containing the best-fit parameters,
19 % goodness-of-fit reduced chi-squared, fit degree-of-freedom
20 % covariance matrix and uncertainties. Additional
21 % quantities, like the Information Matrix, are contained
22 % within the procinfo. The best-fit model can be evaluated
23 % from pest/eval.
24 %
25 % <a href="matlab:utils.helper.displayMethodInfo('ao', 'xfit')">Parameters Description</a>
26 %
27 % EXAMPLES:
28 %
29 % % 1) Fit to a frequency-series
30 %
31 % % Create a frequency-series
32 % datapl = plist('fsfcn', '0.01./(0.0001+f) + 5*abs(randn(size(f))) ', 'f1', 1e-5, 'f2', 5, 'nf', 1000, ...
33 % 'xunits', 'Hz', 'yunits', 'N/Hz');
34 % data = ao(datapl);
35 % data.setName;
36 %
37 % % Do fit
38 % fitpl = plist('Function', 'P(1)./(P(2) + Xdata) + P(3)', ...
39 % 'P0', [0.1 0.01 1]);
40 % params = xfit(data, fitpl)
41 %
42 % % Evaluate model
43 % BestModel = eval(params,plist('type','fsdata','xdata',data,'xfield','x'));
44 % BestModel.setName;
45 %
46 % % Display results
47 % iplot(data,BestModel)
48 %
49 % % 2) Fit to a noisy sine-wave
50 %
51 % % Create a noisy sine-wave
52 % fs = 10;
53 % nsecs = 500;
54 % datapl = plist('waveform', 'Sine wave', 'f', 0.01, 'A', 0.6, 'fs', fs, 'nsecs', nsecs, ...
55 % 'xunits', 's', 'yunits', 'm');
56 % sw = ao(datapl);
57 % noise = ao(plist('tsfcn', '0.01*randn(size(t))', 'fs', fs, 'nsecs', nsecs));
58 % data = sw+noise;
59 % data.setName;
60 %
61 % % Do fit
62 % fitpl = plist('Function', 'P(1).*sin(2*pi*P(2).*Xdata + P(3))', ...
63 % 'P0', [1 0.01 0]);
64 % params = xfit(data, fitpl)
65 %
66 % % Evaluate model
67 % BestModel = eval(params,plist('type','tsdata','xdata',sw,'xfield','x'));
68 % BestModel.setName;
69 %
70 % % Display results
71 % iplot(data,BestModel)
72 %
73 % % 3) Fit an smodel of a straight line to some data
74 %
75 % % Create a noisy straight-line
76 % datapl = plist('xyfcn', '2.33 + 0.1*x + 0.01*randn(size(x))', 'x', 0:0.1:10, ...
77 % 'xunits', 's', 'yunits', 'm');
78 % data = ao(datapl);
79 % data.setName;
80 %
81 % % Model to fit
82 % mdl = smodel('a + b*x');
83 % mdl.setXvar('x');
84 % mdl.setParams({'a', 'b'}, {1 2});
85 %
86 % % Fit model
87 % fitpl = plist('Function', mdl, 'P0', [1 1]);
88 % params = xfit(data, fitpl)
89 %
90 % % Evaluate model
91 % BestModel = eval(params,plist('xdata',data,'xfield','x'));
92 % BestModel.setName;
93 %
94 % % Display results
95 % iplot(data,BestModel)
96 %
97 % % 4) Fit a chirp-sine firstly starting from an initial guess (quite close
98 % % to the true values) (bad convergence) and secondly by a Monte Carlo
99 % % search (good convergence)
100 %
101 % % Create a noisy chirp-sine
102 % fs = 10;
103 % nsecs = 1000;
104 %
105 % % Model to fit and generate signal
106 % mdl = smodel(plist('name', 'chirp', 'expression', 'A.*sin(2*pi*(f + f0.*t).*t + p)', ...
107 % 'params', {'A','f','f0','p'}, 'xvar', 't', 'xunits', 's', 'yunits', 'm'));
108 %
109 % % signal
110 % s = mdl.setValues({10,1e-4,1e-5,0.3});
111 % s.setXvals(0:1/fs:nsecs-1/fs);
112 % signal = s.eval;
113 % signal.setName;
114 %
115 % % noise
116 % noise = ao(plist('tsfcn', '1*randn(size(t))', 'fs', fs, 'nsecs', nsecs));
117 %
118 % % data
119 % data = signal + noise;
120 % data.setName;
121 %
122 % % Fit model from the starting guess
123 % fitpl_ig = plist('Function', mdl, 'P0',[8,9e-5,9e-6,0]);
124 % params_ig = xfit(data, fitpl_ig);
125 %
126 % % Evaluate model
127 % BestModel_ig = eval(params_ig,plist('xdata',data,'xfield','x'));
128 % BestModel_ig.setName;
129 %
130 % % Display results
131 % iplot(data,BestModel_ig)
132 %
133 % % Fit model by a Monte Carlo search
134 % fitpl_mc = plist('Function', mdl, ...
135 % 'MonteCarlo', 'yes', 'Npoints', 1000, 'LB', [8,9e-5,9e-6,0], 'UB', [11,3e-4,2e-5,2*pi]);
136 % params_mc = xfit(data, fitpl_mc)
137 %
138 % % Evaluate model
139 % BestModel_mc = eval(params_mc,plist('xdata',data,'xfield','x'));
140 % BestModel_mc.setName;
141 %
142 % % Display results
143 % iplot(data,BestModel_mc)
144 %
145 % % 5) Multichannel fit of smodels
146 %
147 % % Ch.1 data
148 % datapl = plist('xyfcn', '0.1*x + 0.01*randn(size(x))', 'x', 0:0.1:10, 'name', 'channel 1', ...
149 % 'xunits', 'K', 'yunits', 'Pa');
150 % a1 = ao(datapl);
151 % % Ch.2 data
152 % datapl = plist('xyfcn', '2.5*x + 0.1*sin(2*pi*x) + 0.01*randn(size(x))', 'x', 0:0.1:10, 'name', 'channel 2', ...
153 % 'xunits', 'K', 'yunits', 'T');
154 % a2 = ao(datapl);
155 %
156 % % Model to fit
157 % mdl1 = smodel('a*x');
158 % mdl1.setXvar('x');
159 % mdl1.setParams({'a'}, {1});
160 % mdl1.setXunits('K');
161 % mdl1.setYunits('Pa');
162 %
163 % mdl2 = smodel('b*x + a*sin(2*pi*x)');
164 % mdl2.setXvar('x');
165 % mdl2.setParams({'a','b'}, {1,2});
166 % mdl2.setXunits('K');
167 % mdl2.setYunits('T');
168 %
169 % % Fit model
170 % params = xfit(a1,a2, plist('Function', [mdl1,mdl2]));
171 %
172 % % evaluate model
173 % b = eval(params, plist('index',1,'xdata',a1,'xfield','x'));
174 % b.setName('fit Ch.1');
175 % r = a1-b;
176 % r.setName('residuals');
177 % iplot(a1,b,r)
178 %
179 % b = eval(params, plist('index',2,'xdata',a2,'xfield','x'));
180 % b.setName('fit Ch.2');
181 % r = a2-b;
182 % r.setName('residuals');
183 % iplot(a2,b,r)
184 %
185 % <a href="matlab:utils.helper.displayMethodInfo('ao', 'xfit')">Parameters Description</a>
186 %
187 % VERSION: $Id: xfit.m,v 1.83 2011/05/12 07:46:29 mauro Exp $
188 %
189 % HISTORY: 17-09-2009 G. Congedo
190 %
191 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
192
function varargout = xfit(varargin)
  
  % Shared state read by the helper cost functions (e.g. fit_chi2) during the
  % minimisation.
  % NOTE(review): fit_chi2 also reads a global 'scale', which is cleared at the
  % bottom of this function but never declared here -- presumably it is set
  % inside the find_bestfit_* helpers; verify.
  % global variables
  global Pidx Ydata weights modelFuncs dFcns hFcns lb ub Nch Ndata estimator
  
  
  % Check if this is a call for parameters
  if utils.helper.isinfocall(varargin{:})
    varargout{1} = getInfo(varargin{3});
    return
  end
  
  import utils.const.*
  utils.helper.msg(msg.PROC3, 'running %s/%s', mfilename('class'), mfilename);
  
  % Collect input variable names
  in_names = cell(size(varargin));
  for ii = 1:nargin,in_names{ii} = inputname(ii);end
  
  % Collect all AOs and plists
  [as, ao_invars] = utils.helper.collect_objects(varargin(:), 'ao', in_names);
  pl = utils.helper.collect_objects(varargin(:), 'plist', in_names);
  
  % xfit builds a new pest object; it never modifies its inputs in place.
  if nargout == 0
    error('### xfit cannot be used as a modifier. Please give an output variable.');
  end
  
  % combine plists
  pl = parse(pl, getDefaultPlist());
  
  % Extract necessary parameters
  targetFcn = pl.find('Function');
  P0 = pl.find('P0');
  % ADDP = pl.find('ADDP');
  userOpts = pl.find('OPTSET');
  weights = pl.find('WEIGHTS');
  FitUnc = pl.find('FitUnc');
  UncMtd = pl.find('UncMtd');
  linUnc = pl.find('LinUnc');
  FastHess = pl.find('FastHess');
  SymDiff = pl.find('SymDiff');
  SymGrad = pl.find('SymGrad');
  SymHess = pl.find('SymHess');
  DiffOrder = pl.find('DiffOrder');
  lb = pl.find('LB');
  ub = pl.find('UB');
  MCsearch = pl.find('MonteCarlo');
  Npoints = pl.find('Npoints');
  Noptims = pl.find('Noptims');
  % SVDsearch = pl.find('SVD');
  % nSVD = pl.find('nSVD');
  Algorithm = pl.find('Algorithm');
  % AdpScale = pl.find('AdpScale');
  % only for function handle fitting
  pnames = pl.find('pnames');
  % pvalues = pl.find('pvalues');
  estimator = pl.find('estimator');
  
  % Convert yes/no, true/false, etc. to booleans
  FitUnc = utils.prog.yes2true(FitUnc);
  linUnc = utils.prog.yes2true(linUnc);
  FastHess = utils.prog.yes2true(FastHess);
  SymDiff = utils.prog.yes2true(SymDiff);
  MCsearch = utils.prog.yes2true(MCsearch);
  % SVDsearch = utils.prog.yes2true(SVDsearch);
  % AdpScale = utils.prog.yes2true(AdpScale);
  
  % Check the fitting algorithm
  % AlgoCheck = ~strcmpi(Algorithm,'fminsearch') & ~strcmpi(Algorithm,'fminunc') & ~isempty(Algorithm);
  
  % Resolve the optimiser name; each toolbox-dependent choice is verified with
  % exist() so a missing toolbox fails with a clear message rather than a
  % run-time "undefined function" error.
  if isempty(Algorithm)
    Algorithm = 'fminsearch';
    utils.helper.msg(msg.IMPORTANT, 'using %s as the fitting algorithm', upper(Algorithm));
  elseif ischar(Algorithm)
    Algorithm = lower(Algorithm);
    switch Algorithm
      case 'fminsearch'
        utils.helper.msg(msg.IMPORTANT, 'using %s as the fitting algorithm', upper(Algorithm));
        Algorithm = 'fminsearch';
      case 'fminunc'
        if exist('fminunc','file')==2
          utils.helper.msg(msg.IMPORTANT, 'using %s as the fitting algorithm', upper(Algorithm));
          Algorithm = 'fminunc';
        else
          error('### you must install Optimization Toolbox in order to use %s', upper(Algorithm))
        end
      case 'fmincon'
        if exist('fmincon','file')==2
          utils.helper.msg(msg.IMPORTANT, 'using %s as the fitting algorithm', upper(Algorithm));
          Algorithm = 'fmincon';
        else
          error('### you must install Optimization Toolbox in order to use %s', upper(Algorithm))
        end
      case 'patternsearch'
        if exist('patternsearch','file')==2
          utils.helper.msg(msg.IMPORTANT, 'using %s as the fitting algorithm', upper(Algorithm));
          Algorithm = 'patternsearch';
        else
          error('### you must install Genetic Algorithm and Direct Search Toolbox in order to use %s', upper(Algorithm))
        end
      case 'ga'
        if exist('ga','file')==2
          utils.helper.msg(msg.IMPORTANT, 'using %s as the fitting algorithm', upper(Algorithm));
          Algorithm = 'ga';
        else
          error('### you must install Genetic Algorithm and Direct Search Toolbox in order to use %s', upper(Algorithm))
        end
      case 'simulannealbnd'
        if exist('simulannealbnd','file')==2
          utils.helper.msg(msg.IMPORTANT, 'using %s as the fitting algorithm', upper(Algorithm));
          Algorithm = 'simulannealbnd';
        else
          error('### you must install Genetic Algorithm and Direct Search Toolbox in order to use %s', upper(Algorithm))
        end
      otherwise
        error('### unknown fitting algorithm')
    end
  else
    error('### unknown format for ALGORITHM parameter')
  end
  
  % Estimator
  % NOTE: strcmp against a cell returns a logical array; the elseif fires only
  % when the name matches none of the supported estimators.
  if isempty(estimator)
    estimator = 'chi2';
  elseif ~strcmp(estimator,{'chi2','abs','log','median'})
    error('### unknown name of the estimator')
  end
  
  
  % Data we will fit to
  Xdata = as.x;
  Ydata = as.y;
  % dYdata = as.dy;
  
  % Number of data point per each channel
  Ndata = numel(as(1).x);
  
  % Number of channels
  Nch = numel(as);
  % multiCh is used as a boolean: nonzero (true) when more than one channel
  multiCh = Nch-1;
  
  % Number of models
  if all(isa(targetFcn, 'smodel'))
    Nmdls = numel(targetFcn);
  elseif iscell(targetFcn)
    Nmdls = numel(targetFcn);
  else
    Nmdls = 1;
  end
  
  % consistency check on the data units
  Xunits = as.xunits;
  % Xunits = as(1).xunits.strs;
  if Nch>1
    XunitsCheck = Xunits(1).strs;
    for kk=2:Nch
      if ~strcmp(Xunits(kk).strs,XunitsCheck)
        error('### in multi-channel fitting the xunits of all data objects must be the same')
      end
    end
  end
  Xunits = as(1).xunits;
  Yunits = as.yunits;
  
  % consistency check on the inputs
  
  if isempty(targetFcn)
    error('### please specify at least a model');
  end
  
  if Nch~=Nmdls
    error('### number of data channels and models do not match')
  end
  
  % all channels must have the same number of samples
  for kk=1:Nch
    if any(numel(as(kk).x)~=Ndata)
      error('### the number of data is not self-consistent: please check that all channels have the same length')
    end
  end
  
  % all channels must share the same x grid
  for kk=1:Nch
    for kkk=1:Nch
      if any(Xdata(:,kk)~=Xdata(:,kkk))
        error('### in multi-channel fitting the x-field of all data objects must be the same')
      end
    end
  end
  
  cellMdl = iscell(targetFcn);
  
  % Multi-channel fitting accepts either a cell array of function handles or
  % an array of smodels; validate whichever form was supplied.
  if multiCh
    if cellMdl
      for kk=1:Nmdls
        if ~isa(targetFcn{kk}, 'function_handle')
          error('### please use a cell array of function handles')
        end
      end
    else
      for kk=1:Nmdls
        if ~isa(targetFcn(kk), 'smodel')
          error('### multi-channel fitting is only possible with smodels')
        end
        if isempty(targetFcn(kk).expr)
          error('### please specify a target function for all smodels');
        end
        if ~isempty(targetFcn(kk).values) & size(targetFcn(kk).values)~=size(targetFcn(kk).params)
          error('### please give an initial value for each parameter')
        end
      end
      if ~isempty(P0)
        error('in multi-channel fitting the initial values for the parameters must be within each smodel')
      end
      checkExistAllParams = 0;
      for kk=1:Nch
        checkExistAllParams = checkExistAllParams + ~isempty(targetFcn(kk).values);
      end
      if checkExistAllParams==0 && ~MCsearch
        error('### please give initial values for all parameters or use Monte Carlo instead')
      end
    end
  end
  
  % check P0
  if isempty(P0) && ~MCsearch
    for kk=1:Nmdls
      if isa(targetFcn(kk), 'smodel') && isempty(targetFcn(kk).values)
        error('### please give initial values for all parameters or use Monte Carlo instead')
      elseif ischar(targetFcn)
        error('### please give initial values for all parameters or use Monte Carlo instead')
        % elseif cellMdl
        %   error('### please give initial values for all parameters or use Monte Carlo instead')
      end
    end
  end
  
  % Extract anonymous functions
  
  if multiCh
    
    if ~cellMdl
      % concatenate models and parameters
      [params,mdl_params,Pidx] = cat_mdls(targetFcn);
    else%if iscell(P0)
      [params,mdl_params,Pidx] = cat_mdls_cell(targetFcn, pnames);
      % else
      %   params = pnames;
      %   Pidx = cell(1,Nch);
      %   mdl_params = pnames;
      %   for ii=1:Nch
      %     Pidx{ii} = ones(1,numel(pnames));
      %   end
    end
    
    % create the full initial value array
    % (parameters shared between models are matched by name)
    if ~MCsearch && ~cellMdl
      P0 = zeros(1,numel(params));
      for ii=1:numel(params)
        for kk=1:Nmdls
          for jj=1:numel(targetFcn(kk).params)
            if strcmp(params{ii},targetFcn(kk).params{jj})
              P0(ii) = targetFcn(kk).values{jj};
            end
          end
        end
      end
    elseif cellMdl
      if isempty(P0)% || ~iscell(pvalues)
        error('### please give initial values')
      end
      if isempty(pnames) || ~iscell(pnames)
        error('### please give parameter names in a cell array')
      end
      if size(P0)~=size(pnames)
        error('### the size of pnames and pvalues does not match')
      end
      % P0 = zeros(1,numel(params));
      % for ii=1:numel(P0)
      %   P0(ii) = pvalues{ii};
      % end
      if iscell(P0) && ~MCsearch
        for ii=1:numel(P0)
          if isempty(P0{ii})
            error('### please give initial values for all parameters or use Monte Carlo instead');
          end
        end
        for ii=1:numel(params)
          for kk=1:Nmdls
            for jj=1:numel(pnames{kk})
              if strcmp(params{ii},pnames{kk}{jj})
                P0new(ii) = P0{kk}(jj);
              end
            end
          end
        end
        P0 = P0new;
      end
    end
    
    if all(isa(targetFcn,'smodel'))
      % anonymous fcns
      modelFuncs = cell(1,Nch);
      for kk=1:Nch
        targetFcn(kk).setXvals(Xdata(:,kk));
        modelFuncs{kk} = targetFcn(kk).fitfunc;
      end
    end
    % if cellMdl
    %   for kk=1:Nch
    %     modelFuncs{kk} = targetFcn{kk};
    %   end
    % end
    
  else
    
    % Check parameters
    if isempty(P0)
      if isa(targetFcn, 'smodel')
        P0 = [targetFcn.values{:}];
      elseif isempty(P0) && ~MCsearch
        error('### please give initial values for all parameters or use Monte Carlo instead');
      end
    end
    % force P0 to be a row vector
    if size(P0)~=[1 numel(P0)]
      P0 = P0';
    end
    
    % convert input regular expression to anonymous function only for
    % single-channel fitting
    % create anonymouse function from user input expression
    if ischar(targetFcn)
      checkFcn = regexp(targetFcn, 'Xdata');
      if isempty(checkFcn)
        error('### when using a string expression for the input model, the independent variable must be named as Xdata')
      end
      % tfunc = eval(['@(P,Xdata,ADDP) (',targetFcn,')']);
      tfunc = eval(['@(P,Xdata) (',targetFcn,')']);
      % now create another anonymous function that only depends on the
      % parameters
      % modelFunc = @(x)tfunc(x, Xdata, ADDP);
      modelFuncs{1} = @(x)tfunc(x, Xdata);
    elseif isa(targetFcn,'smodel')
      targetFcn.setXvals(Xdata);
      modelFuncs{1} = targetFcn.fitfunc;
      % elseif isa(targetFcn,'function_handle')
      %   modelFunc = targetFcn;
    end
    
  end
  
  if cellMdl
    for kk=1:Nch
      modelFuncs{kk} = targetFcn{kk};
    end
    % if ~multiCh
    %   modelFunc = modelFuncs{1};
    % end
  end
  
  
  
  % check lb and ub
  
  % constrained search or not
  conSearch = ~isempty(lb) || ~isempty(ub);
  
  if MCsearch || conSearch
    if isempty(lb) || isempty(ub)
      error('### please give LB and UB')
    end
    if multiCh && (~iscell(lb) || ~iscell(ub))
      error('### in multi-channel fitting upper and lower bounds must be cell array')
    end
    if size(lb)~=size(ub) % | size(lb)~=size(P0) | size(ub)~=size(P0)
      error('### LB and UB must be of the same size');
    end
    if multiCh && numel(lb)~=Nch && numel(ub)~=Nch
      error('### in multi-channel fitting LB and UB must be cell array whose number of elements is equal to the number of models');
    end
    if ~multiCh && ~all(lb<=ub)
      error('### UB must me greater equal to LB');
    end
    if multiCh
      for kk=1:Nmdls
        if numel(lb{kk})~=numel(mdl_params{kk})
          error('### please give the proper number of values for LB for each model')
        end
        if numel(ub{kk})~=numel(mdl_params{kk})
          error('### please give the proper number of values for UB for each model')
        end
        if ~all(lb{kk}<=ub{kk})
          error('### UB must me greater equal to LB for all parameters and models');
        end
        % force row vectors
        if size(lb{kk})~=[1 numel(lb{kk})]
          lb{kk} = lb{kk}';
          ub{kk} = ub{kk}';
        end
      end
    end
    if ~multiCh
      if size(lb)~=[1 numel(lb)]
        lb = lb';
        ub = ub';
      end
    end
  end
  
  
  % create the full bounds array
  % (per-model cell bounds are flattened onto the concatenated parameter list)
  if (MCsearch || conSearch) && multiCh && ~cellMdl
    lb_full = zeros(1,numel(params));
    ub_full = zeros(1,numel(params));
    for ii=1:numel(params)
      for kk=1:Nmdls
        for jj=1:numel(targetFcn(kk).params)
          if strcmp(params{ii},targetFcn(kk).params{jj})
            lb_full(ii) = lb{kk}(jj);
            ub_full(ii) = ub{kk}(jj);
          end
        end
      end
    end
    lb = lb_full;
    ub = ub_full;
  elseif (MCsearch || conSearch) && multiCh && cellMdl
    lb_full = zeros(1,numel(params));
    ub_full = zeros(1,numel(params));
    for ii=1:numel(params)
      for kk=1:Nmdls
        for jj=1:numel(mdl_params{kk})
          if strcmp(params{ii},mdl_params{kk}(jj))
            lb_full(ii) = lb{kk}(jj);
            ub_full(ii) = ub{kk}(jj);
          end
        end
      end
    end
    lb = lb_full;
    ub = ub_full;
    % elseif cellMdl
    %   lb = lb;
    %   ub = ub;
  end
  
  % if ~iscell(ADDP)
  %   ADDP = {ADDP};
  % end
  
  % Get input options
  % Defaults come from the chosen solver; userOpts may be either an optimset
  % struct or a {name, value, ...} list.
  switch Algorithm
    case 'fminsearch'
      opts = optimset(@fminsearch);
      if isstruct(userOpts)
        opts = optimset(opts, userOpts);
      else
        for j=1:2:numel(userOpts)
          opts = optimset(opts, userOpts{j}, userOpts{j+1});
        end
      end
    case 'fminunc'
      opts = optimset(@fminunc);
      if isstruct(userOpts)
        opts = optimset(opts, userOpts);
      else
        for j=1:2:numel(userOpts)
          opts = optimset(opts, userOpts{j}, userOpts{j+1});
        end
      end
    case 'fmincon'
      opts = optimset(@fmincon);
      if isstruct(userOpts)
        opts = optimset(opts, userOpts);
      else
        for j=1:2:numel(userOpts)
          opts = optimset(opts, userOpts{j}, userOpts{j+1});
        end
      end
    case 'patternsearch'
      opts = psoptimset(@patternsearch);
      if isstruct(userOpts)
        opts = psoptimset(opts, userOpts);
      else
        for j=1:2:numel(userOpts)
          opts = psoptimset(opts, userOpts{j}, userOpts{j+1});
        end
      end
    case 'ga'
      opts = gaoptimset(@ga);
      if isstruct(userOpts)
        opts = gaoptimset(opts, userOpts);
      else
        for j=1:2:numel(userOpts)
          opts = gaoptimset(opts, userOpts{j}, userOpts{j+1});
        end
      end
    case 'simulannealbnd'
      opts = saoptimset(@simulannealbnd);
      if isstruct(userOpts)
        opts = saoptimset(opts, userOpts);
      else
        for j=1:2:numel(userOpts)
          opts = saoptimset(opts, userOpts{j}, userOpts{j+1});
        end
      end
  end
  
  
  % compute the right weights
  [weights,unweighted] = find_weights(as,weights);
  
  
  % define number of free parameters
  if ~MCsearch
    Nfreeparams = length(P0);
  else
    Nfreeparams = length(lb);
  end
  
  % single-channel: all parameters belong to the one model
  if Nch==1
    Pidx{1}=1:Nfreeparams;
    % modelFuncs{1}=modelFunc;
  end
  
  % Check for user-supplied analytical gradient as cell-array of function
  % handles
  if ~isempty(SymGrad)
    dFcns = cell(Nmdls,1);
    for kk=1:Nmdls
      for ii=1:numel(SymGrad{kk})
        dFcns{kk}{ii} = SymGrad{kk}{ii};
      end
    end
  end
  
  % Check for user-supplied analytical hessian as cell-array of function
  % handles (only the upper triangle is stored; the hessian is symmetric)
  if ~isempty(SymGrad) && ~isempty(SymHess)
    hFcns = cell(Nmdls,1);
    for kk=1:Nmdls
      for jj=1:numel(dFcns{kk})
        for ii=1:jj
          hFcns{kk}{ii,jj} = SymHess{kk}{ii,jj};
        end
      end
    end
  end
  
  % If requested, compute the analytical gradient
  % NOTE(review): '||' binds looser than '&&', so this reads as
  % SymDiff || (linUnc && isempty(SymGrad)) -- confirm that is the intent.
  if SymDiff || linUnc && isempty(SymGrad)
    utils.helper.msg(msg.IMPORTANT, 'evaluating symbolic derivatives');
    if ~isa(targetFcn,'smodel')
      error('### smodel functions must be used in order to do symbolic differentiation')
    end
    % compute symbolic 1st-order differentiation
    dFcnsSmodel = cell(Nmdls,1);
    for kk=1:Nmdls
      p = targetFcn(kk).params;
      for ii=1:numel(p)
        dFcnsSmodel{kk}(ii) = diff(targetFcn(kk),p{ii});
      end
    end
    % extract anonymous function
    dFcns = cell(Nmdls,1);
    for kk=1:Nmdls
      for ii=1:numel(dFcnsSmodel{kk})
        dFcns{kk}{ii} = dFcnsSmodel{kk}(ii).fitfunc;
      end
    end
    if DiffOrder==2;
      % compute symbolic 2nd-order differentiation
      hFcnsSmodel = cell(Nmdls,1);
      for kk=1:Nmdls
        p = targetFcn(kk).params;
        for jj=1:numel(p)
          for ii=1:jj
            hFcnsSmodel{kk}(ii,jj) = diff(dFcnsSmodel{kk}(ii),p{jj});
          end
        end
      end
      % extract anonymous function
      hFcns = cell(Nmdls,1);
      for kk=1:Nmdls
        for jj=1:numel(dFcnsSmodel{kk})
          for ii=1:jj
            hFcns{kk}{ii,jj} = hFcnsSmodel{kk}(ii,jj).fitfunc;
          end
        end
      end
    end
  end
  
  % Set optimset in order to take care of eventual symbolic differentiation
  if ~isempty(dFcns) && any(strcmp(Algorithm,{'fminunc','fmincon'}))
    opts = optimset(opts, 'GradObj', 'on');
  end
  if ~isempty(hFcns) && any(strcmp(Algorithm,{'fminunc','fmincon'}))
    opts = optimset(opts, 'Hessian', 'on');
  end
  
  % Start the best-fit search
  
  if MCsearch
    
    utils.helper.msg(msg.IMPORTANT, 'performing a Monte Carlo search');
    
    % find best-fit by a Monte Carlo search
    [P,chi2,exitflag,output,h,MChistory] = ...
      find_bestfit_montecarlo(lb, ub, Npoints, Noptims, opts, Algorithm, FastHess);
    
  else
    
    utils.helper.msg(msg.IMPORTANT, 'looking for a best-fit from the initial guess');
    
    % find best-fit starting from an initial guess
    [P,chi2,exitflag,output,h,chainHistory] = ...
      find_bestfit_guess(P0, opts, Algorithm, FastHess);
    
  end
  
  
  % degrees of freedom in the problem
  % dof = Nch * (Ndata - Nfreeparams);
  dof = Nch * Ndata - Nfreeparams;
  
  % redefine MChistory's column to put the reduced chi2s
  if MCsearch
    MChistory(:,1) = MChistory(:,1)./dof;
  end
  
  % redefine history to put the reduced chi2s
  if ~MCsearch
    chainHistory.fval = chainHistory.fval./dof;
  end
  
  % Confidence intervals construction
  
  if FitUnc
    
    utils.helper.msg(msg.IMPORTANT, 'estimating confidence intervals');
    
    % find best-fit errors
    [se,Sigma,Corr,I,H,errH,J,errJ] = ...
      find_errors(modelFuncs, P, chi2, dof, UncMtd, FastHess, h, weights, unweighted, linUnc, dFcns);
    
    
    % report issue on covariance matrix in case of
    % degeneracy/quasi-singularity/ill-conditioning, etc.
    posDef = all(diag(Sigma)>=0);
    if ~posDef
      % analysis of information matrix in order to cancel out un-important
      % parameters
      Inorm=I./norm(I);
      pnorms = zeros(1,size(Inorm,2));
      for ii=1:size(I,2);
        pnorms(ii) = norm(Inorm(:,ii));
      end
      [pnorms_sort,IX] = sort(pnorms,'descend');
      if Nmdls>1
        pnames_sort = params(IX);
      elseif isa(targetFcn,'smodel') && Nmdls==1%&& ~isempty(targetFcn.params)
        params = targetFcn.params;
        pnames_sort = params(IX);
      elseif isa(targetFcn,'function_handle') %&& ~isempty(targetFcn.params)
        pnames_sort = pnames(IX);
      end
      utils.helper.msg(msg.IMPORTANT, ['Information matrix is quasi-singular due to degeneracy or ill-conditioning: \n'...
        'consider eliminating the parameters with low information.']); % ...'In the following, parameters are reported in descending order of information']);
      for ii=1:numel(pnorms_sort);
        if exist('pnames_sort','var')
          utils.helper.msg(msg.IMPORTANT, 'param %s: %g ', pnames_sort{ii}, pnorms_sort(ii));
        else
          utils.helper.msg(msg.IMPORTANT, 'param %d: %g ', IX(ii), pnorms_sort(ii));
        end
      end
      
    end
    
  end
  
  utils.helper.msg(msg.IMPORTANT, 'final best-fit found at reduced chi2, dof: %g %d ', chi2/dof, dof);
  
  if FitUnc && ~isempty(se)
    for kk=1:numel(P)
      utils.helper.msg(msg.IMPORTANT, 'best-fit param %d: %g +- %2.1g ', kk, P(kk), se(kk));
    end
  else
    for kk=1:numel(P)
      utils.helper.msg(msg.IMPORTANT, 'best-fit param %d: %g ', kk, P(kk));
    end
  end
  
  
  % check the existence of all variables
  % (quantities not computed on this code path default to empty)
  if ~exist('se','var'); se = []; end
  if ~exist('Sigma','var'); Sigma = []; end
  if ~exist('Corr','var'); Corr = []; end
  if ~exist('I','var'); I = []; end
  if ~exist('H','var'); H = []; end
  if ~exist('errH','var'); errH = []; end
  if ~exist('J','var'); J = []; end
  if ~exist('errJ','var'); errJ = []; end
  if ~exist('Sigma','var'); Sigma = []; end
  if ~exist('MChistory','var'); MChistory = []; end
  % if ~exist('SVDhistory','var'); SVDhistory = []; end
  if ~exist('output','var'); output = []; end
  
  
  % Make output pest
  out = pest;
  if Nch>1 %exist('params')
    out.setNames(params);
  elseif Nch==1 && isa(targetFcn, 'smodel')
    out.setNames(targetFcn.params);
  end
  out.setY(P');
  out.setDy(se');
  out.setCov(Sigma);
  out.setCorr(Corr);
  out.setChi2(chi2/dof);
  out.setDof(dof);
  if ~MCsearch
    out.setChain([chainHistory.fval,chainHistory.x]);
  end
  
  % add the output best-fit models
  outFcn = targetFcn;
  if isa(targetFcn, 'smodel') && Nmdls>1
    for kk=1:Nmdls
      p = P(Pidx{kk});
      outFcn(kk).setValues(p);
      % outFcn(kk).setXvals(Xdata(:,1));
      outFcn(kk).setXunits(Xunits);
      outFcn(kk).setYunits(Yunits(kk));
      outFcn(kk).setName(['Best-fit model ' num2str(kk)]);
    end
    out.setModels(outFcn);
  elseif isa(targetFcn, 'smodel') && Nmdls==1
    outFcn.setValues(P');
    % outFcn.setXvals(Xdata(:,1));
    outFcn.setXunits(Xunits);
    outFcn.setYunits(Yunits);
    outFcn.setName('Best-fit model');
    out.setModels(outFcn);
  elseif ischar(targetFcn)
    % convert regular expression into smodel
    % (rename Xdata -> x and P(i) -> Pi so the expression is a valid smodel)
    targetFcn = regexprep(targetFcn, 'Xdata', 'x');
    for ii=1:Nfreeparams
      targetFcn = regexprep(targetFcn, ['P\(' num2str(ii) '\)'], ['P' num2str(ii)]);
    end
    outFcn = smodel((targetFcn));
    pnames = cell(1,Nfreeparams);
    for ii=1:Nfreeparams
      pnames{ii} = ['P' num2str(ii)];
    end
    outFcn.setParams(pnames, P');
    outFcn.setXvar('x');
    % outFcn.setXvals(Xdata);
    outFcn.setXunits(Xunits);
    outFcn.setYunits(Yunits);
    outFcn.setName('Best-fit model');
    out.setModels(outFcn);
    out.setNames(pnames);
  end
  
  
  % Set Name, History and Procinfo
  % (model names longer than 20 characters are truncated for readability)
  if ~cellMdl
    mdlname = char(targetFcn);
    if numel(mdlname)>20
      mdlname = mdlname(1:20);
    end
    out.name = sprintf('xfit(%s)', mdlname);
  else
    mdlname = func2str(targetFcn{1});
    for kk=2:Nch
      mdlname = strcat(mdlname,[',' func2str(targetFcn{kk})]);
    end
    out.name = sprintf('xfit(%s)', mdlname);
  end
  out.addHistory(getInfo('None'), pl, ao_invars(:), [as(:).hist]);
  out.procinfo = plist('algorithm', Algorithm, 'exitflag', exitflag, 'output', output, ...
    'InfoMatrix', I,...
    'MChistory', MChistory);
  % 'SVDhistory', SVDhistory, 'res', res, 'hess', H, 'errhess', errH, 'jac', J, 'errjac', errJ);
  
  % Set outputs
  if nargout > 0
    varargout{1} = out;
  end
  
  % drop the shared state so later calls start clean
  clear global Pidx Ydata weights modelFuncs dFcns hFcns lb ub scale Nch Ndata estimator
  
end
985
986 %--------------------------------------------------------------------------
987 % Included Functions
988 %--------------------------------------------------------------------------
989
% FIT_CHI2 cost function minimised by the optimiser.
%
% Input x is the vector of (scaled) free parameters; the true parameter
% values are x.*scale. Returns the cost (chi2), and optionally the analytical
% gradient g and hessian H when the caller requests them and the global
% derivative tables dFcns/hFcns are populated.
%
% All data and configuration arrive through globals set up by xfit.
function [chi2,g,H] = fit_chi2(x)
  
  global Pidx Ydata weights modelFuncs dFcns hFcns lb ub scale estimator
  
  Nfreeparams = numel(x);
  Nmdls = numel(modelFuncs);
  Ndata = numel(Ydata(:,1));
  
  % evaluate each channel's model on its own parameter subset (Pidx maps the
  % concatenated parameter vector onto per-model parameters)
  mdldata = zeros(Ndata,Nmdls);
  for kk=1:Nmdls
    p = x(Pidx{kk}).*scale(Pidx{kk});
    mdldata(:,kk) = modelFuncs{kk}(p);
  end
  % weighted residuals, one column per channel
  res = (mdldata-Ydata).*weights;
  % bounds are enforced by penalty: out-of-bounds points get a huge cost
  if all(x.*scale>=lb & x.*scale<=ub)
    % chi2 = res'*res;
    % chi2 = sum(diag(chi2));
    if strcmp(estimator,'chi2')
      % least squares
      chi2 = sum(sum(res.^2));
    elseif strcmp(estimator,'abs')
      % least absolute deviations
      chi2 = sum(sum(abs(res)));
    elseif strcmp(estimator,'log')
      % robust log estimator
      chi2 = sum(sum(log(1+res.^2/2)));
    elseif strcmp(estimator,'median')
      % median absolute deviation, scaled by the degrees of freedom
      dof = Ndata*Nmdls - Nfreeparams;
      res = reshape(res,numel(res),1);
      chi2 = median(abs(res))*dof;
    end
  else
    chi2 = 10e50;
  end
  
  if nargout > 1 % gradient required
    % g_j = 2 * sum_k res_k' * dmodel_k/dp_j, accumulated per model then
    % summed over channels. Requires dFcns (analytical derivatives).
    % if all(x.*scale>=lb & x.*scale<=ub)
    grad = cell(Nmdls,1);
    g = zeros(Nmdls,Nfreeparams);
    for kk=1:Nmdls
      p = x(Pidx{kk}).*scale(Pidx{kk});
      Np = numel(p);
      grad{kk} = zeros(Ndata,Np);
      for ii=1:Np
        grad{kk}(:,ii) = dFcns{kk}{ii}(p);
        g(kk,Pidx{kk}(ii)) = 2.*res(:,kk)'*grad{kk}(:,ii);
        % dF=dFcns{kk}{ii}(p);
        % g(kk,Pidx{kk}(ii)) = 2.*sum(res(:,kk)'*dF);
      end
    end
    if Nmdls>1
      g = sum(g);
    end
    % elseif any(x.*scale<lb)
    %   g = repmat(-10e50,[1 Nfreeparams]);
    % elseif any(x.*scale>ub)
    %   g = repmat(10e50,[1 Nfreeparams]);
    % end
  end
  
  if nargout > 2 % hessian required
    % Gauss-Newton-style hessian: second-derivative term (res'*hF) plus
    % weighted first-derivative cross term; only the upper triangle is
    % filled, then mirrored below.
    % hess = cell(Nmdls,1);
    H = zeros(Nmdls,Nfreeparams,Nfreeparams);
    for kk=1:Nmdls
      p = x(Pidx{kk}).*scale(Pidx{kk});
      Np = numel(p);
      % hess{kk} = zeros(Ndata,Np);
      for jj=1:Np
        for ii=1:jj
          hF = hFcns{kk}{ii,jj}(p);
          H1 = sum(res(:,kk)'*hF);
          H2 = sum(weights(:,kk).*grad{kk}(:,ii).*grad{kk}(:,jj));
          H(kk,Pidx{kk}(ii),Pidx{kk}(jj)) = 2*(H1+H2);
        end
      end
    end
    H = squeeze(sum(H,1));
    % if Nmdls>1
    %   H = sum(H);
    % end
    % mirror the upper triangle to restore symmetry
    H = H+triu(H,1)';
  end
  
end
1071
1072 %--------------------------------------------------------------------------
1073
1074 % function chi2 = fit_chi2(P, ydata, weights, fcn)
1075 %
1076 % % evaluate model
1077 % mdldata = fcn(P);
1078 % % if ~isequal(size(mdldata), size(ydata))
1079 % % ydata = ydata.';
1080 % % end
1081 %
1082 % chi2 = sum((abs((mdldata-ydata)).^2).*weights);
1083 %
1084 % end
1085
1086 %--------------------------------------------------------------------------
1087
1088 % function chi2 = fit_chi2_bounds(P, ydata, weights, fcn, LB, UB)
1089 %
1090 % % evaluate model
1091 % mdldata = fcn(P);
1092 % % if ~isequal(size(mdldata), size(ydata))
1093 % % ydata = ydata.';
1094 % % end
1095 %
1096 % if all(P>=LB & P<=UB)
1097 % chi2 = sum((abs((mdldata-ydata)).^2).*weights);
1098 % else
1099 % chi2 = 10e50;
1100 % end
1101 %
1102 % end
1103
1104 %--------------------------------------------------------------------------
1105
1106 % function g = scaling(f, x, scale)
1107 %
1108 % g = f(x.*scale);
1109 %
1110 % end
1111
1112 %--------------------------------------------------------------------------
1113
function scale = find_scale(P0, LB, UB)
  % FIND_SCALE computes a characteristic magnitude for each fit parameter,
  % used to normalise the parameter vector before minimisation.
  %
  % Inputs:  P0     - initial parameter guess (may be empty when bounds are given)
  %          LB, UB - lower/upper bound arrays (may be empty when P0 is given)
  % Output:  scale  - array of strictly positive scale factors, one per parameter
  %
  % If both bounds are provided, the scale is the geometric mean of their
  % magnitudes; otherwise the magnitude of the guess is used. A fallback of 1
  % guarantees the scale is never zero (callers divide by it).

  if ~isempty(P0)
    Nfreeparams = numel(P0);
  else
    Nfreeparams = numel(LB);
  end

  if ~isempty(LB) && ~isempty(UB)
    % geometric mean of the bound magnitudes
    scale = sqrt(abs(LB) .* abs(UB));
    for ii = 1:Nfreeparams
      if scale(ii) == 0
        % one of the bounds is zero: use the larger magnitude instead
        scale(ii) = max(abs(LB(ii)), abs(UB(ii)));
      end
      if scale(ii) == 0
        % both bounds are zero: avoid a zero scale, which would produce
        % NaN/Inf when the parameters or bounds are later divided by it
        scale(ii) = 1;
      end
    end
  else
    % no usable bounds: scale on the magnitude of the initial guess
    scale = abs(P0);
    for ii = 1:Nfreeparams
      if scale(ii) == 0
        scale(ii) = 1;
      end
    end
  end

end
1155
1156 %--------------------------------------------------------------------------
1157
function [weightsOut,unweighted] = find_weights(as,weightsIn)
  % FIND_WEIGHTS determines the fit weights for the input data object.
  %
  % Inputs:  as        - input data object (reads fields y, dy, yunits)
  %          weightsIn - user-supplied weights: empty (use dy, or ones),
  %                      a scalar, a numeric array, or an ao of weights
  % Outputs: weightsOut - weights array matching the size of as.y
  %          unweighted - true when neither weights nor dy were supplied

  Ydata = as.y;
  dYdata = as.dy;

  noWeights = isempty(weightsIn);
  noDY = isempty(dYdata);

  unweighted = noWeights & noDY;

  % check data uncertainties: zero dy would give infinite weight
  if any(dYdata==0) && ~noDY
    error('### some of the data uncertainties are zero: cannot fit')
  end

  if length(dYdata)~=length(Ydata) && ~noDY
    error('### length of Y fields and dY fields do not match')
  end

  if noWeights
    % The user did not input weights.
    % Looking for the dy field of the input data
    if noDY
      % Really no input from user: unit weights
      weightsOut = ones(size(Ydata));
    else
      % Uses uncertainties in Ydata to evaluate data weights
      weightsOut = 1./dYdata.^2;
    end
  elseif numel(weightsIn) == 1
    if isnumeric(weightsIn)
      % scalar weight: replicate over all samples
      weightsOut = weightsIn .* ones(size(Ydata));
    elseif isa(weightsIn, 'ao')
      % weights provided as an ao: validate size first, then units.
      % (Checking units first could silently accept a wrong-sized weights
      % array, or report a size mismatch as a units error.)
      if ~isequal(size(weightsIn.data.getY), size(Ydata))
        error('### size of data ao and weights ao do not match')
      elseif weightsIn.yunits == as.yunits
        weightsOut = weightsIn.y;
      else
        error('### units for data and uncertainties do not match')
      end
    else
      error('### unknown format for weights parameter');
    end
  else
    % numeric array of weights: use as given
    weightsOut = weightsIn;
  end

end
1210
1211 %--------------------------------------------------------------------------
1212
function [params,mdl_params,paramidx] = cat_mdls(mdls)
  % Concatenate the parameter names of all input models into a single
  % de-duplicated list (first-occurrence order) and build, per model, the
  % index map from its own parameters into that full list.

  Nmdls = numel(mdls);

  % full parameter name list: keep the first occurrence of each name
  allnames = horzcat(mdls(:).params);
  params = {};
  for jj = 1:numel(allnames)
    if ~any(strcmp(allnames{jj}, params))
      params{end+1} = allnames{jj}; %#ok<AGROW>
    end
  end

  % per-model parameter name lists
  mdl_params = cell(1,Nmdls);
  for kk = 1:Nmdls
    mdl_params{kk} = mdls(kk).params;
  end

  % paramidx{kk}(jj) is the position of the jj-th parameter of model kk
  % inside the full list, or 0 when it is absent
  paramidx = cell(1,Nmdls);
  for kk = 1:Nmdls
    names = mdl_params{kk};
    for jj = 1:length(names)
      pos = find(strcmp(names(jj), params), 1, 'first');
      if isempty(pos)
        paramidx{kk}(jj) = 0;
      else
        paramidx{kk}(jj) = pos;
      end
    end
  end

end
1252
1253 %--------------------------------------------------------------------------
1254
function [params,mdl_params,paramidx] = cat_mdls_cell(mdls, pnames)
  % Concatenate the parameter names (supplied as a cell array, one entry
  % per model) into a single de-duplicated list (first-occurrence order)
  % and build, per model, the index map into that full list.

  Nmdls = numel(mdls);

  % full parameter name list: keep the first occurrence of each name
  allnames = horzcat(pnames{:});
  params = {};
  for jj = 1:numel(allnames)
    if ~any(strcmp(allnames{jj}, params))
      params{end+1} = allnames{jj}; %#ok<AGROW>
    end
  end

  % per-model parameter name lists
  mdl_params = cell(1,Nmdls);
  for kk = 1:Nmdls
    mdl_params{kk} = pnames{kk};
  end

  % paramidx{kk}(jj) is the position of the jj-th parameter of model kk
  % inside the full list, or 0 when it is absent
  paramidx = cell(1,Nmdls);
  for kk = 1:Nmdls
    names = mdl_params{kk};
    for jj = 1:length(names)
      pos = find(strcmp(names(jj), params), 1, 'first');
      if isempty(pos)
        paramidx{kk}(jj) = 0;
      else
        paramidx{kk}(jj) = pos;
      end
    end
  end

end
1294
1295 %--------------------------------------------------------------------------
1296
1297 % function chi2 = fit_chi2_multich(P, Pidx, ydata, weights, fcns)
1298 %
1299 % Nmdls = numel(fcns);
1300 % Ndata = numel(ydata(:,1));
1301 %
1302 % mdldata = zeros(Ndata,Nmdls);
1303 % for kk=1:Nmdls
1304 % p = P(Pidx{kk});
1305 % mdldata(:,kk) = fcns{kk}(p);
1306 % end
1307 %
1308 % res = (ydata-mdldata).*weights;
1309 %
1310 % chi2 = res'*res;
1311 % chi2 = sum(diag(chi2));
1312 %
1313 % end
1314
1315 %--------------------------------------------------------------------------
1316
1317 % function chi2 = fit_chi2_multich_bounds(P, Pidx, ydata, weights, fcns, LB, UB)
1318 %
1319 % if all(P>=LB & P<=UB)
1320 %
1321 % Nmdls = numel(fcns);
1322 % Ndata = numel(ydata(:,1));
1323 %
1324 % mdldata = zeros(Ndata,Nmdls);
1325 % for kk=1:Nmdls
1326 % p = P(Pidx{kk});
1327 % mdldata(:,kk) = fcns{kk}(p);
1328 % end
1329 %
1330 % res = (ydata-mdldata).*weights;
1331 %
1332 % chi2 = res'*res;
1333 % chi2 = sum(diag(chi2));
1334 %
1335 % else
1336 % chi2 = 10e50;
1337 % end
1338 %
1339 % end
1340
1341 %--------------------------------------------------------------------------
1342
function chi2_guesses = montecarlo(func, LB, UB, Npoints)
  % MONTECARLO draws Npoints random parameter vectors uniformly within the
  % bounds, evaluates the objective at each, and returns the results sorted
  % by ascending objective value.
  %
  % Inputs:  func    - handle of the scalar objective (e.g. chi2)
  %          LB, UB  - lower/upper bound arrays, one entry per free parameter
  %          Npoints - number of random guesses to draw
  % Output:  chi2_guesses - Npoints x (Nfreeparams+1) matrix; column 1 is
  %                         the objective value, the remaining columns the
  %                         corresponding guess, rows sorted by column 1

  Nfreeparams = numel(LB);

  % construct the guess matrix: one row per guess, one column per parameter
  guesses = zeros(Npoints,Nfreeparams);
  for jj=1:Nfreeparams
    guesses(:,jj) = LB(jj)+(UB(jj)-LB(jj))*rand(1,Npoints);
  end

  % evaluate the objective at each guess; preallocate the result instead of
  % growing it by concatenation in the loop (which is O(Npoints^2))
  chi2_guesses = zeros(Npoints,Nfreeparams+1);
  for ii=1:Npoints
    chi2_guesses(ii,1) = func(guesses(ii,:));
    chi2_guesses(ii,2:end) = guesses(ii,:);
  end

  % sort the guesses for ascending objective value
  chi2_guesses = sortrows(chi2_guesses);

end
1371
1372 %--------------------------------------------------------------------------
1373
function [x,fval,exitflag,output,h,MChistory] = ...
    find_bestfit_montecarlo(LB, UB, Npoints, Noptims, opts, algorithm, FastHess)
  % FIND_BESTFIT_MONTECARLO searches the bounded parameter space with a
  % Monte Carlo scan, then runs the chosen minimiser from each of the
  % Noptims best scan points and returns the overall best fit.
  %
  % Inputs:  LB, UB    - parameter bounds (mandatory for the scan)
  %          Npoints   - number of Monte Carlo points (default 100000)
  %          Noptims   - number of local optimisations to run (default 10)
  %          opts      - optimiser options structure
  %          algorithm - one of 'fminunc', 'fmincon', 'patternsearch',
  %                      'ga', 'simulannealbnd'; anything else falls back
  %                      to fminsearch
  %          FastHess  - when true, estimate the hessian via fastHessian
  %                      if the optimiser did not return one
  % Outputs: x         - best-fit parameters (unscaled back to user units)
  %          fval      - objective value at the best fit
  %          exitflag, output - optimiser diagnostics for the best run
  %          h         - hessian at the best fit ([] if none available)
  %          MChistory - sorted (fval, params) history of all Noptims runs
  %
  % NOTE: communicates with the objective fit_chi2 through the global
  % variable 'scale'; the global is cleared before returning.

  global scale

  Nfreeparams = length(LB);

  % check Npoints
  if isempty(Npoints)
    Npoints = 100000;
  end
  % check Noptims
  if isempty(Noptims)
    Noptims = 10;
  end

  if Npoints<Noptims
    error('### Npoints must be at least equal to Noptims')
  end

  % define parameter scale (geometric mean of the bounds, see find_scale)
  scale = find_scale([], LB, UB);

  % scale function to minimize
  % func = @(x)scaling(func, x, scale);

  % scale bounds into the normalised parameter space
  LB = LB./scale;
  UB = UB./scale;

  % do a Monte Carlo search; rows come back sorted by ascending objective
  chi2_guesses = montecarlo(@fit_chi2, LB, UB, Npoints);

  % minimize over the first (best) Noptims guesses
  fitresults = zeros(Noptims,Nfreeparams+1);
  exitflags = {};
  outputs = {};
  hs = {};
  for ii=1:Noptims
    P0 = chi2_guesses(ii,2:Nfreeparams+1);
    % only fminunc/fmincon return a hessian directly; the other
    % algorithms leave 'h' unset unless FastHess fills it in below
    if strcmpi(algorithm,'fminunc')
      [x,fval,exitflag,output,dummy,h] = fminunc(@fit_chi2, P0, opts);
    elseif strcmpi(algorithm,'fmincon')
      [x,fval,exitflag,output,dummy,dummy,h] = fmincon(@fit_chi2, P0, [], [], [], [], LB, UB, [], opts);
    elseif strcmpi(algorithm,'patternsearch')
      [x,fval,exitflag,output] = patternsearch(@fit_chi2, P0, [], [], [], [], LB, UB, [], opts);
    elseif strcmpi(algorithm,'ga')
      [x,fval,exitflag,output] = ga(@fit_chi2, numel(P0), [], [], [], [], LB, UB, [], opts);
    elseif strcmpi(algorithm,'simulannealbnd')
      [x,fval,exitflag,output] = simulannealbnd(@fit_chi2, P0, LB, UB, opts);
    else
      [x,fval,exitflag,output] = fminsearch(@fit_chi2, P0, opts);
    end
    fitresults(ii,1) = fval;
    fitresults(ii,2:Nfreeparams+1) = x;
    exitflags{ii} = exitflag;
    outputs{ii} = output;
    % NOTE(review): once 'h' exists it persists across loop iterations, so
    % fastHessian only runs on the first iteration; later iterations reuse
    % the check below and store whatever 'h' currently holds — confirm
    % this is the intended behaviour for FastHess with non-hessian
    % algorithms.
    if FastHess && ~exist('h','var')
      h = fastHessian(@fit_chi2,x,fval);
    end
    if exist('h','var')
      hs{ii} = h;
    end
  end

  % sort the optimisation results by ascending objective
  [dummy, ix] = sort(fitresults(:,1));
  fitresults = fitresults(ix,:);

  % % refine fit over the first bestfit
  % P0 = fitresults(1,2:Nfreeparams+1);
  % if strcmpi(algorithm,'fminunc')
  %   [x,fval,exitflag,output] = fminunc(func, P0, opts);
  % elseif strcmpi(algorithm,'fmincon')
  %   [x,fval,exitflag,output] = fmincon(func, P0, [], [], [], [], LB, UB, [], opts);
  % elseif strcmpi(algorithm,'patternsearch')
  %   [x,fval,exitflag,output] = patternsearch(func, P0, [], [], [], [], LB, UB, [], opts);
  % elseif strcmpi(algorithm,'ga')
  %   [x,fval,exitflag,output] = ga(func, numel(P0), [], [], [], [], LB, UB, [], opts); % ga does not improve the bestfit
  % elseif strcmpi(algorithm,'simulannealbnd')
  %   [x,fval,exitflag,output] = simulannealbnd(func, P0, LB, UB, opts);
  % else
  %   [x,fval,exitflag,output] = fminsearch(func, P0, opts);
  % end

  % set the best-fit
  fval = fitresults(1,1);
  x = fitresults(1,2:Nfreeparams+1);

  % scale best-fit back to the user's parameter units
  x = x.*scale;

  % scale hessian back to the user's parameter units
  % NOTE(review): 'hs' is initialised above, so exist('hs','var') is always
  % true here; the condition is effectively a no-op guard.
  if exist('hs','var')
    for kk=1:numel(hs)
      for ii=1:numel(x)
        for jj=1:numel(x)
          hs{kk}(ii,jj) = hs{kk}(ii,jj)/scale(ii)/scale(jj);
        end
      end
    end
  % else
  %   h = [];
  end

  % scale fit results back to the user's parameter units
  MChistory = fitresults;
  for ii=1:size(MChistory,1)
    MChistory(ii,2:Nfreeparams+1) = MChistory(ii,2:Nfreeparams+1).*scale;
  end

  % set the proper exitflag & output (those of the best run)
  exitflags = exitflags(ix);
  outputs = outputs(ix);
  if exist('hs','var') && ~(isempty(hs))
    hs = hs(ix);
    h = hs{1};
  else
    h = [];
  end
  exitflag = exitflags{1};
  output = outputs{1};

  clear global scale

end
1500
1501 %--------------------------------------------------------------------------
1502
function [x,fval,exitflag,output,h,outHistory] = ...
    find_bestfit_guess(P0, opts, algorithm, FastHess)
  % FIND_BESTFIT_GUESS minimises the objective fit_chi2 starting from the
  % user-supplied initial guess P0, recording the optimisation chain.
  %
  % Inputs:  P0        - initial parameter guess
  %          opts      - optimiser options structure
  %          algorithm - one of 'fminunc', 'fmincon', 'patternsearch',
  %                      'ga', 'simulannealbnd'; anything else falls back
  %                      to fminsearch
  %          FastHess  - when true, estimate the hessian via fastHessian
  %                      if the optimiser did not return one
  % Outputs: x          - best-fit parameters (unscaled back to user units)
  %          fval       - objective value at the best fit
  %          exitflag, output - optimiser diagnostics
  %          h          - hessian at the best fit ([] if none available)
  %          outHistory - struct with fields x (iterates, unscaled) and
  %                       fval (objective per iteration)
  %
  % NOTE: communicates with the objective and the output function through
  % the globals lb, ub, scale and chainHistory; all are cleared on return.

  global lb ub scale chainHistory

  % unbounded by default
  if isempty(lb)
    lb = -Inf;
  end
  if isempty(ub)
    ub = Inf;
  end

  % define parameter scale from the magnitude of the guess
  scale = find_scale(P0, [], []);

  % scale initial guess into the normalised parameter space
  P0 = P0./scale;

  % initialize history; outfun appends one row per iteration
  chainHistory.x = [];
  chainHistory.fval = [];
  opts=optimset(opts,'outputfcn',@outfun);

  % minimize over the initial guess; only fminunc/fmincon return a hessian
  if strcmpi(algorithm,'fminunc')
    [x,fval,exitflag,output,dummy,h] = fminunc(@fit_chi2, P0, opts);
  elseif strcmpi(algorithm,'fmincon')
    [x,fval,exitflag,output,dummy,dummy,h] = fmincon(@fit_chi2, P0, [], [], [], [], lb, ub, [], opts);
  elseif strcmpi(algorithm,'patternsearch')
    [x,fval,exitflag,output] = patternsearch(@fit_chi2, P0, [], [], [], [], lb, ub, [], opts);
  elseif strcmpi(algorithm,'ga')
    [x,fval,exitflag,output] = ga(@fit_chi2, numel(P0), [], [], [], [], lb, ub, [], opts);
  elseif strcmpi(algorithm,'simulannealbnd')
    [x,fval,exitflag,output] = simulannealbnd(@fit_chi2, P0, lb, ub, opts);
  else
    [x,fval,exitflag,output] = fminsearch(@fit_chi2, P0, opts);
  end

  % fall back to a finite-difference hessian when requested
  if FastHess && ~exist('h','var')
    h = fastHessian(@fit_chi2,x,fval);
  end

  % scale best-fit back to the user's parameter units
  x = x.*scale;

  % scale hessian back to the user's parameter units
  if exist('h','var')
    for ii=1:numel(x)
      for jj=1:numel(x)
        h(ii,jj) = h(ii,jj)/scale(ii)/scale(jj);
      end
    end
  else
    h = [];
  end

  % unscale the recorded optimisation chain for the caller
  outHistory = chainHistory;
  outHistory.x = chainHistory.x.*repmat(scale,size(chainHistory.x,1),1);

  clear global lb ub scale chainHistory

end
1565
1566 %--------------------------------------------------------------------------
1567
function stop = outfun(x,optimvalues,state)
  % Output function handed to the optimiser via optimset('outputfcn',...):
  % logs the parameter vector and objective value of every completed
  % iteration into the global chain history. Never requests termination.

  global chainHistory

  % never ask the optimiser to stop from here
  stop = false;

  % record only 'iter' events; ignore 'init', 'interrupt' and 'done'
  if strcmp(state,'iter')
    chainHistory.x    = [chainHistory.x;    x];
    chainHistory.fval = [chainHistory.fval; optimvalues.fval];
  end

end
1579
1580 %--------------------------------------------------------------------------
1581
function hess = fastHessian(fun,x0,fx0)
  % fastHessian numerically approximates the Hessian of fun at x0 using
  % forward finite differences of function values only.
  %
  % Inputs:  fun - handle of the scalar function to differentiate
  %          x0  - point at which the Hessian is evaluated
  %          fx0 - fun(x0), supplied by the caller to save one evaluation
  % Output:  hess - symmetric nVars x nVars finite-difference Hessian
  %
  % Each element uses the forward-difference formula
  %
  %   H(i,j) = [f(x+h_i*e_i+h_j*e_j) - f(x+h_i*e_i)
  %             - f(x+h_j*e_j) + f(x)] / (h_i*h_j)          (*)
  %
  % The term f(x+h_j*e_j) is shared by every element of column j and the
  % term f(x+h_i*e_i) by every element of row i, so both are computed once
  % and reused. Only the upper triangle is evaluated; the lower triangle
  % is filled in by symmetry.

  nVars = numel(x0);
  hess = zeros(nVars);

  % per-coordinate step size, proportional to the parameter magnitude
  h = eps^(1/4)*sign(x0).*max(abs(x0),1);

  % minimum and maximum allowed change
  minChange = 1e-008;
  maxChange = 0.1;

  % clamp the step magnitude between minChange and maxChange, keeping the
  % sign (the eps offset makes a zero entry count as positive)
  h = sign(h+eps).*min(max(abs(h),minChange),maxChange);

  % 3rd term of (*): f(x + h_j*e_j), one value per column, computed once
  fcol = zeros(1,nVars);
  for j = 1:nVars
    xp = x0;
    xp(j) = x0(j) + h(j);
    fcol(j) = fun(xp);
  end

  for i = 1:nVars
    % 2nd term of (*): f(x + h_i*e_i), shared by the whole row i
    xp = x0;
    xp(i) = x0(i) + h(i);
    frow = fun(xp);

    for j = i:nVars % upper triangle only
      % 1st term of (*): the mixed step, unique to each element
      xp = x0;
      xp(i) = x0(i) + h(i);
      xp(j) = xp(j) + h(j);
      fmix = fun(xp);

      hess(i,j) = (fmix - frow - fcol(j) + fx0)/(h(i)*h(j));
    end
  end

  % mirror the strict upper triangle into the lower triangle
  hess = hess + triu(hess,1)';

end
1644
1645 %--------------------------------------------------------------------------
1646
1647 % function [x,fval,exitflag,output,h,SVDhistory] = ...
1648 % find_bestfit_guess_SVD(P0, lb, ub, opts, algorithm, FastHess, nSVD)
1649 %
1650 % if isempty(lb)
1651 % lb = -Inf;
1652 % end
1653 % if isempty(ub)
1654 % ub = Inf;
1655 % end
1656 %
1657 % % define parameter scale
1658 % scale = find_scale(P0, [], []);
1659 %
1660 % % % scale function to minimize
1661 % % func = @(x)scaling(func, x, scale);
1662 %
1663 % % scale initial guess
1664 % P0 = P0./scale;
1665 %
1666 % % scale bounds
1667 % if ~isempty(lb)
1668 % lb = lb./scale;
1669 % end
1670 % if ~isempty(ub)
1671 % ub = ub./scale;
1672 % end
1673 %
1674 % % set the guess for 0-iteration
1675 % x = P0;
1676 %
1677 % Nfreeparams = numel(P0);
1678 %
1679 % fitresults = zeros(nSVD,Nfreeparams+1);
1680 % exitflags = {};
1681 % outputs = {};
1682 % hs = {};
1683 %
1684 % % start SVD loop
1685 % for ii=1:nSVD
1686 %
1687 % % minimize over the old parameter space
1688 % if strcmpi(algorithm,'fminunc')
1689 % [x,fval,exitflag,output,grad,h] = fminunc(@fit_chi2, x, opts);
1690 % elseif strcmpi(algorithm,'fmincon')
1691 % [x,fval,exitflag,output,lambda,grad,h] = fmincon(@fit_chi2, x, [], [], [], [], lb, ub, [], opts);
1692 % elseif strcmpi(algorithm,'patternsearch')
1693 % [x,fval,exitflag,output] = patternsearch(@fit_chi2, P0, [], [], [], [], lb, ub, [], opts);
1694 % elseif strcmpi(algorithm,'ga')
1695 % [x,fval,exitflag,output] = ga(@fit_chi2, numel(P0), [], [], [], [], lb, ub, [], opts);
1696 % elseif strcmpi(algorithm,'simulannealbnd')
1697 % [x,fval,exitflag,output] = simulannealbnd(@fit_chi2, P0, lb, ub, opts);
1698 % else
1699 % [x,fval,exitflag,output] = fminsearch(@fit_chi2, P0, opts);
1700 % end
1701 %
1702 % if FastHess && ~exist('h','var')
1703 % h = fastHessian(func,x,fval);
1704 % end
1705 %
1706 % % compute Jacobian
1707 % J = zeros(Nch*Ndata,Np);
1708 % for kk=1:Nch
1709 % for ll=1:Np
1710 % J(((ii-1)*Ndata+1):ii*Ndata,ll) = dFcns{kk}{ll}(x.*scale);
1711 % end
1712 % end
1713 %
1714 % % take SVD decomposition of hessian
1715 % [U,S,V] = svd(J);
1716 %
1717 % % cancel out column with very low information
1718 % thresh = 100*eps;
1719 % idx = (diag(S)./norm(diag(S)))<=thresh;
1720 % pos = find(idx~=0);
1721 %
1722 % % reduced matrix
1723 % Sred = S;
1724 % Ured = U;
1725 % Vred = V;
1726 % Sred(:,pos) = [];
1727 % Sred(pos,:) = [];
1728 % Ured(:,pos) = [];
1729 % Ured(pos,:) = [];
1730 % Vred(:,pos) = [];
1731 % Vred(pos,:) = [];
1732 %
1733 %
1734 % % start from here inserting norm-rescaling... also in funcSVD!
1735 %
1736 % % guess in the new parameter space
% pay attention because here x is row-vector and xSVD is column-vector
1738 % xSVD = U'*x';
1739 % xSVD(pos) = [];
1740 %
1741 % % % bounds in the new parameter space
1742 % % if ~isempty(lb) && ~isempty(ub)
1743 % % lbSVD = U'*lb';
1744 % % ubSVD = U'*ub';
1745 % % end
1746 %
1747 % % do the change in the variables
1748 % funcSVD = @(xSVD)func_SVD(func,xSVD,U,pos);
1749 %
1750 % % minimize over the new parameter space
1751 % if strcmpi(algorithm,'fminunc')
1752 % [xSVD,fvalSVD,exitflag,output,grad,hSVD] = fminunc(funcSVD, xSVD, opts);
1753 % elseif strcmpi(algorithm,'fmincon')
1754 % [xSVD,fvalSVD,exitflag,output,lambda,grad,hSVD] = fmincon(funcSVD, xSVD, [], [], [], [], [], [], [], opts);
1755 % elseif strcmpi(algorithm,'patternsearch')
1756 % [xSVD,fvalSVD,exitflag,output] = patternsearch(funcSVD, xSVD, [], [], [], [], [], [], [], opts);
1757 % elseif strcmpi(algorithm,'ga')
1758 % [xSVD,fvalSVD,exitflag,output] = ga(funcSVD, numel(xSVD), [], [], [], [], [], [], [], opts);
1759 % elseif strcmpi(algorithm,'simulannealbnd')
1760 % [xSVD,fvalSVD,exitflag,output] = simulannealbnd(funcSVD, xSVD, [], [], opts);
1761 % else
1762 % [xSVD,fvalSVD,exitflag,output] = fminsearch(funcSVD, xSVD, opts);
1763 % end
1764 %
1765 % if FastHess && ~exist('hSVD','var')
1766 % hSVD = fastHessian(funcSVD,xSVD,fvalSVD);
1767 % end
1768 %
1769 % % if fvalSVD<fval
1770 % % % take the new
1771 % % x = (U'*xSVD)';
1772 % % fval = fvalSVD;
1773 % % % trasformare la nuova h nella vecchia
1774 % % h = U'*hSVD;
1775 % % else
1776 % % % take the old
1777 % % end
1778 %
1779 % % return to the full parameter vector
1780 % xSVDfull = zeros(numel(xSVD)+numel(pos),1);
1781 % setVals = setdiff(1:numel(xSVDfull),pos);
1782 % xSVDfull(setVals) = xSVD;
1783 % % x = xb;
1784 %
1785 % % back to the old parameter space
1786 % x = (U*xSVDfull)';
1787 % fval = fvalSVD;
1788 % hRed = Ured*hSVD*Vred';
1789 % h = hRed;
1790 % for kk=1:numel(pos)
1791 % h(pos(kk),:) = zeros(1,size(h,2));
1792 % h(:,pos(kk)) = zeros(size(h,1),1);
1793 % end
1794 %
1795 % fitresults(ii,1) = fval;
1796 % fitresults(ii,2:Nfreeparams+1) = x;
1797 % exitflags{ii} = exitflag;
1798 % outputs{ii} = output;
1799 % hs{ii} = h;
1800 %
1801 % end
1802 %
1803 % % sort the results
1804 % [chi2s, ix] = sort(fitresults(:,1));
1805 % fitresults = fitresults(ix,:);
1806 %
1807 % % set the best-fit
1808 % fval = fitresults(1,1);
1809 % x = fitresults(1,2:Nfreeparams+1);
1810 %
1811 % % scale best-fit
1812 % x = x.*scale;
1813 %
1814 % % scale hessian
1815 % for ii=1:numel(x)
1816 % for jj=1:numel(x)
1817 % h(ii,jj) = h(ii,jj)/scale(ii)/scale(jj);
1818 % end
1819 % end
1820 %
1821 % % scale fit results
1822 % SVDhistory = fitresults;
1823 % for ii=1:size(SVDhistory,1)
1824 % SVDhistory(ii,2:Nfreeparams+1) = SVDhistory(ii,2:Nfreeparams+1).*scale;
1825 % end
1826 %
1827 % % set the proper exitflag & output
1828 % exitflags = exitflags(ix);
1829 % outputs = outputs(ix);
1830 % hs = hs(ix);
1831 % exitflag = exitflags{1};
1832 % output = outputs{1};
1833 % h = hs{1};
1834 %
1835 % end
1836
1837 %--------------------------------------------------------------------------
1838
1839 % function z = func_SVD(func,y,U,pos)
1840 %
1841 % yFull = zeros(numel(y)+numel(pos),1);
1842 % setVals = setdiff(1:numel(yFull),pos);
1843 % yFull(setVals) = y;
1844 %
1845 % x = (U*yFull)';
1846 %
1847 % z = func(x);
1848 %
1849 % end
1850
1851 %--------------------------------------------------------------------------
1852
1853 % function adaptive_scaling(P0, scaleIn, func, Niter, opts, algorithm)
1854 %
1855 % Nfreeparams = numel(P0);
1856 %
1857 % for ii=1:Niter
1858 %
1859 % % new guess
1860 % P0_new = P0;
1861 % % new scale
1862 % scale = find_scale(P0, [], []);
1863 %
1864 % % scale bounds
1865 % LB = LB./scale;
1866 % UB = UB./scale;
1867 %
1868 % % if ~multiCh
1869 % % rescale model
1870 % modelFunc = @(x)scaling(modelFunc, x, scale_new./scale);
1871 %
1872 % % define function to minimize
1873 % if isempty(lb) & isempty(ub)
1874 % func = @(x)fit_chi2(x, Ydata, weights, modelFunc);
1875 % else
1876 % lb = lb.*scale./scale_new;
1877 % ub = ub.*scale./scale_new;
1878 % func = @(x)fit_chi2_bounds(x, Ydata, weights, modelFunc, lb, ub);
1879 % end
1880 %
1881 % % else
1882 % % % func = @(x)fit_chi2_multich(params, x, Ydata, mdl_params, modelFuncs);
1883 % % func = @(x)scaling(func, x, scale_new/scale);
1884 % % end
1885 %
1886 % % Make new fit
1887 % [x,fval_new,exitflag,output] = fminsearch(func, P0_new./scale_new, opts);
1888 % % stopping criterion
1889 % if abs(fval_new-fval) <= 3*eps(fval_new-fval)
1890 % fval = fval_new;
1891 % scale = scale_new;
1892 % break
1893 % end
1894 % fval = fval_new;
1895 % scale = scale_new;
1896 % end
1897 % end
1898 %
1899 % end
1900
1901 %--------------------------------------------------------------------------
1902
function [se,Sigma,Corr,I,H,errH,J,errJ] = find_errors(modelFcn, x, fval, dof, UncMtd, FastHess, h, weights, unweighted, linUnc, dFcns)
  % FIND_ERRORS estimates parameter uncertainties for the best fit by
  % building the Fisher information matrix from one of three sources:
  % the symbolic gradient (linUnc), the numerical Hessian of the objective
  % (default), or the numerical Jacobian of the model (UncMtd 'jacobian',
  % single-channel only).
  %
  % Inputs:  modelFcn   - cell array of model function handles
  %          x          - best-fit parameter vector
  %          fval       - objective value at the best fit
  %          dof        - fit degrees of freedom
  %          UncMtd     - 'hessian' or 'jacobian'
  %          FastHess   - when true (and h given), reuse the precomputed
  %                       hessian h instead of recomputing
  %          h          - hessian from the minimiser (may be empty)
  %          weights    - fit weights
  %          unweighted - true when no weights/uncertainties were supplied;
  %                       then errors are calibrated on the residuals
  %          linUnc     - use the symbolic derivatives dFcns for a linear
  %                       error propagation
  %          dFcns      - cell array of symbolic gradient function handles
  % Outputs: se    - parameter standard errors (row vector), [] if the
  %                  covariance is not positive-definite
  %          Sigma - covariance matrix; Corr - correlation matrix
  %          I     - information matrix; H, errH - hessian and its error
  %          J, errJ - jacobian and its error
  %          (outputs that were not computed are returned as [])
  %
  % NOTE: communicates with fit_chi2 through the globals scale, Nch,
  % Ndata, lb, ub; 'scale' is cleared before returning.

  global scale Nch Ndata lb ub

  % set infinite bounds so the objective is unconstrained here
  lb = repmat(-Inf,size(x));
  ub = repmat(Inf,size(x));

  % check on UncMtd and FastHess: the jacobian method and a missing
  % precomputed hessian both force a full recomputation
  if strcmpi(UncMtd,'jacobian')
    FastHess = 0;
  elseif FastHess==1 && isempty(h)
    FastHess = 0;
  end

  Nfreeparams = numel(x);

  if ~FastHess

    % find new scale based on the best-fit found
    scale = find_scale(x, [], []);

  else

    % the precomputed hessian is already in user units: no rescaling
    scale = ones(1,Nfreeparams);

  end

  % scale best-fit into the normalised parameter space
  x = x./scale;


  % standard deviation of the residuals
  sdr = sqrt(fval/dof);

  % compute information matrix from linear approximation: error propagation
  % from symbolical gradient
  if linUnc
    % compute Jacobian from the symbolic derivatives, one channel block
    % of Ndata rows at a time
    J = zeros(Nch*Ndata,Nfreeparams);
    for ii=1:Nch
      for ll=1:Nfreeparams
        J(((ii-1)*Ndata+1):ii*Ndata,ll) = dFcns{ii}{ll}(x.*scale);
      end
    end
  elseif ~strcmpi(UncMtd,'jacobian') || numel(modelFcn)~=1 && ~linUnc

    % Hessian method.
    % Brief description.
    % Here I use the most general method: Hessian of chi2 function. For further readings see
    % Numerical Recipes. Note that for inv, H should be non-singular.
    % Badly-scaled matrices may take to singularities: use Cholesky
    % factorization instead.
    % In practice, a singular Hessian matrix may go out for the following main reasons:
    % 1 - you have reached a false minimum (H should be strictly
    % positive-definite for a well-behaved minimum);
    % 2 - strong correlation between some parameters;
    % 3 - independency of the model on some parameters.
    % In the last two cases you should reduce the dimensionality of the
    % problem.
    % One last point discussed here in Trento is that the Hessian method reduces
    % to the Jacobian, when considering linear models.
    % Feedbacks are welcome.
    % Trento, Giuseppe Congedo

    if ~FastHess
      % scale function to find the hessian
      % func = @(x)scaling(func, x, scale);

      % numerical hessian of the objective (DERIVEST suite)
      [H,errH] = hessian(@fit_chi2,x);

    elseif ~isempty(h) && FastHess % here the hessian is not scaled
      H = h;
    end

    if unweighted
      % information matrix based on sdr
      I = H / sdr^2 / 2;
    else
      % information matrix based on canonical form
      I = H./2;
    end

  elseif ~linUnc

    % % scale function to find the jacobian
    % modelFcn = @(x)scaling(modelFcn, x, scale);

    % Jacobian method (DERIVEST suite), single-channel only
    [J,errJ] = jacobianest(modelFcn{1},x);
  end

  % build the information matrix from the jacobian when one was computed
  if exist('J','var')
    if unweighted
      % information matrix based on sdr
      I = J'*J ./ sdr^2;
    else
      % redefine the jacobian to take care of the weights
      % NOTE(review): a vector ~= comparison in an if requires all elements
      % to differ before the transpose is taken — confirm this matches the
      % intended orientation fix.
      if size(weights)~=size(J(:,1))
        weights = weights';
      end
      for ii=1:size(J,2)
        J(:,ii) = sqrt(weights).*J(:,ii);
      end
      % information matrix based on jacobian
      I = J'*J;
    end

  end

  % check positive-definiteness
  % [R,p] = chol(I);
  % posDef = p==0;

  % Covariance matrix from inverse
  Sigma = inv(I);

  % % Covariance matrix from pseudo-inverse
  % if issparse(I)
  %   % [U,S,V] = svds(I,Nfreeparams);
  %   % I = full(I);
  %   I = full(I);
  %   [U,S,V] = svd(I);
  % else
  %   [U,S,V] = svd(I);
  % end
  % Sigma = V/S*U';

  % Does the covariance give proper positive-definite variances?
  posDef = all(diag(Sigma)>=0);

  if posDef

    % Compute correlation matrix
    Corr = zeros(size(Sigma));
    for ii=1:size(Sigma,1)
      for jj=1:size(Sigma,2)
        Corr(ii,jj) = Sigma(ii,jj) / sqrt(Sigma(ii,ii)) / sqrt(Sigma(jj,jj));
      end
    end

    % Parameter standard errors
    se = sqrt(diag(Sigma))';

    % rescale errors and covariance matrix
    if ~FastHess && ~linUnc % otherwise H is already scaled in find_bestfit function
      se = se.*scale;
      for ii=1:Nfreeparams
        for jj=1:Nfreeparams
          Sigma(ii,jj) = Sigma(ii,jj)*scale(ii)*scale(jj);
        end
      end
    end
  end

  % rescale information matrix
  if ~FastHess && ~linUnc % otherwise H is already scaled in find_bestfit function
    for ii=1:Nfreeparams
      for jj=1:Nfreeparams
        I(ii,jj) = I(ii,jj)/scale(ii)/scale(jj);
      end
    end
  end

  % outputs that were not computed on the taken branch default to empty
  if ~exist('se','var'); se = []; end
  if ~exist('Sigma','var'); Sigma = []; end
  if ~exist('Corr','var'); Corr = []; end
  if ~exist('I','var'); I = []; end
  if ~exist('H','var'); H = []; end
  if ~exist('errH','var'); errH = []; end
  if ~exist('J','var'); J = []; end
  if ~exist('errJ','var'); errJ = []; end

  clear global scale

end
2079
2080 %--------------------------------------------------------------------------
2081 % Get Info Object
2082 %--------------------------------------------------------------------------
function ii = getInfo(varargin)
  % Build the minfo object describing this method. Calling with the single
  % argument 'None' attaches no parameter sets and no plist.

  wantNone = (nargin == 1) && strcmpi(varargin{1}, 'None');
  if wantNone
    sets = {};
    pl = [];
  else
    sets = {'Default'};
    pl = getDefaultPlist;
  end

  % Build info object
  ii = minfo(mfilename, 'ao', 'ltpda', utils.const.categories.sigproc, '$Id: xfit.m,v 1.83 2011/05/12 07:46:29 mauro Exp $', sets, pl);
  ii.setModifier(false);
end
2095
2096 %--------------------------------------------------------------------------
2097 % Get Default Plist
2098 %--------------------------------------------------------------------------
function plout = getDefaultPlist()
  % Return the default parameter list for this method, building it once
  % and caching the result in a persistent variable.
  persistent pl;
  needsBuild = exist('pl', 'var')==0 || isempty(pl);
  if needsBuild
    pl = buildplist();
  end
  plout = pl;
end
2106
function pl = buildplist()
  % BUILDPLIST constructs the default parameter list for xfit: the model
  % function, weights, initial guess, bounds, algorithm selection and
  % Monte Carlo / uncertainty options documented in the method help.

  pl = plist();

  % Function
  p = param({'Function', 'The function (or symbolic model <tt>smodel</tt>) of Xdata that you want to fit. For example: ''P(1)*Xdata''.'}, paramValue.EMPTY_STRING);
  pl.append(p);

  % Weights
  p = param({'Weights', ['An array of weights, one value per X sample.<br>'...
    'By default, <tt>xfit</tt> takes data uncertainties from the dy field of the input object.'...
    'If provided, <tt>weights</tt> make <tt>xfit</tt> ignore these.'...
    'Otherwise <tt>weights</tt> will be considered as ones.']}, paramValue.EMPTY_STRING);
  pl.append(p);

  % P0
  p = param({'P0', ['An array of starting guesses for the parameters.<br>'...
    'This is not necessary if you fit with <tt>smodel</tt>s; in that case the parameter'...
    'values from the <tt>smodel</tt> are taken as the initial guess.']}, paramValue.EMPTY_DOUBLE);
  pl.append(p);

  % % ADDP
  % p = param({'ADDP', 'A cell-array of additional parameters to pass to the function. These will not be fit.'}, {1, {'{}'}, paramValue.OPTIONAL});
  % pl.append(p);

  % LB
  p = param({'LB', ['Lower bounds for the parameters.<br>'...
    'This improves the convergency. Mandatory for Monte Carlo.<br>'...
    'For multichannel fitting it has to be a cell-array.']}, paramValue.EMPTY_DOUBLE);
  pl.append(p);

  % UB
  p = param({'UB', ['Upper bounds for the parameters.<br>'...
    'This improves the convergency. Mandatory for Monte Carlo.<br>'...
    'For multichannel fitting it has to be a cell-array.']}, paramValue.EMPTY_DOUBLE);
  pl.append(p);

  % Algorithm
  p = param({'ALGORITHM', ['A string defining the fitting algorithm.<br>'...
    '<tt>fminunc</tt>, <tt>fmincon</tt> require ''Optimization Toolbox'' to be installed.<br>'...
    '<tt>patternsearch</tt>, <tt>ga</tt>, <tt>simulannealbnd</tt> require ''Genetic Algorithm and Direct Search'' to be installed.<br>']}, ...
    {1, {'fminsearch', 'fminunc', 'fmincon', 'patternsearch', 'ga', 'simulannealbnd'}, paramValue.SINGLE});
  pl.append(p);

  % OPTSET
  p = param({'OPTSET', ['An optimisation structure to pass to the fitting algorithm.<br>'...
    'See <tt>fminsearch</tt>, <tt>fminunc</tt>, <tt>fmincon</tt>, <tt>optimset</tt>, for details.<br>'...
    'See <tt>patternsearch</tt>, <tt>psoptimset</tt>, for details.<br>'...
    'See <tt>ga</tt>, <tt>gaoptimset</tt>, for details.<br>'...
    'See <tt>simulannealbnd</tt>, <tt>saoptimset</tt>, for details.']}, paramValue.EMPTY_STRING);
  pl.append(p);

  % FitUnc
  p = param({'FitUnc', 'Fit parameter uncertainties or not.'}, paramValue.YES_NO);
  pl.append(p);

  % UncMtd
  p = param({'UncMtd', ['Choose the uncertainties estimation method.<br>'...
    'For multi-channel fitting <tt>hessian</tt> is mandatory.']}, {1, {'hessian', 'jacobian'}, paramValue.SINGLE});
  pl.append(p);

  % FastHess
  p = param({'FastHess', ['Choose whether or not the hessian should be estimated from a fast-forward finite differences algorithm; '...
    'use this method to achieve better performance in CPU-time, but accuracy is not ensured; '...
    'otherwise, the default (computer expensive, but more accurate) method will be used.']}, paramValue.NO_YES);
  pl.append(p);

  % MonteCarlo
  p = param({'MonteCarlo', ['Do a Monte Carlo search in the parameter space.<br>'...
    'Useful when dealing with high multiplicity of local minima. May be computer-expensive.<br>'...
    'Note that, if used, P0 will be ignored. It also requires to define LB and UB.']}, paramValue.NO_YES);
  pl.append(p);

  % Npoints
  p = param({'Npoints', 'Set the number of points in the parameter space to be extracted.'}, 100000);
  pl.append(p);

  % Noptims
  p = param({'Noptims', 'Set the number of optimizations to be performed after the Monte Carlo.'}, 10);
  pl.append(p);

  % estimator
  p = param({'estimator', ['Choose the robust local M-estimate as the statistical estimator of the fit:<br>'...
    ' ''chi2'' (least square) for data with normal distribution;<br>'...
    ' ''abs'' (mean absolute deviation) for data with double exponential distribution;<br>'...
    ' ''log'' (log least square) for data with Lorentzian distribution.']}, ...
    {1, {'chi2', 'abs', 'log'}, paramValue.SINGLE});
  pl.append(p);


end
2198 % END
2199
2200 %--------------------------------------------------------------------------
2201
2202 % Author: John D'Errico
2203 % e-mail: woodchips@rochester.rr.com
2204
2205 %--------------------------------------------------------------------------
2206 % DERIVEST SUITE
2207 %--------------------------------------------------------------------------
2208
function [der,errest,finaldelta] = derivest(fun,x0,varargin)
% DERIVEST: estimate the n'th derivative of fun at x0, provide an error estimate
% usage: [der,errest] = DERIVEST(fun,x0)  % first derivative
% usage: [der,errest] = DERIVEST(fun,x0,prop1,val1,prop2,val2,...)
%
% Derivest will perform numerical differentiation of an
% analytical function provided in fun. It will not
% differentiate a function provided as data. Use gradient
% for that purpose, or differentiate a spline model.
%
% The methods used by DERIVEST are finite difference
% approximations of various orders, coupled with a generalized
% (multiple term) Romberg extrapolation. This also yields
% the error estimate provided. DERIVEST uses a semi-adaptive
% scheme to provide the best estimate that it can by its
% automatic choice of a differencing interval.
%
% Finally, While I have not written this function for the
% absolute maximum speed, speed was a major consideration
% in the algorithmic design. Maximum accuracy was my main goal.
%
%
% Arguments (input)
%  fun - function to differentiate. May be an inline function,
%        anonymous, or an m-file. fun will be sampled at a set
%        of distinct points for each element of x0. If there are
%        additional parameters to be passed into fun, then use of
%        an anonymous function is recommended.
%
%        fun should be vectorized to allow evaluation at multiple
%        locations at once. This will provide the best possible
%        speed. IF fun is not so vectorized, then you MUST set
%        'vectorized' property to 'no', so that derivest will
%        then call your function sequentially instead.
%
%        Fun is assumed to return a result of the same
%        shape as its input x0.
%
%  x0  - scalar, vector, or array of points at which to
%        differentiate fun.
%
% Additional inputs must be in the form of property/value pairs.
%  Properties are character strings. They may be shortened
%  to the extent that they are unambiguous. Properties are
%  not case sensitive. Valid property names are:
%
%  'DerivativeOrder', 'MethodOrder', 'Style', 'RombergTerms'
%  'FixedStep', 'MaxStep'
%
%  All properties have default values, chosen as intelligently
%  as I could manage. Values that are character strings may
%  also be unambiguously shortened. The legal values for each
%  property are:
%
%  'DerivativeOrder' - specifies the derivative order estimated.
%        Must be a positive integer from the set [1,2,3,4].
%
%        DEFAULT: 1 (first derivative of fun)
%
%  'MethodOrder' - specifies the order of the basic method
%        used for the estimation.
%
%        For 'central' methods, must be a positive integer
%        from the set [2,4].
%
%        For 'forward' or 'backward' difference methods,
%        must be a positive integer from the set [1,2,3,4].
%
%        DEFAULT: 4 (a fourth order method)
%
%        Note: higher order methods will generally be more
%        accurate, but may also suffer more from numerical
%        problems.
%
%        Note: First order methods would usually not be
%        recommended.
%
%  'Style' - specifies the style of the basic method
%        used for the estimation. 'central', 'forward',
%        or 'backwards' difference methods are used.
%
%        Must be one of 'central', 'forward', 'backward'.
%
%        DEFAULT: 'central'
%
%        Note: Central difference methods are usually the
%        most accurate, but sometimes one must not allow
%        evaluation in one direction or the other.
%
%  'RombergTerms' - Allows the user to specify the generalized
%        Romberg extrapolation method used, or turn it off
%        completely.
%
%        Must be a positive integer from the set [0,1,2,3].
%
%        DEFAULT: 2 (Two Romberg terms)
%
%        Note: 0 disables the Romberg step completely.
%
%  'FixedStep' - Allows the specification of a fixed step
%        size, preventing the adaptive logic from working.
%        This will be considerably faster, but not necessarily
%        as accurate as allowing the adaptive logic to run.
%
%        DEFAULT: []
%
%        Note: If specified, 'FixedStep' will define the
%        maximum excursion from x0 that will be used.
%
%  'Vectorized' - Derivest will normally assume that your
%        function can be safely evaluated at multiple locations
%        in a single call. This would minimize the overhead of
%        a loop and additional function call overhead. Some
%        functions are not easily vectorizable, but you may
%        (if your matlab release is new enough) be able to use
%        arrayfun to accomplish the vectorization.
%
%        When all else fails, set the 'vectorized' property
%        to 'no'. This will cause derivest to loop over the
%        successive function calls.
%
%        DEFAULT: 'yes'
%
%
%  'MaxStep' - Specifies the maximum excursion from x0 that
%        will be allowed, as a multiple of x0.
%
%        DEFAULT: 100
%
%  'StepRatio' - Derivest uses a proportionally cascaded
%        series of function evaluations, moving away from your
%        point of evaluation. The StepRatio is the ratio used
%        between sequential steps.
%
%        DEFAULT: 2
%
%
% See the document DERIVEST.pdf for more explanation of the
% algorithms behind the parameters of DERIVEST. In most cases,
% I have chosen good values for these parameters, so the user
% should never need to specify anything other than possibly
% the DerivativeOrder. I've also tried to make my code robust
% enough that it will not need much. But complete flexibility
% is in there for your use.
%
%
% Arguments: (output)
%  der - derivative estimate for each element of x0
%        der will have the same shape as x0.
%
%  errest - 95% uncertainty estimate of the derivative, such that
%
%        abs(der(j) - f'(x0(j))) < errest(j)
%
%  finaldelta - The final overall stepsize chosen by DERIVEST
%
%
% Example usage:
%  First derivative of exp(x), at x == 1
%   [d,e]=derivest(@(x) exp(x),1)
%   d =
%       2.71828182845904
%
%   e =
%       1.02015503167879e-14
%
%  True derivative
%   exp(1)
%   ans =
%       2.71828182845905
%
% Example usage:
%  Third derivative of x.^3+x.^4, at x = [0,1]
%   derivest(@(x) x.^3 + x.^4,[0 1],'deriv',3)
%   ans =
%       6       30
%
%  True derivatives: [6,30]
%
%
% See also: gradient
%
%
% Author: John D'Errico
% e-mail: woodchips@rochester.rr.com
% Release: 1.0
% Release date: 12/27/2006

% Default parameter values; see the help text above for their meaning.
par.DerivativeOrder = 1;
par.MethodOrder = 4;
par.Style = 'central';
par.RombergTerms = 2;
par.FixedStep = [];
par.MaxStep = 100;
par.StepRatio = 2;
par.NominalStep = [];
par.Vectorized = 'yes';

na = length(varargin);
if (rem(na,2)==1)
  error 'Property/value pairs must come as PAIRS of arguments.'
elseif na>0
  par = parse_pv_pairs(par,varargin);
end
par = check_params(par);

% Was fun a string, or an inline/anonymous function?
if (nargin<1)
  help derivest
  return
elseif isempty(fun)
  error 'fun was not supplied.'
elseif ischar(fun)
  % a character function name
  fun = str2func(fun);
end

% no default for x0
if (nargin<2) || isempty(x0)
  error 'x0 was not supplied'
end
% NOTE(review): for negative x0 this evaluates to 0.02 regardless of
% magnitude; presumably max(abs(x0),0.02) was intended — TODO confirm.
par.NominalStep = max(x0,0.02);

% was a single point supplied?
nx0 = size(x0);
n = prod(nx0);

% Set the steps to use.
if isempty(par.FixedStep)
  % Basic sequence of steps, relative to a stepsize of 1.
  % 26 geometrically decreasing trial steps from MaxStep downwards.
  delta = par.MaxStep*par.StepRatio .^(0:-1:-25)';
  ndel = length(delta);
else
  % Fixed, user supplied absolute sequence of steps.
  ndel = 3 + ceil(par.DerivativeOrder/2) + ...
     par.MethodOrder + par.RombergTerms;
  if par.Style(1) == 'c'
    ndel = ndel - 2;
  end
  delta = par.FixedStep*par.StepRatio .^(-(0:(ndel-1)))';
end

% generate finite differencing rule in advance.
% The rule is for a nominal unit step size, and will
% be scaled later to reflect the local step size.
fdarule = 1;
switch par.Style
  case 'central'
    % for central rules, we will reduce the load by an
    % even or odd transformation as appropriate.
    if par.MethodOrder==2
      switch par.DerivativeOrder
        case 1
          % the odd transformation did all the work
          fdarule = 1;
        case 2
          % the even transformation did all the work
          fdarule = 2;
        case 3
          % the odd transformation did most of the work, but
          % we need to kill off the linear term
          fdarule = [0 1]/fdamat(par.StepRatio,1,2);
        case 4
          % the even transformation did most of the work, but
          % we need to kill off the quadratic term
          fdarule = [0 1]/fdamat(par.StepRatio,2,2);
      end
    else
      % a 4th order method. We've already ruled out the 1st
      % order methods since these are central rules.
      switch par.DerivativeOrder
        case 1
          % the odd transformation did most of the work, but
          % we need to kill off the cubic term
          fdarule = [1 0]/fdamat(par.StepRatio,1,2);
        case 2
          % the even transformation did most of the work, but
          % we need to kill off the quartic term
          fdarule = [1 0]/fdamat(par.StepRatio,2,2);
        case 3
          % the odd transformation did much of the work, but
          % we need to kill off the linear & quintic terms
          fdarule = [0 1 0]/fdamat(par.StepRatio,1,3);
        case 4
          % the even transformation did much of the work, but
          % we need to kill off the quadratic and 6th order terms
          fdarule = [0 1 0]/fdamat(par.StepRatio,2,3);
      end
    end
  case {'forward' 'backward'}
    % These two cases are identical, except at the very end,
    % where a sign will be introduced.

    % No odd/even trans, but we already dropped
    % off the constant term
    if par.MethodOrder==1
      if par.DerivativeOrder==1
        % an easy one
        fdarule = 1;
      else
        % 2:4
        v = zeros(1,par.DerivativeOrder);
        v(par.DerivativeOrder) = 1;
        fdarule = v/fdamat(par.StepRatio,0,par.DerivativeOrder);
      end
    else
      % par.MethodOrder methods drop off the lower order terms,
      % plus terms directly above DerivativeOrder
      v = zeros(1,par.DerivativeOrder + par.MethodOrder - 1);
      v(par.DerivativeOrder) = 1;
      fdarule = v/fdamat(par.StepRatio,0,par.DerivativeOrder+par.MethodOrder-1);
    end

    % correct sign for the 'backward' rule
    if par.Style(1) == 'b'
      fdarule = -fdarule;
    end

end % switch on par.style (generating fdarule)
nfda = length(fdarule);

% will we need fun(x0)?
% Only even derivative orders and one-sided rules use f(x0);
% odd central rules cancel it out in the odd transformation.
if (rem(par.DerivativeOrder,2) == 0) || ~strncmpi(par.Style,'central',7)
  if strcmpi(par.Vectorized,'yes')
    f_x0 = fun(x0);
  else
    % not vectorized, so loop
    f_x0 = zeros(size(x0));
    for j = 1:numel(x0)
      f_x0(j) = fun(x0(j));
    end
  end
else
  f_x0 = [];
end

% Loop over the elements of x0, reducing it to
% a scalar problem. Sorry, vectorization is not
% complete here, but this IS only a single loop.
der = zeros(nx0);
errest = der;
finaldelta = der;
for i = 1:n
  x0i = x0(i);
  h = par.NominalStep(i);

  % a central, forward or backwards differencing rule?
  % f_del is the set of all the function evaluations we
  % will generate. For a central rule, it will have the
  % even or odd transformation built in.
  if par.Style(1) == 'c'
    % A central rule, so we will need to evaluate
    % symmetrically around x0i.
    if strcmpi(par.Vectorized,'yes')
      f_plusdel = fun(x0i+h*delta);
      f_minusdel = fun(x0i-h*delta);
    else
      % not vectorized, so loop
      f_minusdel = zeros(size(delta));
      f_plusdel = zeros(size(delta));
      for j = 1:numel(delta)
        f_plusdel(j) = fun(x0i+h*delta(j));
        f_minusdel(j) = fun(x0i-h*delta(j));
      end
    end

    if ismember(par.DerivativeOrder,[1 3])
      % odd transformation
      f_del = (f_plusdel - f_minusdel)/2;
    else
      % even transformation: the constant term is subtracted off here
      f_del = (f_plusdel + f_minusdel)/2 - f_x0(i);
    end
  elseif par.Style(1) == 'f'
    % forward rule
    % drop off the constant only
    if strcmpi(par.Vectorized,'yes')
      f_del = fun(x0i+h*delta) - f_x0(i);
    else
      % not vectorized, so loop
      f_del = zeros(size(delta));
      for j = 1:numel(delta)
        f_del(j) = fun(x0i+h*delta(j)) - f_x0(i);
      end
    end
  else
    % backward rule
    % drop off the constant only
    if strcmpi(par.Vectorized,'yes')
      f_del = fun(x0i-h*delta) - f_x0(i);
    else
      % not vectorized, so loop
      f_del = zeros(size(delta));
      for j = 1:numel(delta)
        f_del(j) = fun(x0i-h*delta(j)) - f_x0(i);
      end
    end
  end

  % check the size of f_del to ensure it was properly vectorized.
  f_del = f_del(:);
  if length(f_del)~=ndel
    error 'fun did not return the correct size result (fun must be vectorized)'
  end

  % Apply the finite difference rule at each delta, scaling
  % as appropriate for delta and the requested DerivativeOrder.
  % First, decide how many of these estimates we will end up with.
  ne = ndel + 1 - nfda - par.RombergTerms;

  % Form the initial derivative estimates from the chosen
  % finite difference method.
  der_init = vec2mat(f_del,ne,nfda)*fdarule.';

  % scale to reflect the local delta
  der_init = der_init(:)./(h*delta(1:ne)).^par.DerivativeOrder;

  % Each approximation that results is an approximation
  % of order par.DerivativeOrder to the desired derivative.
  % Additional (higher order, even or odd) terms in the
  % Taylor series also remain. Use a generalized (multi-term)
  % Romberg extrapolation to improve these estimates.
  switch par.Style
    case 'central'
      % central rules only contain even (or only odd) error terms
      rombexpon = 2*(1:par.RombergTerms) + par.MethodOrder - 2;
    otherwise
      rombexpon = (1:par.RombergTerms) + par.MethodOrder - 1;
  end
  [der_romb,errors] = rombextrap(par.StepRatio,der_init,rombexpon);

  % Choose which result to return

  % first, trim off the
  if isempty(par.FixedStep)
    % trim off the estimates at each end of the scale
    % (largest steps suffer truncation error, smallest suffer roundoff)
    nest = length(der_romb);
    switch par.DerivativeOrder
      case {1 2}
        trim = [1 2 nest-1 nest];
      case 3
        trim = [1:4 nest+(-3:0)];
      case 4
        trim = [1:6 nest+(-5:0)];
    end

    [der_romb,tags] = sort(der_romb);

    der_romb(trim) = [];
    tags(trim) = [];
    errors = errors(tags);
    trimdelta = delta(tags);

    % keep the estimate with the smallest predicted error
    [errest(i),ind] = min(errors);

    finaldelta(i) = h*trimdelta(ind);
    der(i) = der_romb(ind);
  else
    [errest(i),ind] = min(errors);
    finaldelta(i) = h*delta(ind);
    der(i) = der_romb(ind);
  end
end

end % mainline end
2672
2673
function [jac,err] = jacobianest(fun,x0)
% jacobianest: estimate of the Jacobian matrix of a vector valued function of n variables
% usage: [jac,err] = jacobianest(fun,x0)
%
%
% arguments: (input)
%  fun - (vector valued) analytical function to differentiate.
%        fun must be a function of the vector or array x0.
%
%  x0  - vector location at which to differentiate fun
%        If x0 is an nxm array, then fun is assumed to be
%        a function of n*m variables.
%
%
% arguments: (output)
%  jac - array of first partial derivatives of fun.
%        Assuming that x0 is a vector of length p
%        and fun returns a vector of length n, then
%        jac will be an array of size (n,p)
%
%  err - vector of error estimates corresponding to
%        each partial derivative in jac.
%
%
% Example: (nonlinear least squares)
%  xdata = (0:.1:1)';
%  ydata = 1+2*exp(0.75*xdata);
%  fun = @(c) ((c(1)+c(2)*exp(c(3)*xdata)) - ydata).^2;
%
%  [jac,err] = jacobianest(fun,[1 1 1])
%
%  jac =
%           -2           -2            0
%      -2.1012      -2.3222     -0.23222
%      -2.2045      -2.6926     -0.53852
%      -2.3096      -3.1176     -0.93528
%      -2.4158      -3.6039      -1.4416
%      -2.5225      -4.1589      -2.0795
%       -2.629      -4.7904      -2.8742
%      -2.7343      -5.5063      -3.8544
%      -2.8374      -6.3147      -5.0518
%      -2.9369      -7.2237      -6.5013
%      -3.0314      -8.2403      -8.2403
%
%  err =
%   5.0134e-15   5.0134e-15            0
%   5.0134e-15            0   2.8211e-14
%   5.0134e-15   8.6834e-15   1.5804e-14
%            0       7.09e-15   3.8227e-13
%   5.0134e-15   5.0134e-15   7.5201e-15
%   5.0134e-15   1.0027e-14   2.9233e-14
%   5.0134e-15            0   6.0585e-13
%   5.0134e-15   1.0027e-14   7.2673e-13
%   5.0134e-15   1.0027e-14   3.0495e-13
%   5.0134e-15   1.0027e-14   3.1707e-14
%   5.0134e-15   2.0053e-14   1.4013e-12
%
%  (At [1 2 0.75], jac should be numerically zero)
%
%
% See also: derivest, gradient, gradest
%
%
% Author: John D'Errico
% e-mail: woodchips@rochester.rr.com
% Release: 1.0
% Release date: 3/6/2007

% get the length of x0 for the size of jac
nx = numel(x0);

% adaptive step-search parameters (same defaults as derivest)
MaxStep = 100;
StepRatio = 2;

% was a string supplied?
if ischar(fun)
  fun = str2func(fun);
end

% get fun at the center point
f0 = fun(x0);
f0 = f0(:);
n = length(f0);
if n==0
  % empty begets empty
  jac = zeros(0,nx);
  err = jac;
  return
end

% 26 geometrically decreasing relative step sizes
relativedelta = MaxStep*StepRatio .^(0:-1:-25);
nsteps = length(relativedelta);

% total number of derivatives we will need to take
jac = zeros(n,nx);
err = jac;
for i = 1:nx
  x0_i = x0(i);
  % steps are relative to x0_i, except at exactly zero
  if x0_i ~= 0
    delta = x0_i*relativedelta;
  else
    delta = relativedelta;
  end

  % evaluate at each step, centered around x0_i
  % difference to give a second order estimate
  fdel = zeros(n,nsteps);
  for j = 1:nsteps
    fdif = fun(swapelement(x0,i,x0_i + delta(j))) - ...
      fun(swapelement(x0,i,x0_i - delta(j)));

    fdel(:,j) = fdif(:);
  end

  % these are pure second order estimates of the
  % first derivative, for each trial delta.
  derest = fdel.*repmat(0.5 ./ delta,n,1);

  % The error term on these estimates has a second order
  % component, but also some 4th and 6th order terms in it.
  % Use Romberg exrapolation to improve the estimates to
  % 6th order, as well as to provide the error estimate.

  % loop here, as rombextrap coupled with the trimming
  % will get complicated otherwise.
  for j = 1:n
    [der_romb,errest] = rombextrap(StepRatio,derest(j,:),[2 4]);

    % trim off 3 estimates at each end of the scale
    % (largest steps suffer truncation error, smallest suffer roundoff)
    nest = length(der_romb);
    trim = [1:3, nest+(-2:0)];
    [der_romb,tags] = sort(der_romb);
    der_romb(trim) = [];
    tags(trim) = [];

    errest = errest(tags);

    % now pick the estimate with the lowest predicted error
    [err(j,i),ind] = min(errest);
    jac(j,i) = der_romb(ind);
  end
end

end % mainline function end
2818
2819
function [grad,err,finaldelta] = gradest(fun,x0)
% gradest: estimate of the gradient vector of an analytical function of n variables
% usage: [grad,err,finaldelta] = gradest(fun,x0)
%
% Uses derivest to provide both derivative estimates
% and error estimates. fun needs not be vectorized.
%
% arguments: (input)
%  fun - analytical function to differentiate. fun must
%        be a function of the vector or array x0. A function
%        name (char) is also accepted, for consistency with
%        jacobianest and hessian.
%
%  x0  - vector location at which to differentiate fun
%        If x0 is an nxm array, then fun is assumed to be
%        a function of n*m variables.
%
% arguments: (output)
%  grad - vector of first partial derivatives of fun.
%        grad will be a row vector of length numel(x0).
%
%  err - vector of error estimates corresponding to
%        each partial derivative in grad.
%
%  finaldelta - vector of final step sizes chosen for
%        each partial derivative.
%
%
% Example:
%  [grad,err] = gradest(@(x) sum(x.^2),[1 2 3])
%  grad =
%      2     4     6
%  err =
%      5.8899e-15   1.178e-14            0
%
%
% Example:
%  At [x,y] = [1,1], compute the numerical gradient
%  of the function sin(x-y) + y*exp(x)
%
%  z = @(xy) sin(diff(xy)) + xy(2)*exp(xy(1))
%
%  [grad,err ] = gradest(z,[1 1])
%  grad =
%      1.7183      3.7183
%  err =
%      7.537e-14  1.1846e-13
%
%
% Example:
%  At the global minimizer (1,1) of the Rosenbrock function,
%  compute the gradient. It should be essentially zero.
%
%  rosen = @(x) (1-x(1)).^2 + 105*(x(2)-x(1).^2).^2;
%  [g,err] = gradest(rosen,[1 1])
%  g =
%      1.0843e-20            0
%  err =
%      1.9075e-18            0
%
%
% See also: derivest, gradient
%
%
% Author: John D'Errico
% e-mail: woodchips@rochester.rr.com
% Release: 1.0
% Release date: 2/9/2007

% accept a function name, for consistency with jacobianest/hessian
if ischar(fun)
  fun = str2func(fun);
end

% total number of derivatives we will need to take
nx = numel(x0);

grad = zeros(1,nx);
err = grad;
finaldelta = grad;
for ind = 1:nx
  % differentiate along dimension ind only, holding the other
  % elements of x0 fixed; derivest does the adaptive step search
  [grad(ind),err(ind),finaldelta(ind)] = derivest( ...
     @(xi) fun(swapelement(x0,ind,xi)), ...
     x0(ind),'deriv',1,'vectorized','no', ...
     'methodorder',2);
end

end % mainline function end
2905
2906
function [HD,err,finaldelta] = hessdiag(fun,x0)
% HESSDIAG: diagonal elements of the Hessian matrix (vector of second partials)
% usage: [HD,err,finaldelta] = hessdiag(fun,x0)
%
% When all that you want are the diagonal elements of the hessian
% matrix, it will be more efficient to call HESSDIAG than HESSIAN.
% HESSDIAG uses DERIVEST to provide both second derivative estimates
% and error estimates. fun needs not be vectorized.
%
% arguments: (input)
%  fun - SCALAR analytical function to differentiate.
%        fun must be a function of the vector or array x0. A
%        function name (char) is also accepted, for consistency
%        with jacobianest and hessian.
%
%  x0  - vector location at which to differentiate fun
%        If x0 is an nxm array, then fun is assumed to be
%        a function of n*m variables.
%
% arguments: (output)
%  HD  - vector of second partial derivatives of fun.
%        These are the diagonal elements of the Hessian
%        matrix, evaluated at x0.
%        HD will be a row vector of length numel(x0).
%
%  err - vector of error estimates corresponding to
%        each second partial derivative in HD.
%
%  finaldelta - vector of final step sizes chosen for
%        each second partial derivative.
%
%
% Example usage:
%  [HD,err] = hessdiag(@(x) x(1) + x(2)^2 + x(3)^3,[1 2 3])
%  HD =
%      0     2    18
%
%  err =
%      0     0     0
%
%
% See also: derivest, gradient, gradest
%
%
% Author: John D'Errico
% e-mail: woodchips@rochester.rr.com
% Release: 1.0
% Release date: 2/9/2007

% accept a function name, for consistency with jacobianest/hessian
if ischar(fun)
  fun = str2func(fun);
end

% total number of second partials we will need (one per element of x0)
nx = numel(x0);

HD = zeros(1,nx);
err = HD;
finaldelta = HD;
for ind = 1:nx
  % second derivative along dimension ind only, holding the
  % other elements of x0 fixed
  [HD(ind),err(ind),finaldelta(ind)] = derivest( ...
     @(xi) fun(swapelement(x0,ind,xi)), ...
     x0(ind),'deriv',2,'vectorized','no');
end

end % mainline function end
2971
2972
function [hess,err] = hessian(fun,x0)
% hessian: estimate elements of the Hessian matrix (array of 2nd partials)
% usage: [hess,err] = hessian(fun,x0)
%
% Hessian is NOT a tool for frequent use on an expensive
% to evaluate objective function, especially in a large
% number of dimensions. Its computation will use roughly
% O(6*n^2) function evaluations for n parameters.
%
% arguments: (input)
%  fun - SCALAR analytical function to differentiate.
%        fun must be a function of the vector or array x0.
%        fun does not need to be vectorized.
%
%  x0  - vector location at which to compute the Hessian.
%
% arguments: (output)
%  hess - nxn symmetric array of second partial derivatives
%        of fun, evaluated at x0.
%
%  err - nxn array of error estimates corresponding to
%        each second partial derivative in hess.
%
%
% Example usage:
%  Rosenbrock function, minimized at [1,1]
%     rosen = @(x) (1-x(1)).^2 + 105*(x(2)-x(1).^2).^2;
%
%  [h,err] = hessian(rosen,[1 1])
%  h =
%           842         -420
%          -420          210
%  err =
%    1.0662e-12   4.0061e-10
%    4.0061e-10   2.6654e-13
%
%
% Example usage:
%  cos(x-y), at (0,0)
%  Note: this hessian matrix will be positive semi-definite
%
%  hessian(@(xy) cos(xy(1)-xy(2)),[0 0])
%  ans =
%          -1            1
%           1           -1
%
%
% See also: derivest, gradient, gradest, hessdiag
%
%
% Author: John D'Errico
% e-mail: woodchips@rochester.rr.com
% Release: 1.0
% Release date: 2/10/2007

% parameters that we might allow to change
params.StepRatio = 2;
params.RombergTerms = 3;

% get the size of x0 so we can reshape
% later.
sx = size(x0);

% was a string supplied?
if ischar(fun)
  fun = str2func(fun);
end

% total number of derivatives we will need to take
nx = length(x0);

% get the diagonal elements of the hessian (2nd partial
% derivatives wrt each variable.)
[hess,err] = hessdiag(fun,x0);

% form the eventual hessian matrix, stuffing only
% the diagonals for now.
hess = diag(hess);
err = diag(err);
if nx<2
  % the hessian matrix is 1x1. all done
  return
end

% get the gradient vector. This is done only to decide
% on intelligent step sizes for the mixed partials
[grad,graderr,stepsize] = gradest(fun,x0);

% Get params.RombergTerms+1 estimates of the upper
% triangle of the hessian matrix
dfac = params.StepRatio.^(-(0:params.RombergTerms)');
for i = 2:nx
  for j = 1:(i-1)
    % central 4-point formula for the mixed partial d2f/dxi dxj,
    % evaluated at a cascade of shrinking steps dfac(k)*stepsize
    dij = zeros(params.RombergTerms+1,1);
    for k = 1:(params.RombergTerms+1)
      dij(k) = fun(x0 + swap2(zeros(sx),i, ...
        dfac(k)*stepsize(i),j,dfac(k)*stepsize(j))) + ...
        fun(x0 + swap2(zeros(sx),i, ...
        -dfac(k)*stepsize(i),j,-dfac(k)*stepsize(j))) - ...
        fun(x0 + swap2(zeros(sx),i, ...
        dfac(k)*stepsize(i),j,-dfac(k)*stepsize(j))) - ...
        fun(x0 + swap2(zeros(sx),i, ...
        -dfac(k)*stepsize(i),j,dfac(k)*stepsize(j)));

    end
    % normalize by the (4 * hi * hj) denominator of the 4-point rule,
    % then rescale each estimate by its own squared step factor
    dij = dij/4/prod(stepsize([i,j]));
    dij = dij./(dfac.^2);

    % Romberg extrapolation step
    [hess(i,j),err(i,j)] = rombextrap(params.StepRatio,dij,[2 4]);
    % symmetry of mixed partials fills in the lower triangle
    hess(j,i) = hess(i,j);
    err(j,i) = err(i,j);
  end
end


end % mainline function end
3090
3091
3092
3093 % ============================================
3094 % subfunction - romberg extrapolation
3095 % ============================================
function [der_romb,errest] = rombextrap(StepRatio,der_init,rombexpon)
% do romberg extrapolation for each estimate
%
%  StepRatio - Ratio decrease in step
%  der_init - initial derivative estimates
%  rombexpon - higher order terms to cancel using the romberg step
%
%  der_romb - derivative estimates returned
%  errest - error estimates
%  amp - noise amplification factor due to the romberg step

srinv = 1/StepRatio;

% do nothing if no romberg terms
% Build the design matrix rmat: one row per step in a sliding window of
% nexpon+2 estimates, one column per modelled error term (plus the
% constant column, which carries the extrapolated derivative).
nexpon = length(rombexpon);
rmat = ones(nexpon+2,nexpon+1);
switch nexpon
  case 0
    % rmat is simple: ones(2,1)
  case 1
    % only one romberg term
    rmat(2,2) = srinv^rombexpon;
    rmat(3,2) = srinv^(2*rombexpon);
  case 2
    % two romberg terms
    rmat(2,2:3) = srinv.^rombexpon;
    rmat(3,2:3) = srinv.^(2*rombexpon);
    rmat(4,2:3) = srinv.^(3*rombexpon);
  case 3
    % three romberg terms
    rmat(2,2:4) = srinv.^rombexpon;
    rmat(3,2:4) = srinv.^(2*rombexpon);
    rmat(4,2:4) = srinv.^(3*rombexpon);
    rmat(5,2:4) = srinv.^(4*rombexpon);
end

% qr factorization used for the extrapolation as well
% as the uncertainty estimates
[qromb,rromb] = qr(rmat,0);

% the noise amplification is further amplified by the Romberg step.
% amp = cond(rromb);

% this does the extrapolation to a zero step size.
% Each column of rhs is a window of nexpon+2 consecutive estimates;
% the least-squares fit's constant term is the extrapolated value.
ne = length(der_init);
rhs = vec2mat(der_init,nexpon+2,max(1,ne - (nexpon+2)));
rombcoefs = rromb\(qromb.'*rhs);
der_romb = rombcoefs(1,:).';

% uncertainty estimate of derivative prediction
s = sqrt(sum((rhs - rmat*rombcoefs).^2,1));
rinv = rromb\eye(nexpon+1);
cov1 = sum(rinv.^2,2); % 1 spare dof
% 12.7062047361747 is the two-sided 95% Student's-t quantile for
% 1 degree of freedom (tinv(0.975,1)), matching the single spare
% dof noted above — this makes errest a 95% confidence half-width.
errest = s.'*12.7062047361747*sqrt(cov1(1));

end % rombextrap
3152
3153
3154 % ============================================
3155 % subfunction - vec2mat
3156 % ============================================
function mat = vec2mat(vec,n,m)
% Assemble the n-by-m sliding-window matrix M with M(i,j) = vec(i+j-1),
% i.e. each row is a length-m window of vec shifted by one element.
rowBase = (1:n).';
colOffset = 0:(m-1);
ind = bsxfun(@plus,rowBase,colOffset);  % ind(i,j) = i + (j-1)
mat = vec(ind);
% a single-row request comes back as a row vector; hand it back as a column
if n==1
  mat = mat.';
end

end % vec2mat
3167
3168
3169 % ============================================
3170 % subfunction - fdamat
3171 % ============================================
function mat = fdamat(sr,parity,nterms)
% Compute matrix for fda derivation.
% parity can be
%   0 (one sided, all terms included but zeroth order)
%   1 (only odd terms included)
%   2 (only even terms included)
% nterms - number of terms
%
% Entry (i,j) is the Taylor coefficient 1/order(j)! scaled by the
% step shrinkage (1/sr)^((i-1)*order(j)) for the i'th trial step.

% sr is the ratio between successive steps
invRatio = 1./sr;

% pick which Taylor-series orders survive for this parity
switch parity
  case 0
    % single sided rule: every order above the (dropped) zeroth
    orders = 1:nterms;
  case 1
    % odd order derivative: orders 1,3,5,...
    orders = 1:2:(2*nterms);
  case 2
    % even order derivative: orders 2,4,6,...
    orders = 2:2:(2*nterms);
end

[row,col] = ndgrid(1:nterms);
coef = 1./factorial(orders);
mat = coef(col).*invRatio.^((row-1).*orders(col));

end % fdamat
3202
3203
3204 % ============================================
3205 % subfunction - check_params
3206 % ============================================
function par = check_params(par)
% check_params: validate the derivest options structure, replacing empty
% fields with their documented defaults. Errors on any out-of-range value.
%
% Defaults applied when a field is empty:
% par.DerivativeOrder = 1;
% par.MethodOrder = 2;
% par.Style = 'central';
% par.Vectorized = 'yes';
% par.RombergTerms = 2;
% par.FixedStep = [];
% par.MaxStep = 10;

% DerivativeOrder == 1 by default
if isempty(par.DerivativeOrder)
  par.DerivativeOrder = 1;
else
  if (length(par.DerivativeOrder)>1) || ~ismember(par.DerivativeOrder,1:4)
    error 'DerivativeOrder must be scalar, one of [1 2 3 4].'
  end
end

% MethodOrder == 2 by default
if isempty(par.MethodOrder)
  par.MethodOrder = 2;
else
  if (length(par.MethodOrder)>1) || ~ismember(par.MethodOrder,[1 2 3 4])
    error 'MethodOrder must be scalar, one of [1 2 3 4].'
  elseif ismember(par.MethodOrder,[1 3]) && (isempty(par.Style) || (par.Style(1)=='c'))
    % An empty Style defaults to 'central' below, so it must be rejected
    % here too; without the isempty guard, indexing par.Style(1) on an
    % empty Style raised an uninformative indexing error instead.
    error 'MethodOrder==1 or 3 is not possible with central difference methods'
  end
end

% style is char; default is 'central', with unambiguous prefixes allowed
valid = {'central', 'forward', 'backward'};
if isempty(par.Style)
  par.Style = 'central';
elseif ~ischar(par.Style)
  error 'Invalid Style: Must be character'
end
ind = find(strncmpi(par.Style,valid,length(par.Style)));
if (length(ind)==1)
  par.Style = valid{ind};
else
  error(['Invalid Style: ',par.Style])
end

% vectorized is char; default is 'yes', with unambiguous prefixes allowed
valid = {'yes', 'no'};
if isempty(par.Vectorized)
  par.Vectorized = 'yes';
elseif ~ischar(par.Vectorized)
  error 'Invalid Vectorized: Must be character'
end
ind = find(strncmpi(par.Vectorized,valid,length(par.Vectorized)));
if (length(ind)==1)
  par.Vectorized = valid{ind};
else
  error(['Invalid Vectorized: ',par.Vectorized])
end

% RombergTerms == 2 by default
if isempty(par.RombergTerms)
  par.RombergTerms = 2;
else
  if (length(par.RombergTerms)>1) || ~ismember(par.RombergTerms,0:3)
    error 'RombergTerms must be scalar, one of [0 1 2 3].'
  end
end

% FixedStep == [] by default; when supplied it must be a positive scalar
if (length(par.FixedStep)>1) || (~isempty(par.FixedStep) && (par.FixedStep<=0))
  error 'FixedStep must be empty or a scalar, >0.'
end

% MaxStep == 10 by default; when supplied it must be a positive scalar
if isempty(par.MaxStep)
  par.MaxStep = 10;
elseif (length(par.MaxStep)>1) || (par.MaxStep<=0)
  error 'MaxStep must be empty or a scalar, >0.'
end

end % check_params
3287
3288
3289 % ============================================
3290 % Included subfunction - parse_pv_pairs
3291 % ============================================
function params=parse_pv_pairs(params,pv_pairs)
% parse_pv_pairs: parses sets of property value pairs, allows defaults
% usage: params=parse_pv_pairs(default_params,pv_pairs)
%
% arguments: (input)
%  default_params - structure, with one field for every potential
%             property/value pair. Each field will contain the default
%             value for that property. If no default is supplied for a
%             given property, then that field must be empty.
%
%  pv_array - cell array of property/value pairs.
%             Case is ignored when comparing properties to the list
%             of field names. Also, any unambiguous shortening of a
%             field/property name is allowed.
%
% arguments: (output)
%  params - parameter struct that reflects any updated property/value
%           pairs in the pv_array.
%
% Example usage:
%  params.Viscosity = 1; params.Volume = 1; params.Pie = pi;
%  params.Description = '';
%  params = parse_pv_pairs(params,{'vis',10,'pie',3,'Desc','Hello'});
%  % -> Viscosity: 10, Volume: 1, Pie: 3, Description: 'Hello'
%  Capitalization is ignored, properties may be unambiguously shortened,
%  and pairs may appear in any order.

npv = length(pv_pairs);
n = npv/2;

% property/value arguments must come in pairs
if n~=floor(n)
  error 'Property/value pairs must come in PAIRS.'
end
if n<=0
  % just return the defaults
  return
end

if ~isstruct(params)
  error 'No structure for defaults was supplied'
end

% there was at least one pv pair. process any supplied
propnames = fieldnames(params);
lpropnames = lower(propnames);
for i=1:n
  p_i = lower(pv_pairs{2*i-1});
  v_i = pv_pairs{2*i};

  % exact (case-insensitive) match first, then unambiguous prefix match;
  % strcmp/strncmp replace the deprecated strmatch
  ind = find(strcmp(p_i,lpropnames));
  if isempty(ind)
    ind = find(strncmp(p_i,lpropnames,length(p_i)));
    if isempty(ind)
      error(['No matching property found for: ',pv_pairs{2*i-1}])
    elseif length(ind)>1
      error(['Ambiguous property name: ',pv_pairs{2*i-1}])
    end
  end
  p_i = propnames{ind};

  % override the corresponding default in params; a dynamic field name
  % replaces the deprecated setfield
  params.(p_i) = v_i;

end

end % parse_pv_pairs
3387
3388
3389 % =======================================
3390 % sub-functions
3391 % =======================================
function out = swapelement(vec,ind,val)
% swapelement: return a copy of VEC in which the single element at
% index IND has been replaced by VAL.
out = vec;
out(ind) = val;

end % sub-function end
3397
3398
3399 % =======================================
3400 % sub-functions
3401 % =======================================
function out = swap2(vec,ind1,val1,ind2,val2)
% swap2: return a copy of VEC with two elements replaced:
% index IND1 receives VAL1, then index IND2 receives VAL2
% (so VAL2 wins if the indices coincide).
out = vec;
out(ind1) = val1;
out(ind2) = val2;

end % sub-function end
3408
3409 % End of DERIVEST SUITE
3410