optimize.tcl

00001 /* ----------------------------------------------------------------------*/
00002 /* */
00003 /*  math/optimize.tcl --*/
00004 /* */
00005 /*  This file contains functions for optimization of a function*/
00006 /*  or expression.*/
00007 /* */
00008 /*  Copyright (c) 2004, by Arjen Markus.*/
00009 /*  Copyright (c) 2004, 2005 by Kevin B. Kenny.  All rights reserved.*/
00010 /* */
00011 /*  See the file "license.terms" for information on usage and redistribution*/
00012 /*  of this file, and for a DISCLAIMER OF ALL WARRANTIES.*/
00013 /* */
00014 /*  RCS: @(#) $Id: optimize.tcl,v 1.11 2005/09/28 04:51:22 andreas_kupries Exp $*/
00015 /* */
00016 /* ----------------------------------------------------------------------*/
00017 
00018 package require Tcl 8.4
00019 
00020 /*  math::optimize --*/
00021 /*     Namespace for the commands*/
00022 /* */
00023 namespace eval ::math::optimize {
00024    namespace export minimum  maximum solveLinearProgram linearProgramMaximum
00025    namespace export min_bound_1d min_unbound_1d
00026 
00027    /*  Possible extension: minimumExpr, maximumExpr*/
00028 }
00029 
00030 /*  minimum --*/
00031 /*     Minimize a given function over a given interval*/
00032 /* */
00033 /*  Arguments:*/
00034 /*     begin       Start of the interval*/
00035 /*     end         End of the interval*/
00036 /*     func        Name of the function to be minimized (takes one*/
00037 /*                 argument)*/
00038 /*     maxerr      Maximum relative error (defaults to 1.0e-4)*/
00039 /*  Return value:*/
00040 /*     Computed value for which the function is minimal*/
00041 /*  Notes:*/
00042 /*     The function need not be differentiable, but it is assumed*/
00043 /*     to be continuous. There is no provision for sub-intervals where*/
00044 /*     the function is constant (this might happen when the maximum*/
00045 /*     error is very small, < 1.0e-15)*/
00046 /* */
00047 /*  Warning:*/
00048 /*     This procedure is deprecated - use min_bound_1d instead*/
00049 /* */
00050 proc ::math::optimize::minimum { begin end func {maxerr 1.0e-4} } {
00051 
00052    set nosteps  [expr {3+int(-log($maxerr)/log(2.0))}]
00053    set delta    [expr {0.5*($end-$begin)*$maxerr}]
00054 
00055    for { set step 0 } { $step < $nosteps } { incr step } {
00056       set x1 [expr {($end+$begin)/2.0}]
00057       set x2 [expr {$x1+$delta}]
00058 
00059       set fx1 [uplevel 1 $func $x1]
00060       set fx2 [uplevel 1 $func $x2]
00061 
00062       if {$fx1 < $fx2} {
00063          set end   $x1
00064       } else {
00065          set begin $x1
00066       }
00067    }
00068    return $x1
00069 }
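# Example (illustrative sketch; the command name "demoParabola" is made up,
# and the lines are commented out so that sourcing the package has no side
# effects):
#
#   proc demoParabola {x} { expr { ($x - 2.0) * ($x - 2.0) } }
#   puts [::math::optimize::minimum 0.0 5.0 demoParabola 1.0e-6]
#   ;# should print an abscissa close to 2.0, where the parabola is minimal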
00070 
00071 /*  maximum --*/
00072 /*     Maximize a given function over a given interval*/
00073 /* */
00074 /*  Arguments:*/
00075 /*     begin       Start of the interval*/
00076 /*     end         End of the interval*/
00077 /*     func        Name of the function to be maximized (takes one*/
00078 /*                 argument)*/
00079 /*     maxerr      Maximum relative error (defaults to 1.0e-4)*/
00080 /*  Return value:*/
00081 /*     Computed value for which the function is maximal*/
00082 /*  Notes:*/
00083 /*     The function need not be differentiable, but it is assumed*/
00084 /*     to be continuous. There is no provision for sub-intervals where*/
00085 /*     the function is constant (this might happen when the maximum*/
00086 /*     error is very small, < 1.0e-15)*/
00087 /* */
00088 /*  Warning:*/
00089 /*     This procedure is deprecated - use max_bound_1d instead*/
00090 /* */
00091 proc ::math::optimize::maximum { begin end func {maxerr 1.0e-4} } {
00092 
00093    set nosteps  [expr {3+int(-log($maxerr)/log(2.0))}]
00094    set delta    [expr {0.5*($end-$begin)*$maxerr}]
00095 
00096    for { set step 0 } { $step < $nosteps } { incr step } {
00097       set x1 [expr {($end+$begin)/2.0}]
00098       set x2 [expr {$x1+$delta}]
00099 
00100       set fx1 [uplevel 1 $func $x1]
00101       set fx2 [uplevel 1 $func $x2]
00102 
00103       if {$fx1 > $fx2} {
00104          set end   $x1
00105       } else {
00106          set begin $x1
00107       }
00108    }
00109    return $x1
00110 }
00111 
00112 /* ----------------------------------------------------------------------*/
00113 /* */
00114 /*  min_bound_1d --*/
00115 /* */
00116 /*        Find a local minimum of a function between two given*/
00117 /*        abscissae. Derivative of f is not required.*/
00118 /* */
00119 /*  Usage:*/
00120 /*        min_bound_1d f x1 x2 ?-option value?...*/
00121 /* */
00122 /*  Parameters:*/
00123 /*        f - Function to minimize.  Must be expressed as a Tcl*/
00124 /*            command, to which will be appended the value at which*/
00125 /*            to evaluate the function.*/
00126 /*        x1 - Lower bound of the interval in which to search for a*/
00127 /*             minimum*/
00128 /*        x2 - Upper bound of the interval in which to search for a minimum*/
00129 /* */
00130 /*  Options:*/
00131 /*        -relerror value*/
00132 /*                Gives the tolerance desired for the returned*/
00133 /*                abscissa.  Default is 1.0e-7.  Should never be less*/
00134 /*                than the square root of the machine precision.*/
00135 /*        -maxiter n*/
00136 /*                Constrains min_bound_1d to evaluate the function*/
00137 /*                no more than n times.  Default is 100.  If convergence*/
00138 /*                is not achieved after the specified number of iterations,*/
00139 /*                an error is thrown.*/
00140 /*        -guess value*/
00141 /*                Gives a point between x1 and x2 that is an initial guess*/
00142 /*                for the minimum.  f(guess) must be at most f(x1) or*/
00143 /*                f(x2).*/
00144 /*         -fguess value*/
00145 /*                 Gives the value of the ordinate at the value of '-guess'*/
00146 /*                 if known.  Default is to evaluate the function*/
00147 /*        -abserror value*/
00148 /*                Gives the desired absolute error for the returned*/
00149 /*                abscissa.  Default is 1.0e-10.*/
00150 /*        -trace boolean*/
00151 /*                A true value causes a trace to the standard output*/
00152 /*                of the function evaluations. Default is 0.*/
00153 /* */
00154 /*  Results:*/
00155 /*        Returns a two-element list comprising the abscissa at which*/
00156 /*        the function reaches a local minimum within the interval,*/
00157 /*        and the value of the function at that point.*/
00158 /* */
00159 /*  Side effects:*/
00160 /*        Whatever side effects arise from evaluating the given function.*/
00161 /* */
00162 /* ----------------------------------------------------------------------*/
00163 
00164 proc ::math::optimize::min_bound_1d { f x1 x2 args } {
00165 
00166     set f [lreplace $f 0 0 [uplevel 1 [list namespace which [lindex $f 0]]]]
00167 
00168     set phim1 0.6180339887498949
00169     set twomphi 0.3819660112501051
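    # phim1 is 1/phi = (sqrt(5)-1)/2 and twomphi is 2 - phi = 1 - 1/phi,
    # where phi = (1+sqrt(5))/2 is the golden ratio; these constants drive
    # the golden-section fallback steps used below whenever the parabolic
    # interpolation step is rejected.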
00170 
00171     array set params {
00172         -relerror 1.0e-7
00173         -abserror 1.0e-10
00174         -maxiter 100
00175         -trace 0
00176         -fguess {}
00177     }
00178     set params(-guess) [expr { $phim1 * $x1 + $twomphi * $x2 }]
00179 
00180     if { ( [llength $args] % 2 ) != 0 } {
00181         return -code error -errorcode [list min_bound_1d wrongNumArgs] \
00182             "wrong \# args, should be\
00183                  \"[lreplace [info level 0] 1 end f x1 x2 ?-option value?...]\""
00184     }
00185     foreach { key value } $args {
00186         if { ![info exists params($key)] } {
00187             return -code error -errorcode [list min_bound_1d badoption $key] \
00188                 "unknown option \"$key\",\
00189                      should be -abserror,\
00190                      -fguess, -guess, -maxiter, -relerror,\
00191                      or -trace"
00192         }
00193     set params($key) $value
00194     }
00195 
00196     # a and b presumably bracket the minimum of the function.  Make sure
00197     # they're in ascending order.
00198 
00199     if { $x1 < $x2 } {
00200         set a $x1; set b $x2
00201     } else {
00202         set b $x1; set a $x2
00203     }
00204 
00205     set x $params(-guess);              # Best abscissa found so far
00206     set w $x;                           # Second best abscissa found so far
00207     set v $x;                           # Most recent earlier value of w
00208 
00209     set e 0.0;                          # Distance moved on the step before
00210                     # last.
00211 
00212     # Evaluate the function at the initial guess
00213 
00214     if { $params(-fguess) ne {} } {
00215         set fx $params(-fguess)
00216     } else {
00217         set s $f; lappend s $x; set fx [eval $s]
00218         if { $params(-trace) } {
00219             puts stdout "f($x) = $fx (initialisation)"
00220         }
00221     }
00222     set fw $fx
00223     set fv $fx
00224 
00225     for { set iter 0 } { $iter < $params(-maxiter) } { incr iter } {
00226 
00227         # Find the midpoint of the current interval
00228 
00229         set xm [expr { 0.5 * ( $a + $b ) }]
00230 
00231         # Compute the current tolerance for x, and twice its value
00232 
00233         set tol [expr { $params(-relerror) * abs($x) + $params(-abserror) }]
00234         set tol2 [expr { $tol + $tol }]
00235         if { abs( $x - $xm ) <= $tol2 - 0.5 * ($b - $a) } {
00236             return [list $x $fx]
00237         }
00238         set golden 1
00239         if { abs($e) > $tol } {
00240 
00241             # Use parabolic interpolation to find a minimum determined
00242             # by the evaluations at x, v, and w.  The size of the step
00243             # to take will be $p/$q.
00244 
00245             set r [expr { ( $x - $w ) * ( $fx - $fv ) }]
00246             set q [expr { ( $x - $v ) * ( $fx - $fw ) }]
00247             set p [expr { ( $x - $v ) * $q - ( $x - $w ) * $r }]
00248             set q [expr { 2. * ( $q - $r ) }]
00249             if { $q > 0 } {
00250                 set p [expr { - $p }]
00251             } else {
00252                 set q [expr { - $q }]
00253             }
00254             set olde $e
00255             set e $d
00256 
00257             # Test if parabolic interpolation results in less than half
00258             # the movement of the step two steps ago.
00259 
00260             if { abs($p) < abs( .5 * $q * $olde )
00261                  && $p > $q * ( $a - $x )
00262                  && $p < $q * ( $b - $x ) } {
00263 
00264                 set d [expr { $p / $q }]
00265                 set u [expr { $x + $d }]
00266                 if { ( $u - $a ) < $tol2 || ( $b - $u ) < $tol2 } {
00267                     if { $xm-$x < 0 } {
00268                         set d [expr { - $tol }]
00269                     } else {
00270                         set d $tol
00271                     }
00272                 }
00273                 set golden 0
00274             }
00275         }
00276 
00277         # If parabolic interpolation didn't come up with an acceptable
00278         # result, use Golden Section instead.
00279 
00280         if { $golden } {
00281             if { $x >= $xm } {
00282                 set e [expr { $a - $x }]
00283             } else {
00284                 set e [expr { $b - $x }]
00285             }
00286             set d [expr { $twomphi * $e }]
00287         }
00288 
00289         # At this point, d is the size of the step to take.  Make sure
00290         # that it's at least $tol.
00291 
00292         if { abs($d) >= $tol } {
00293             set u [expr { $x + $d }]
00294         } elseif { $d < 0 } {
00295             set u [expr { $x - $tol }]
00296         } else {
00297             set u [expr { $x + $tol }]
00298         }
00299 
00300         # Evaluate the function
00301 
00302         set s $f; lappend s $u; set fu [eval $s]
00303         if { $params(-trace) } {
00304             if { $golden } {
00305                 puts stdout "f($u)=$fu (golden section)"
00306             } else {
00307                 puts stdout "f($u)=$fu (parabolic interpolation)"
00308             }
00309         }
00310 
00311         if { $fu <= $fx } {
00312             # We've the best abscissa so far.
00313 
00314             if { $u >= $x } {
00315                 set a $x
00316             } else {
00317                 set b $x
00318             }
00319             set v $w
00320             set fv $fw
00321             set w $x
00322             set fw $fx
00323             set x $u
00324             set fx $fu
00325         } else {
00326 
00327             if { $u < $x } {
00328                 set a $u
00329             } else {
00330                 set b $u
00331             }
00332             if { $fu <= $fw || $w == $x } {
00333                 # We've the second-best abscissa so far
00334                 set v $w
00335                 set fv $fw
00336                 set w $u
00337                 set fw $fu
00338             } elseif { $fu <= $fv || $v == $x || $v == $w } {
00339                 # We've the third-best so far
00340                 set v $u
00341                 set fv $fu
00342             }
00343         }
00344     }
00345 
00346     return -code error -errorcode [list min_bound_1d noconverge $iter] \
00347         "[lindex [info level 0] 0] failed to converge after $iter steps."
00348 
00349 }
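# Example (illustrative sketch; "demoCosine" is a made-up command name and
# the lines are commented out so that sourcing the package has no side
# effects): a bracketed minimization of cos(x) on [2, 4].
#
#   proc demoCosine {x} { expr { cos($x) } }
#   foreach {xmin fmin} [::math::optimize::min_bound_1d demoCosine 2.0 4.0] break
#   ;# xmin should be close to pi (3.14159...) and fmin close to -1.0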
00350 
00351 /* ----------------------------------------------------------------------*/
00352 /* */
00353 /*  brackmin --*/
00354 /* */
00355 /*        Find a place along the number line where a given function has*/
00356 /*        a local minimum.*/
00357 /* */
00358 /*  Usage:*/
00359 /*        brackmin f x1 x2 ?trace?*/
00360 /* */
00361 /*  Parameters:*/
00362 /*        f - Function to minimize*/
00363 /*        x1 - Abscissa thought to be near the minimum*/
00364 /*        x2 - Additional abscissa thought to be near the minimum*/
00365 /*  trace - Boolean variable that, if true,*/
00366 /*                causes 'brackmin' to print a trace of its function*/
00367 /*                evaluations to the standard output.  Default is 0.*/
00368 /* */
00369 /*  Results:*/
00370 /*        Returns a six-element list {x1 y1 x2 y2 x3 y3} where*/
00371 /*        y1=f(x1), y2=f(x2), y3=f(x3).  x2 lies between x1 and x3, and*/
00372 /*        y1>y2, y3>y2, proving that there is a local minimum somewhere*/
00373 /*        in the interval (x1,x3).*/
00374 /* */
00375 /*  Side effects:*/
00376 /*        Whatever effects the evaluation of f has.*/
00377 /* */
00378 /* ----------------------------------------------------------------------*/
00379 
00380 proc ::math::optimize::brackmin { f x1 x2 {trace 0} } {
00381 
00382     set f [lreplace $f 0 0 [uplevel 1 [list namespace which [lindex $f 0]]]]
00383 
00384     set phi 1.6180339887498949
00385     set epsilon 1.0e-20
00386     set limit 50.
00387 
00388     # Choose a and b so that f(a) < f(b)
00389 
00390     set cmd $f; lappend cmd $x1; set fx1 [eval $cmd]
00391     if { $trace } {
00392         puts "f($x1) = $fx1 (initialisation)"
00393     }
00394     set cmd $f; lappend cmd $x2; set fx2 [eval $cmd]
00395     if { $trace } {
00396         puts "f($x2) = $fx2 (initialisation)"
00397     }
00398     if { $fx1 > $fx2 } {
00399         set a $x1; set fa $fx1
00400         set b $x2; set fb $fx2
00401     } else {
00402         set a $x2; set fa $fx2
00403         set b $x1; set fb $fx1
00404     }
00405 
00406     # Choose a c in the downhill direction
00407 
00408     set c [expr { $b + $phi * ($b - $a) }]
00409     set cmd $f; lappend cmd $c; set fc [eval $cmd]
00410     if { $trace } {
00411         puts "f($c) = $fc (initial dilatation by phi)"
00412     }
00413 
00414     while { $fb >= $fc } {
00415 
00416         # Try to do parabolic extrapolation to the minimum
00417 
00418         set r [expr { ($b - $a) * ($fb - $fc) }]
00419         set q [expr { ($b - $c) * ($fb - $fa) }]
00420         if { abs( $q - $r ) > $epsilon } {
00421             set denom [expr { $q - $r }]
00422         } elseif { $q > $r } {
00423             set denom $epsilon
00424         } else {
00425             set denom -$epsilon
00426         }
00427         set u [expr { $b - ( (($b - $c) * $q - ($b - $a) * $r)
00428                              / (2. * $denom) ) }]
00429         set ulimit [expr { $b + $limit * ( $c - $b ) }]
00430 
00431         # Test the extrapolated abscissa
00432 
00433         if { ($b - $u) * ($u - $c) > 0 } {
00434 
00435             # u lies between b and c.  Try to interpolate
00436 
00437             set cmd $f; lappend cmd $u; set fu [eval $cmd]
00438             if { $trace } {
00439                 puts "f($u) = $fu (parabolic interpolation)"
00440             }
00441 
00442             if { $fu < $fc } {
00443 
00444                 # fb > fu and fc > fu, so there is a minimum between b and c
00445                 # with u as a starting guess.
00446 
00447                 return [list $b $fb $u $fu $c $fc]
00448 
00449             }
00450 
00451             if { $fu > $fb } {
00452 
00453                 # fb < fu, fb < fa, and u cannot lie between a and b
00454                 # (because it lies between a and c).  There is a minimum
00455                 # somewhere between a and u, with b a starting guess.
00456 
00457                 return [list $a $fa $b $fb $u $fu]
00458 
00459             }
00460 
00461             # Parabolic interpolation was useless. Expand the
00462             # distance by a factor of phi and try again.
00463 
00464             set u [expr { $c + $phi * ($c - $b) }]
00465             set cmd $f; lappend cmd $u; set fu [eval $cmd]
00466             if { $trace } {
00467                 puts "f($u) = $fu (parabolic interpolation failed)"
00468             }
00469 
00470 
00471         } elseif { ( $c - $u ) * ( $u - $ulimit ) > 0 } {
00472 
00473             # u lies between $c and $ulimit.
00474 
00475             set cmd $f; lappend cmd $u; set fu [eval $cmd]
00476             if { $trace } {
00477                 puts "f($u) = $fu (parabolic extrapolation)"
00478             }
00479 
00480             if { $fu > $fc } {
00481 
00482                 # minimum lies between b and u, with c an initial guess.
00483 
00484                 return [list $b $fb $c $fc $u $fu]
00485 
00486             }
00487 
00488             # function is still decreasing fa > fb > fc > fu. Take
00489             # another factor-of-phi step.
00490 
00491             set b $c; set fb $fc
00492             set c $u; set fc $fu
00493             set u [expr { $c + $phi * ( $c - $b ) }]
00494             set cmd $f; lappend cmd $u; set fu [eval $cmd]
00495             if { $trace } {
00496                 puts "f($u) = $fu (parabolic extrapolation ok)"
00497             }
00498 
00499         } elseif { ($u - $ulimit) * ( $ulimit - $c ) >= 0 } {
00500 
00501             # u went past ulimit.  Pull in to ulimit and evaluate there.
00502 
00503             set u $ulimit
00504             set cmd $f; lappend cmd $u; set fu [eval $cmd]
00505             if { $trace } {
00506                 puts "f($u) = $fu (limited step)"
00507             }
00508 
00509         } else {
00510 
00511             # parabolic extrapolation gave a useless value.
00512 
00513             set u [expr { $c + $phi * ( $c - $b ) }]
00514             set cmd $f; lappend cmd $u; set fu [eval $cmd]
00515             if { $trace } {
00516                 puts "f($u) = $fu (parabolic extrapolation failed)"
00517             }
00518 
00519         }
00520 
00521         set a $b; set fa $fb
00522         set b $c; set fb $fc
00523         set c $u; set fc $fu
00524     }
00525 
00526     return [list $a $fa $b $fb $c $fc]
00527 }
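# Example (illustrative sketch; "demoQuartic" is a made-up command name and
# the lines are commented out so that sourcing the package has no side
# effects): bracketing the minimum of (x-1)^4 from two nearby guesses.
#
#   proc demoQuartic {x} { expr { pow($x - 1.0, 4) } }
#   set bracket [::math::optimize::brackmin demoQuartic -3.0 -2.5]
#   ;# bracket is {a f(a) b f(b) c f(c)} with b between a and c and f(b)
#   ;# smaller than both f(a) and f(c), so a minimum lies inside (a, c)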
00528 
00529 /* ----------------------------------------------------------------------*/
00530 /* */
00531 /*  min_unbound_1d --*/
00532 /* */
00533 /*  Minimize a function of one variable, unconstrained, derivatives*/
00534 /*  not required.*/
00535 /* */
00536 /*  Usage:*/
00537 /*        min_unbound_1d f x1 x2 ?-option value?...*/
00538 /* */
00539 /*  Parameters:*/
00540 /*        f - Function to minimize.  Must be expressed as a Tcl*/
00541 /*            command, to which will be appended the value at which*/
00542 /*            to evaluate the function.*/
00543 /*        x1 - Initial guess at the minimum*/
00544 /*        x2 - Second initial guess at the minimum, used to set the*/
00545 /*       initial length scale for the search.*/
00546 /* */
00547 /*  Options:*/
00548 /*        -relerror value*/
00549 /*                Gives the tolerance desired for the returned*/
00550 /*                abscissa.  Default is 1.0e-7.  Should never be less*/
00551 /*                than the square root of the machine precision.*/
00552 /*        -maxiter n*/
00553 /*                Constrains min_unbound_1d to evaluate the function*/
00554 /*                no more than n times.  Default is 100.  If convergence*/
00555 /*                is not achieved after the specified number of iterations,*/
00556 /*                an error is thrown.*/
00557 /*        -abserror value*/
00558 /*                Gives the desired absolute error for the returned*/
00559 /*                abscissa.  Default is 1.0e-10.*/
00560 /*        -trace boolean*/
00561 /*                A true value causes a trace to the standard output*/
00562 /*                of the function evaluations. Default is 0.*/
00563 /* */
00564 /* ----------------------------------------------------------------------*/
00565 
00566 proc ::math::optimize::min_unbound_1d { f x1 x2 args } {
00567 
00568     set f [lreplace $f 0 0 [uplevel 1 [list namespace which [lindex $f 0]]]]
00569 
00570     array set params {
00571     -relerror 1.0e-7
00572     -abserror 1.0e-10
00573     -maxiter 100
00574         -trace 0
00575     }
00576     if { ( [llength $args] % 2 ) != 0 } {
00577         return -code error -errorcode [list min_unbound_1d wrongNumArgs] \
00578             "wrong \# args, should be\
00579                  \"[lreplace [info level 0] 1 end \
00580                          f x1 x2 ?-option value?...]\""
00581     }
00582     foreach { key value } $args {
00583         if { ![info exists params($key)] } {
00584             return -code error -errorcode [list min_unbound_1d badoption $key] \
00585                 "unknown option \"$key\",\
00586                      should be -abserror, -maxiter, -relerror, or -trace"
00587         }
00588         set params($key) $value
00589     }
00590     foreach { a fa b fb c fc } [brackmin $f $x1 $x2 $params(-trace)] {
00591     break
00592     }
00593     return [eval [linsert [array get params] 0 \
00594               min_bound_1d $f $a $c -guess $b -fguess $fb]]
00595 }
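# Example (illustrative sketch; "demoShifted" is a made-up command name and
# the lines are commented out so that sourcing the package has no side
# effects): unconstrained 1-D minimization from two rough starting guesses.
#
#   proc demoShifted {x} { expr { ($x - 4.0) * ($x - 4.0) + 1.0 } }
#   foreach {xmin fmin} [::math::optimize::min_unbound_1d demoShifted 0.0 1.0] break
#   ;# xmin should be close to 4.0 and fmin close to 1.0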
00596 
00597 /* ----------------------------------------------------------------------*/
00598 /* */
00599 /*  nelderMead --*/
00600 /* */
00601 /*  Attempt to minimize/maximize a function using the downhill*/
00602 /*  simplex method of Nelder and Mead.*/
00603 /* */
00604 /*  Usage:*/
00605 /*  nelderMead f x ?-keyword value?*/
00606 /* */
00607 /*  Parameters:*/
00608 /*  f - The function to minimize.  The function must be an incomplete*/
00609 /*      Tcl command, to which will be appended N parameters.*/
00610 /*  x - The starting guess for the minimum; a vector of N parameters*/
00611 /*      to be passed to the function f.*/
00612 /* */
00613 /*  Options:*/
00614 /*  -scale xscale*/
00615 /*      Initial guess as to the problem scale.  If '-scale' is*/
00616 /*      supplied, then the parameters will be varied by the*/
00617 /*          specified amounts.  The '-scale' parameter must be of the*/
00618 /*      same dimension as the 'x' vector, and all elements must*/
00619 /*      be nonzero.  Default is 0.0001 times the 'x' vector,*/
00620 /*      or 0.0001 for zero elements in the 'x' vector.*/
00621 /* */
00622 /*  -ftol epsilon*/
00623 /*      Requested tolerance in the function value; nelderMead*/
00624 /*      returns if N+1 consecutive iterates all differ by less*/
00625 /*      than the -ftol value.  Default is 1.0e-7*/
00626 /* */
00627 /*  -maxiter N*/
00628 /*      Maximum number of iterations to attempt.  Default is*/
00629 /*      500.*/
00630 /* */
00631 /*  -trace flag*/
00632 /*      If '-trace 1' is supplied, nelderMead writes a record*/
00633 /*      of function evaluations to the standard output as it*/
00634 /*      goes.  Default is 0.*/
00635 /* */
00636 /* ----------------------------------------------------------------------*/
00637 
00638 proc ::math::optimize::nelderMead { f startx args } {
00639     array set params {
00640     -ftol 1.e-7
00641     -maxiter 500
00642     -scale {}
00643     -trace 0
00644     }
00645 
00646     # Check arguments
00647 
00648     if { ( [llength $args] % 2 ) != 0 } {
00649         return -code error -errorcode [list nelderMead wrongNumArgs] \
00650             "wrong \# args, should be\
00651                  \"[lreplace [info level 0] 1 end \
00652                          f x1 x2 ?-option value?...]\""
00653     }
00654     foreach { key value } $args {
00655         if { ![info exists params($key)] } {
00656             return -code error -errorcode [list nelderMead badoption $key] \
00657                 "unknown option \"$key\",\
00658                      should be -ftol, -maxiter, -scale or -trace"
00659         }
00660         set params($key) $value
00661     }
00662     
00663     # Construct the initial simplex
00664 
00665     set vertices [list $startx]
00666     if { [llength $params(-scale)] == 0 } {
00667     set i 0
00668     foreach x0 $startx {
00669         if { $x0 == 0 } {
00670         set x1 0.0001
00671         } else {
00672         set x1 [expr {1.0001 * $x0}]
00673         }
00674         lappend vertices [lreplace $startx $i $i $x1]
00675         incr i
00676     }
00677     } elseif { [llength $params(-scale)] != [llength $startx] } {
00678     return -code error -errorcode [list nelderMead badOption -scale] \
00679         "-scale vector must be of same size as starting x vector"
00680     } else {
00681     set i 0
00682     foreach x0 $startx s $params(-scale) {
00683         lappend vertices [lreplace $startx $i $i [expr { $x0 + $s }]]
00684         incr i
00685     }
00686     }
00687 
00688     # Evaluate at the initial points
00689 
00690     set n [llength $startx]
00691     foreach x $vertices {
00692     set cmd $f
00693     foreach xx $x {
00694         lappend cmd $xx
00695     }
00696     set y [uplevel 1 $cmd]
00697     if {$params(-trace)} {
00698         puts "nelderMead: evaluating initial point: x=[list $x] y=$y"
00699     }
00700     lappend yvec $y
00701     }
00702 
00703 
00704     # Loop adjusting the simplex in the 'vertices' array.
00705 
00706     set nIter 0
00707     while { 1 } {
00708 
00709     # Find the highest, next highest, and lowest value in y,
00710     # and save the indices.
00711 
00712     set iBot 0
00713     set yBot [lindex $yvec 0]
00714     set iTop -1
00715     set yTop [lindex $yvec 0]
00716     set iNext -1
00717     set i 0
00718     foreach y $yvec {
00719         if { $y <= $yBot } {
00720         set yBot $y
00721         set iBot $i
00722         }
00723         if { $iTop < 0 || $y >= $yTop } {
00724         set iNext $iTop
00725         set yNext $yTop
00726         set iTop $i
00727         set yTop $y
00728         } elseif { $iNext < 0 || $y >= $yNext } {
00729         set iNext $i
00730         set yNext $y
00731         }
00732         incr i
00733     }
00734 
00735     # Return if the relative error is within an acceptable range
00736 
00737     set rerror [expr { 2. * abs( $yTop - $yBot )
00738                / ( abs( $yTop ) + abs( $yBot ) ) }]
00739     if { $rerror < $params(-ftol) } {
00740         set status ok
00741         break
00742     }
00743 
00744     # Count iterations
00745 
00746     if { [incr nIter] > $params(-maxiter) } {
00747         set status too-many-iterations
00748         break
00749     }
00750     incr nIter
00751 
00752     # Find the centroid of the face opposite the vertex that
00753     # maximizes the function value.
00754 
00755     set centroid {}
00756     for { set i 0 } { $i < $n } { incr i } {
00757         lappend centroid 0.0
00758     }
00759     set i 0
00760     foreach v $vertices {
00761         if { $i != $iTop } {
00762         set newCentroid {}
00763         foreach x0 $centroid x1 $v {
00764             lappend newCentroid [expr { $x0 + $x1 }]
00765         }
00766         set centroid $newCentroid
00767         }
00768         incr i
00769     }
00770     set newCentroid {}
00771     foreach x $centroid {
00772         lappend newCentroid [expr { $x / $n }]
00773     }
00774     set centroid $newCentroid
00775 
00776     # The first trial point is a reflection of the high point
00777     # around the centroid
00778 
00779     set trial {}
00780     foreach x0 [lindex $vertices $iTop] x1 $centroid {
00781         lappend trial [expr {$x1 + ($x1 - $x0)}]
00782     }
00783     set cmd $f
00784     foreach xx $trial {
00785         lappend cmd $xx
00786     } 
00787     set yTrial [uplevel 1 $cmd]
00788     if { $params(-trace) } {
00789         puts "nelderMead: trying reflection: x=[list $trial] y=$yTrial"
00790     }
00791 
00792     # If that reflection yields a new minimum, replace the high point,
00793     # and additionally try dilating in the same direction.
00794 
00795     if { $yTrial < $yBot } {
00796         set trial2 {}
00797         foreach x0 $centroid x1 $trial {
00798         lappend trial2 [expr { $x1 + ($x1 - $x0) }]
00799         }
00800         set cmd $f
00801         foreach xx $trial2 {
00802         lappend cmd $xx
00803         }
00804         set yTrial2 [uplevel 1 $cmd]
00805         if { $params(-trace) } {
00806         puts "nelderMead: trying dilated reflection:\
00807                       x=[list $trial2] y=$yTrial2"
00808         }
00809         if { $yTrial2 < $yBot } {
00810 
00811         # Additional dilation yields a new minimum
00812 
00813         lset vertices $iTop $trial2
00814         lset yvec $iTop $yTrial2
00815         } else {
00816 
00817         # Additional dilation failed, but we can still use
00818         # the first trial point.
00819 
00820         lset vertices $iTop $trial
00821         lset yvec $iTop $yTrial
00822 
00823         }
00824     } elseif { $yTrial < $yNext } {
00825 
00826         # The reflected point isn't a new minimum, but it's
00827         # better than the second-highest.  Replace the old high
00828         # point and try again.
00829 
00830         lset vertices $iTop $trial
00831         lset yvec $iTop $yTrial
00832 
00833     } else {
00834 
00835         # The reflected point is worse than the second-highest point.
00836         # If it's better than the highest, keep it... but in any case,
00837         # we want to try contracting the simplex, because a further
00838         # reflection will simply bring us back to the starting point.
00839 
00840         if { $yTrial < $yTop } {
00841         lset vertices $iTop $trial
00842         lset yvec $iTop $yTrial
00843         set yTop $yTrial
00844         }
00845         set trial {}
00846         foreach x0 [lindex $vertices $iTop] x1 $centroid {
00847         lappend trial [expr { ( $x0 + $x1 ) / 2. }]
00848         }
00849         set cmd $f
00850         foreach xx $trial {
00851         lappend cmd $xx
00852         }
00853         set yTrial [uplevel 1 $cmd]
00854         if { $params(-trace) } {
00855         puts "nelderMead: contracting from high point:\
00856                       x=[list $trial] y=$yTrial"
00857         }
00858         if { $yTrial < $yTop } {
00859 
00860         # Contraction gave an improvement, so continue with
00861         # the smaller simplex
00862 
00863         lset vertices $iTop $trial
00864         lset yvec $iTop $yTrial
00865 
00866         } else {
00867 
00868         # Contraction gave no improvement either; we seem to
00869         # be in a valley of peculiar topology.  Contract the
00870         # simplex about the low point and try again.
00871 
00872         set newVertices {}
00873         set newYvec {}
00874         set i 0
00875         foreach v $vertices y $yvec {
00876             if { $i == $iBot } {
00877             lappend newVertices $v
00878             lappend newYvec $y
00879             } else {
00880             set newv {}
00881             foreach x0 $v x1 [lindex $vertices $iBot] {
00882                 lappend newv [expr { ($x0 + $x1) / 2. }]
00883             }
00884             lappend newVertices $newv
00885             set cmd $f
00886             foreach xx $newv {
00887                 lappend cmd $xx
00888             }
00889             lappend newYvec [uplevel 1 $cmd]
00890             if { $params(-trace) } {
00891                 puts "nelderMead: contracting about low point:\
00892                                   x=[list $newv] y=[lindex $newYvec end]"
00893             }
00894             }
00895             incr i
00896         }
00897         set vertices $newVertices
00898         set yvec $newYvec
00899         }
00900 
00901     }
00902 
00903     }
00904     return [list y $yBot x [lindex $vertices $iBot] vertices $vertices yvec $yvec nIter $nIter status $status]
00905 
00906 }
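# Example (illustrative sketch; "demoBowl" is a made-up command name and the
# lines are commented out so that sourcing the package has no side effects):
# minimizing a smooth two-dimensional bowl centred at (1, 2).
#
#   proc demoBowl {x y} {
#       expr { 3.0 + ($x - 1.0)*($x - 1.0) + 10.0*($y - 2.0)*($y - 2.0) }
#   }
#   array set r [::math::optimize::nelderMead demoBowl {0.0 0.0} -scale {0.5 0.5}]
#   ;# r(x) should be close to {1.0 2.0}, r(y) close to 3.0, r(status) "ok"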
00907 
00908 /*  solveLinearProgram*/
00909 /*     Solve a linear program in standard form*/
00910 /* */
00911 /*  Arguments:*/
00912 /*     objective     Vector defining the objective function*/
00913 /*     constraints   Matrix of constraints (as a list of lists)*/
00914 /* */
00915 /*  Return value:*/
00916 /*     Computed values for the coordinates or "unbounded" or "infeasible"*/
00917 /* */
00918 proc ::math::optimize::solveLinearProgram { objective constraints } {
00919     #
00920     # Check the arguments first and then put them in a more convenient
00921     # form
00922     #
00923 
00924     foreach {nconst nvars matrix} \
00925         [SimplexPrepareMatrix $objective $constraints] {break}
00926 
00927     set solution [SimplexSolve $nconst $nvars $matrix]
00928 
00929     if { [llength $solution] > 1 } {
00930         return [lrange $solution 0 [expr {$nvars-1}]]
00931     } else {
00932         return $solution
00933     }
00934 }
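# Example (illustrative sketch; the data are made up and the lines are
# commented out so that sourcing the package has no side effects):
# maximize 3x + 2y subject to x + y <= 4 and x + 3y <= 6, with x, y >= 0.
# Each constraint row lists the coefficients followed by the right-hand side.
#
#   set objective   {3.0 2.0}
#   set constraints {{1.0 1.0 4.0} {1.0 3.0 6.0}}
#   set coords [::math::optimize::solveLinearProgram $objective $constraints]
#   ;# coords should come out as {4.0 0.0}
#   puts [::math::optimize::linearProgramMaximum $objective $coords]
#   ;# should print 12.0, the objective value at the optimum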
00935 
00936 /*  linearProgramMaximum --*/
00937 /*     Compute the value attained at the optimum*/
00938 /* */
00939 /*  Arguments:*/
00940 /*     objective     The coefficients of the objective function*/
00941 /*     result        The coordinate values as obtained by solving the program*/
00942 /* */
00943 /*  Return value:*/
00944 /*     Value at the maximum point*/
00945 /* */
00946 proc ::math::optimize::linearProgramMaximum { objective result } {
00947 
00948     set value    0.0
00949 
00950     foreach coeff $objective coord $result {
00951         set value [expr {$value+$coeff*$coord}]
00952     }
00953 
00954     return $value
00955 }
00956 
00957 /*  SimplexPrintMatrix*/
00958 /*     Debugging routine: print the matrix in easy to read form*/
00959 /* */
00960 /*  Arguments:*/
00961 /*     matrix        Matrix to be printed*/
00962 /* */
00963 /*  Return value:*/
00964 /*     None*/
00965 /* */
00966 /*  Note:*/
00967 /*     The tableau should be transposed ...*/
00968 /* */
00969 proc ::math::optimize::SimplexPrintMatrix { matrix } {
00970     puts "\nBasis:\t[join [lindex $matrix 0] \t]"
00971     foreach col [lrange $matrix 1 end] {
00972         puts "      \t[join $col \t]"
00973     }
00974 }
00975 
00976 /*  SimplexPrepareMatrix*/
00977 /*     Prepare the standard tableau from all program data*/
00978 /* */
00979 /*  Arguments:*/
00980 /*     objective     Vector defining the objective function*/
00981 /*     constraints   Matrix of constraints (as a list of lists)*/
00982 /* */
00983 /*  Return value:*/
00984 /*     List of values as a standard tableau and two values*/
00985 /*     for the sizes*/
00986 /* */
00987 proc ::math::optimize::SimplexPrepareMatrix { objective constraints } {
00988 
00989     #
00990     # Check the arguments first
00991     #
00992     set nconst [llength $constraints]
00993     set ncols {}
00994     foreach row $constraints {
00995         if { $ncols == {} } {
00996             set ncols [llength $row]
00997         } else {
00998             if { $ncols != [llength $row] } {
00999                 return -code error -errorcode ARGS "Incorrectly formed constraints matrix"
01000             }
01001         }
01002     }
01003 
01004     set nvars [expr {$ncols-1}]
01005 
01006     if { [llength $objective] != $nvars } {
01007         return -code error -errorcode ARGS "Incorrect length for objective vector"
01008     }
01009 
01010     #
01011     # Set up the tableau:
01012     # Easiest manipulations if we store the columns first
01013     # So:
01014     # - First column is the list of variable indices in the basis
01015     # - Second column is the list of maximum values
01016     # - "nvars" columns that follow: the coefficients for the actual
01017     #   variables
01018     # - last "nconst" columns: the slack variables
01019     #
01020     set matrix   [list]
01021     set lastrow  [concat $objective [list 0.0]]
01022 
01023     set newcol   [list]
01024     for {set idx 0} {$idx < $nconst} {incr idx} {
01025         lappend newcol [expr {$nvars+$idx}]
01026     }
01027     lappend newcol "?"
01028     lappend matrix $newcol
01029 
01030     set zvector [list]
01031     foreach row $constraints {
01032         lappend zvector [lindex $row end]
01033     }
01034     lappend zvector 0.0
01035     lappend matrix $zvector
01036 
01037     for {set idx 0} {$idx < $nvars} {incr idx} {
01038         set newcol [list]
01039         foreach row $constraints {
01040             lappend newcol [expr {double([lindex $row $idx])}]
01041         }
01042         lappend newcol [expr {-double([lindex $lastrow $idx])}]
01043          lappend matrix $newcol
01044     }
01045 
01046     #
01047     # Add the columns for the slack variables
01048     #
01049     set zeros {}
01050     for {set idx 0} {$idx <= $nconst} {incr idx} {
01051         lappend zeros 0.0
01052     }
01053     for {set idx 0} {$idx < $nconst} {incr idx} {
01054         lappend matrix [lreplace $zeros $idx $idx 1.0]
01055     }
01056 
01057     return [list $nconst $nvars $matrix]
01058 }
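# Worked illustration of the tableau layout: for the program "maximize
# 3x + 2y subject to x + y <= 4 and x + 3y <= 6", i.e. objective {3.0 2.0}
# and constraints {{1.0 1.0 4.0} {1.0 3.0 6.0}}, the result is a list
# {2 2 <matrix>} whose <matrix> columns are:
#
#   {2 3 ?}           indices of the basis variables (the two slacks), plus a marker
#   {4.0 6.0 0.0}     right-hand sides and the current objective value
#   {1.0 1.0 -3.0}    coefficients of x (negated objective coefficient in the last row)
#   {1.0 3.0 -2.0}    coefficients of y
#   {1.0 0.0 0.0}     slack variable of the first constraint
#   {0.0 1.0 0.0}     slack variable of the second constraint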
01059 
01060 /*  SimplexSolve --*/
01061 /*     Solve the given linear program using the simplex method*/
01062 /* */
01063 /*  Arguments:*/
01064 /*     nconst        Number of constraints*/
01065 /*     nvars         Number of actual variables*/
01066 /*     tableau       Standard tableau (as a list of columns)*/
01067 /* */
01068 /*  Return value:*/
01069 /*     List of values for the actual variables*/
01070 /* */
01071 proc ::math::optimize::SimplexSolve { nconst nvars tableau } {
01072     set end 0
01073     while { !$end } {
01074 
01075         #
01076         # Find the new variable to put in the basis
01077         #
01078         set nextcol [SimplexFindNextColumn $tableau]
01079         if { $nextcol == -1 } {
01080             set end 1
01081             continue
01082         }
01083 
01084         #
01085         # Now determine which one should leave
01086         # TODO: is a lack of a proper row indeed an
01087         #       indication of the infeasibility?
01088         #
01089         set nextrow [SimplexFindNextRow $tableau $nextcol]
01090         if { $nextrow == -1 } {
01091             return "infeasible"
01092         }
01093 
01094         #
01095         # Make the vector for sweeping through the tableau
01096         #
01097         set vector [SimplexMakeVector $tableau $nextcol $nextrow]
01098 
01099         #
01100         # Sweep through the tableau
01101         #
01102         set tableau [SimplexNewTableau $tableau $nextcol $nextrow $vector]
01103     }
01104 
01105     #
01106     # Now we can return the result
01107     #
01108     SimplexResult $tableau
01109 }
01110 
01111 /*  SimplexResult --*/
01112 /*     Reconstruct the result vector*/
01113 /* */
01114 /*  Arguments:*/
01115 /*     tableau       Standard tableau (as a list of columns)*/
01116 /* */
01117 /*  Return value:*/
01118 /*     Vector of values representing the maximum point*/
01119 /* */
01120 proc ::math::optimize::SimplexResult { tableau } {
01121     set result {}
01122 
01123     set firstcol  [lindex $tableau 0]
01124     set secondcol [lindex $tableau 1]
01125     set result    {}
01126 
01127     set nvars     [expr {[llength $tableau]-2}]
01128     for {set i 0} {$i < $nvars } { incr i } {
01129         lappend result 0.0
01130     }
01131 
01132     set idx 0
01133     foreach col [lrange $firstcol 0 end-1] {
01134         set result [lreplace $result $col $col [lindex $secondcol $idx]]
01135         incr idx
01136     }
01137 
01138     return $result
01139 }
01140 
01141 /*  SimplexFindNextColumn --*/
01142 /*     Find the next column - the one with the largest negative*/
01143 /*     coefficient*/
01144 /* */
01145 /*  Arguments:*/
01146 /*     tableau       Standard tableau (as a list of columns)*/
01147 /* */
01148 /*  Return value:*/
01149 /*     Index of the column*/
01150 /* */
01151 proc ::math::optimize::SimplexFindNextColumn { tableau } {
01152     set idx        0
01153     set minidx    -1
01154     set mincoeff   0.0
01155 
01156     foreach col [lrange $tableau 2 end] {
01157         set coeff [lindex $col end]
01158         if { $coeff < 0.0 } {
01159             if { $coeff < $mincoeff } {
01160                 set minidx $idx
01161                set mincoeff $coeff
01162             }
01163         }
01164         incr idx
01165     }
01166 
01167     return $minidx
01168 }
01169 
01170 /*  SimplexFindNextRow --*/
01171 /*     Find the next row - the one with the smallest ratio of*/
01172 /*     right-hand side to pivot-column coefficient (minimum ratio test)*/
01173 /* */
01174 /*  Arguments:*/
01175 /*     tableau       Standard tableau (as a list of columns)*/
01176 /*     nextcol       Index of the variable that will replace this one*/
01177 /* */
01178 /*  Return value:*/
01179 /*     Index of the row*/
01180 /* */
01181 proc ::math::optimize::SimplexFindNextRow { tableau nextcol } {
01182     set idx        0
01183     set minidx    -1
01184     set mincoeff   {}
01185 
01186     set bvalues [lrange [lindex $tableau 1] 0 end-1]
01187     set yvalues [lrange [lindex $tableau [expr {2+$nextcol}]] 0 end-1]
01188 
01189     foreach rowcoeff $bvalues divcoeff $yvalues {
01190         if { $divcoeff > 0.0 } {
01191             set coeff [expr {$rowcoeff/$divcoeff}]
01192 
01193             if { $mincoeff == {} || $coeff < $mincoeff } {
01194                 set minidx $idx
01195                 set mincoeff $coeff
01196             }
01197         }
01198         incr idx
01199     }
01200 
01201     return $minidx
01202 }
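# Illustration of the ratio test: with right-hand sides {4.0 6.0} and pivot
# column entries {1.0 3.0}, the candidate ratios are 4.0/1.0 = 4.0 and
# 6.0/3.0 = 2.0, so row index 1 leaves the basis; rows whose pivot-column
# entry is not positive are never considered.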
01203 
01204 /*  SimplexMakeVector --*/
01205 /*     Make the "sweep" vector*/
01206 /* */
01207 /*  Arguments:*/
01208 /*     tableau       Standard tableau (as a list of columns)*/
01209 /*     nextcol       Index of the variable that will replace this one*/
01210 /*     nextrow       Index of the variable in the base that will be replaced*/
01211 /* */
01212 /*  Return value:*/
01213 /*     Vector to be used to update the coefficients of the tableau*/
01214 /* */
01215 proc ::math::optimize::SimplexMakeVector { tableau nextcol nextrow } {
01216 
01217     set idx      0
01218     set vector   {}
01219     set column   [lindex $tableau [expr {2+$nextcol}]]
01220     set divcoeff [lindex $column $nextrow]
01221 
01222     foreach colcoeff $column {
01223         if { $idx != $nextrow } {
01224             set coeff [expr {-$colcoeff/$divcoeff}]
01225         } else {
01226             set coeff [expr {1.0/$divcoeff-1.0}]
01227         }
01228         lappend vector $coeff
01229         incr idx
01230     }
01231 
01232     return $vector
01233 }
01234 
01235 /*  SimplexNewTableau --*/
01236 /*     Sweep through the tableau and create the new one*/
01237 /* */
01238 /*  Arguments:*/
01239 /*     tableau       Standard tableau (as a list of columns)*/
01240 /*     nextcol       Index of the variable that will replace this one*/
01241 /*     nextrow       Index of the variable in the base that will be replaced*/
01242 /*     vector        Vector to sweep with*/
01243 /* */
01244 /*  Return value:*/
01245 /*     New tableau*/
01246 /* */
01247 proc ::math::optimize::SimplexNewTableau { tableau nextcol nextrow vector } {
01248 
01249     #
01250     # The first column: replace the nextrow-th element
01251     # The second column: replace the value at the nextrow-th element
01252     # For all the others: the same recipe
01253     #
01254     set firstcol   [lreplace [lindex $tableau 0] $nextrow $nextrow $nextcol]
01255     set newtableau [list $firstcol]
01256 
01257     #
01258     # The rest of the matrix
01259     #
01260     foreach column [lrange $tableau 1 end] {
01261         set yval   [lindex $column $nextrow]
01262         set newcol {}
01263         foreach c $column vcoeff $vector {
01264             set newval [expr {$c+$yval*$vcoeff}]
01265             lappend newcol $newval
01266         }
01267         lappend newtableau $newcol
01268     }
01269 
01270     return $newtableau
01271 }
01272 
01273 /*  Now we can announce our presence*/
01274 package provide math::optimize 1.0
01275 
01276 if { ![info exists ::argv0] || [string compare $::argv0 [info script]] } {
01277     return
01278 }
01279 
01280 namespace import math::optimize::min_bound_1d
01281 namespace import math::optimize::maximum
01282 namespace import math::optimize::nelderMead
01283 
01284 proc f { x y } {
01285     set xx [expr { $x - 3.1415926535897932 / 2. }]
01286     set v1 [expr { 0.3 * exp( -$xx*$xx / 2. ) }]
01287     set d [expr { 10. * $y - sin(9. * $x) }]
01288     set v2 [expr { exp(-10.*$d*$d)}]
01289     set rv [expr { -$v1 - $v2 }]
01290     return $rv
01291 }
01292 
01293 proc g { a b } {
01294     set x1 [expr {0.1 - $a + $b}]
01295     set x2 [expr {$a + $b - 1.}]
01296     set x3 [expr {3.-8.*$a+8.*$a*$a-8.*$b+8.*$b*$b}]
01297     set x4 [expr {$a/10. + $b/10. + $x1*$x1/3. + $x2*$x2 - $x2 * exp(1-$x3*$x3)}]
01298     return $x4
01299 }
01300 
01301 if { ![package vsatisfies [package provide Tcl] 8.5] } {
01302      set tcl_precision 17
01303 }
01304 puts "f"
01305 puts [math::optimize::nelderMead f {1. 0.} -scale {0.1 0.01} -trace 1]
01306 puts "g"
01307 puts [math::optimize::nelderMead g {0. 0.} -scale {1. 1.} -trace 1]
01308 
