I have code implementing Powell's hybrid method for optimization. Can anyone convert it to MATLAB?

9 views (last 30 days)
integer i,iflag,iter,j,jm1,l,msum,ncfail,ncsuc,nslow1,nslow2
integer iwa(1)
logical jeval,sing
double precision actred,delta,epsmch,fnorm,fnorm1,one,pnorm,
* prered,p1,p5,p001,p0001,ratio,sum,temp,xnorm,
* zero
double precision dpmpar,enorm
data one,p1,p5,p001,p0001,zero
* /1.0d0,1.0d-1,5.0d-1,1.0d-3,1.0d-4,0.0d0/
c
c epsmch is the machine precision.
c
epsmch = dpmpar(1)
c
info = 0
iflag = 0
nfev = 0
c
c check the input parameters for errors.
c
if (n .le. 0 .or. xtol .lt. zero .or. maxfev .le. 0
* .or. ml .lt. 0 .or. mu .lt. 0 .or. factor .le. zero
* .or. ldfjac .lt. n .or. lr .lt. (n*(n + 1))/2) go to 300
if (mode .ne. 2) go to 20
do 10 j = 1, n
if (diag(j) .le. zero) go to 300
10 continue
20 continue
c
c evaluate the function at the starting point
c and calculate its norm.
c
iflag = 1
call fcn(n,x,fvec,iflag)
nfev = 1
if (iflag .lt. 0) go to 300
fnorm = enorm(n,fvec)
c
c determine the number of calls to fcn needed to compute
c the jacobian matrix.
c
msum = min0(ml+mu+1,n)
c
c initialize iteration counter and monitors.
c
iter = 1
ncsuc = 0
ncfail = 0
nslow1 = 0
nslow2 = 0
c
c beginning of the outer loop.
c
30 continue
jeval = .true.
c
c calculate the jacobian matrix.
c
iflag = 2
call fdjac1(fcn,n,x,fvec,fjac,ldfjac,iflag,ml,mu,epsfcn,wa1,
* wa2)
nfev = nfev + msum
if (iflag .lt. 0) go to 300
c
c compute the qr factorization of the jacobian.
c
call qrfac(n,n,fjac,ldfjac,.false.,iwa,1,wa1,wa2,wa3)
c
c on the first iteration and if mode is 1, scale according
c to the norms of the columns of the initial jacobian.
c
if (iter .ne. 1) go to 70
if (mode .eq. 2) go to 50
do 40 j = 1, n
diag(j) = wa2(j)
if (wa2(j) .eq. zero) diag(j) = one
40 continue
50 continue
c
c on the first iteration, calculate the norm of the scaled x
c and initialize the step bound delta.
c
do 60 j = 1, n
wa3(j) = diag(j)*x(j)
60 continue
xnorm = enorm(n,wa3)
delta = factor*xnorm
if (delta .eq. zero) delta = factor
70 continue
c
c form (q transpose)*fvec and store in qtf.
c
do 80 i = 1, n
qtf(i) = fvec(i)
80 continue
do 120 j = 1, n
if (fjac(j,j) .eq. zero) go to 110
sum = zero
do 90 i = j, n
sum = sum + fjac(i,j)*qtf(i)
90 continue
temp = -sum/fjac(j,j)
do 100 i = j, n
qtf(i) = qtf(i) + fjac(i,j)*temp
100 continue
110 continue
120 continue
c
c copy the triangular factor of the qr factorization into r.
c
sing = .false.
do 150 j = 1, n
l = j
jm1 = j - 1
if (jm1 .lt. 1) go to 140
do 130 i = 1, jm1
r(l) = fjac(i,j)
l = l + n - i
130 continue
140 continue
r(l) = wa1(j)
if (wa1(j) .eq. zero) sing = .true.
150 continue
c
c accumulate the orthogonal factor in fjac.
c
call qform(n,n,fjac,ldfjac,wa1)
c
c rescale if necessary.
c
if (mode .eq. 2) go to 170
do 160 j = 1, n
diag(j) = dmax1(diag(j),wa2(j))
160 continue
170 continue
c
c beginning of the inner loop.
c
180 continue
c
c if requested, call fcn to enable printing of iterates.
c
if (nprint .le. 0) go to 190
iflag = 0
if (mod(iter-1,nprint) .eq. 0) call fcn(n,x,fvec,iflag)
if (iflag .lt. 0) go to 300
190 continue
c
c determine the direction p.
c
call dogleg(n,r,lr,diag,qtf,delta,wa1,wa2,wa3)
c
c store the direction p and x + p. calculate the norm of p.
c
do 200 j = 1, n
wa1(j) = -wa1(j)
wa2(j) = x(j) + wa1(j)
wa3(j) = diag(j)*wa1(j)
200 continue
pnorm = enorm(n,wa3)
c
c on the first iteration, adjust the initial step bound.
c
if (iter .eq. 1) delta = dmin1(delta,pnorm)
c
c evaluate the function at x + p and calculate its norm.
c
iflag = 1
call fcn(n,wa2,wa4,iflag)
nfev = nfev + 1
if (iflag .lt. 0) go to 300
fnorm1 = enorm(n,wa4)
c
c compute the scaled actual reduction.
c
actred = -one
if (fnorm1 .lt. fnorm) actred = one - (fnorm1/fnorm)**2
c
c compute the scaled predicted reduction.
c
l = 1
do 220 i = 1, n
sum = zero
do 210 j = i, n
sum = sum + r(l)*wa1(j)
l = l + 1
210 continue
wa3(i) = qtf(i) + sum
220 continue
temp = enorm(n,wa3)
prered = zero
if (temp .lt. fnorm) prered = one - (temp/fnorm)**2
c
c compute the ratio of the actual to the predicted
c reduction.
c
ratio = zero
if (prered .gt. zero) ratio = actred/prered
c
c update the step bound.
c
if (ratio .ge. p1) go to 230
ncsuc = 0
ncfail = ncfail + 1
delta = p5*delta
go to 240
230 continue
ncfail = 0
ncsuc = ncsuc + 1
if (ratio .ge. p5 .or. ncsuc .gt. 1)
* delta = dmax1(delta,pnorm/p5)
if (dabs(ratio-one) .le. p1) delta = pnorm/p5
240 continue
c
c test for successful iteration.
c
if (ratio .lt. p0001) go to 260
c
c successful iteration. update x, fvec, and their norms.
c
do 250 j = 1, n
x(j) = wa2(j)
wa2(j) = diag(j)*x(j)
fvec(j) = wa4(j)
250 continue
xnorm = enorm(n,wa2)
fnorm = fnorm1
iter = iter + 1
260 continue
c
c determine the progress of the iteration.
c
nslow1 = nslow1 + 1
if (actred .ge. p001) nslow1 = 0
if (jeval) nslow2 = nslow2 + 1
if (actred .ge. p1) nslow2 = 0
c
c test for convergence.
c
if (delta .le. xtol*xnorm .or. fnorm .eq. zero) info = 1
if (info .ne. 0) go to 300
c
c tests for termination and stringent tolerances.
c
if (nfev .ge. maxfev) info = 2
if (p1*dmax1(p1*delta,pnorm) .le. epsmch*xnorm) info = 3
if (nslow2 .eq. 5) info = 4
if (nslow1 .eq. 10) info = 5
if (info .ne. 0) go to 300
c
c criterion for recalculating jacobian approximation
c by forward differences.
c
if (ncfail .eq. 2) go to 290
c
c calculate the rank one modification to the jacobian
c and update qtf if necessary.
c
do 280 j = 1, n
sum = zero
do 270 i = 1, n
sum = sum + fjac(i,j)*wa4(i)
270 continue
wa2(j) = (sum - wa3(j))/pnorm
wa1(j) = diag(j)*((diag(j)*wa1(j))/pnorm)
if (ratio .ge. p0001) qtf(j) = sum
280 continue
c
c compute the qr factorization of the updated jacobian.
c
call r1updt(n,n,r,lr,wa1,wa2,wa3,sing)
call r1mpyq(n,n,fjac,ldfjac,wa2,wa3)
call r1mpyq(1,n,qtf,1,wa2,wa3)
c
c end of the inner loop.
c
jeval = .false.
go to 180
290 continue
c
c end of the outer loop.
c
go to 30
300 continue
c
c termination, either normal or user imposed.
c
if (iflag .lt. 0) info = iflag
iflag = 0
if (nprint .gt. 0) call fcn(n,x,fvec,iflag)
return
c
c last card of subroutine hybrd.
c
end
subroutine dogleg(n, r, lr, diag, qtb, delta, x, wa1, wa2)
   !  Given an n by n matrix a with QR factorization a = q*r, an n by n
   !  nonsingular diagonal scaling matrix d, a vector b, and a positive
   !  trust-region radius delta, compute the convex combination x of the
   !  Gauss-Newton and scaled-gradient directions that minimizes
   !  (a*x - b) in the least-squares sense, subject to the restriction
   !  that the Euclidean norm of d*x be at most delta.
   !
   !  Arguments
   !    n     (in)  order of r (positive).
   !    r     (in)  array of length lr holding the upper triangular
   !                matrix r packed by rows.
   !    lr    (in)  length of r; at least (n*(n+1))/2.
   !    diag  (in)  length-n diagonal of the scaling matrix d.
   !    qtb   (in)  first n elements of (q transpose)*b.
   !    delta (in)  positive bound on the Euclidean norm of d*x.
   !    x     (out) length-n dogleg step.
   !    wa1, wa2 (out) length-n work arrays.
   !
   !  Originally: Argonne National Laboratory, MINPACK project, March 1980.
   !  Burton S. Garbow, Kenneth E. Hillstrom, Jorge J. More.
   !  This version replaces the MINPACK helpers dpmpar(1) and enorm with
   !  the standard intrinsics epsilon and norm2, which provide the same
   !  values (machine precision; overflow-safe Euclidean norm).
   implicit none
   integer, intent(in) :: n, lr
   double precision, intent(in) :: delta
   double precision, intent(in) :: r(lr), diag(n), qtb(n)
   double precision, intent(out) :: x(n), wa1(n), wa2(n)

   integer :: i, j, jj, k, l
   double precision :: alpha, bnorm, epsmch, gnorm, qnorm, sgnorm, s, temp
   double precision, parameter :: one = 1.0d0, zero = 0.0d0

   ! epsmch is the machine precision (was dpmpar(1)).
   epsmch = epsilon(one)

   ! First, calculate the Gauss-Newton direction by solving r*x = qtb
   ! with back substitution over the row-packed upper triangle.
   jj = (n*(n + 1))/2 + 1
   do k = 1, n
      j = n - k + 1
      jj = jj - k
      l = jj + 1
      s = zero
      do i = j + 1, n
         s = s + r(l)*x(i)
         l = l + 1
      end do
      temp = r(jj)
      if (temp == zero) then
         ! r is exactly singular in column j: substitute a tiny pivot
         ! scaled by the largest entry of the column (or epsmch itself).
         l = j
         do i = 1, j
            temp = max(temp, abs(r(l)))
            l = l + n - i
         end do
         temp = epsmch*temp
         if (temp == zero) temp = epsmch
      end if
      x(j) = (qtb(j) - s)/temp
   end do

   ! Test whether the Gauss-Newton direction is acceptable.
   do j = 1, n
      wa1(j) = zero
      wa2(j) = diag(j)*x(j)
   end do
   qnorm = norm2(wa2)
   if (qnorm <= delta) return

   ! The Gauss-Newton direction is not acceptable.
   ! Next, calculate the scaled gradient direction d**(-1) * r**T * qtb.
   l = 1
   do j = 1, n
      temp = qtb(j)
      do i = j, n
         wa1(i) = wa1(i) + r(l)*temp
         l = l + 1
      end do
      wa1(j) = wa1(j)/diag(j)
   end do

   ! Norm of the scaled gradient; handle the special case of a zero
   ! gradient (then sgnorm = 0 and the step reduces to alpha*x below).
   gnorm = norm2(wa1)
   sgnorm = zero
   alpha = delta/qnorm
   if (gnorm /= zero) then
      ! Point along the scaled gradient at which the quadratic is minimized.
      do j = 1, n
         wa1(j) = (wa1(j)/gnorm)/diag(j)
      end do
      l = 1
      do j = 1, n
         s = zero
         do i = j, n
            s = s + r(l)*wa1(i)
            l = l + 1
         end do
         wa2(j) = s
      end do
      temp = norm2(wa2)
      sgnorm = (gnorm/temp)/temp

      ! Test whether the scaled gradient direction is acceptable.
      alpha = zero
      if (sgnorm < delta) then
         ! Neither endpoint is acceptable: find the point along the
         ! dogleg (from the Cauchy point toward the Gauss-Newton step)
         ! at which the quadratic is minimized.
         bnorm = norm2(qtb)
         temp = (bnorm/gnorm)*(bnorm/qnorm)*(sgnorm/delta)
         temp = temp - (delta/qnorm)*(sgnorm/delta)**2 &
                + sqrt((temp - (delta/qnorm))**2 &
                + (one - (delta/qnorm)**2)*(one - (sgnorm/delta)**2))
         alpha = ((delta/qnorm)*(one - (sgnorm/delta)**2))/temp
      end if
   end if

   ! Form the appropriate convex combination of the Gauss-Newton
   ! direction and the scaled gradient direction.
   temp = (one - alpha)*min(sgnorm, delta)
   do j = 1, n
      x(j) = temp*wa1(j) + alpha*x(j)
   end do
end subroutine dogleg

Answers (1)

Matt Kindig
Matt Kindig on 22 Jul 2012
Edited: Walter Roberson on 22 Jul 2012
Hi Raj,
This code appears to be written in Fortran. In general, nobody on this forum is going to have the time to convert your code to Matlab, especially given that it contains a number of constructs, such as do-while loops and go-to commands that aren't even defined in Matlab.
From a quick search, it appears that this code is part of MINPACK, a suite of numerical solvers from Argonne. Given that the code is readily available for free, you could access the code from within Matlab (see the article on "External Interfaces" in the "Getting Started" section of the Help). Alternatively, you can compile the file using a Fortran compiler and access the executable through Matlab. A quick web search has also found a C/C++ version of MINPACK which might interface better with Matlab (http://devernay.free.fr/hacks/cminpack/index.html).
If you absolutely need Matlab code, I would recommend going back to the original Powell paper to learn the algorithm, and then start to build it in Matlab from scratch. You can always come back here if you run into problems with your Matlab code.
Good luck,
Matt
  2 Comments
Walter Roberson
Walter Roberson on 22 Jul 2012
It is not Fortran. The first line starts the "integer" declaration in column 1, which is not valid for fixed-format Fortran. It is not free-format Fortran either, though, as free-format Fortran uses & at the end of a line as the continuation character rather than putting a character into column 6, such as the "*" used on the line after the "data" statement.
Star Strider
Star Strider on 22 Jul 2012
All due respects, but it is an early version of FORTRAN (possibly FORTRAN IV), probably dating from the late 1970s or early 1980s. (I remember early FORTRAN well, noting that I learned it as a university undergraduate.) Anything in column 6 counts as a continuation character, although I always used sequential digits for clarity.

Sign in to comment.

Categories

Find more on Fortran with MATLAB in Help Center and File Exchange

Community Treasure Hunt

Find the treasures in MATLAB Central and discover how the community can help you!

Start Hunting!