github.com/jingcheng-WU/gonum@v0.9.1-0.20210323123734-f1a2a11a8f7b/optimize/functions/functions.go

// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package functions

import (
	"math"

	"github.com/jingcheng-WU/gonum/floats"
	"github.com/jingcheng-WU/gonum/mat"
)

// Beale implements the Beale function.
//
// Standard starting points:
//  Easy: [1, 1]
//  Hard: [1, 4]
//
// References:
//  - Beale, E.: On an Iterative Method for Finding a Local Minimum of a
//    Function of More than One Variable. Technical Report 25, Statistical
//    Techniques Research Group, Princeton University (1958)
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Beale struct{}

func (Beale) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	f1 := 1.5 - x[0]*(1-x[1])
	f2 := 2.25 - x[0]*(1-x[1]*x[1])
	f3 := 2.625 - x[0]*(1-x[1]*x[1]*x[1])
	return f1*f1 + f2*f2 + f3*f3
}

func (Beale) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	t1 := 1 - x[1]
	t2 := 1 - x[1]*x[1]
	t3 := 1 - x[1]*x[1]*x[1]

	f1 := 1.5 - x[0]*t1
	f2 := 2.25 - x[0]*t2
	f3 := 2.625 - x[0]*t3

	grad[0] = -2 * (f1*t1 + f2*t2 + f3*t3)
	grad[1] = 2 * x[0] * (f1 + 2*f2*x[1] + 3*f3*x[1]*x[1])
}

func (Beale) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != dst.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	t1 := 1 - x[1]
	t2 := 1 - x[1]*x[1]
	t3 := 1 - x[1]*x[1]*x[1]
	f1 := 1.5 - x[0]*t1
	f2 := 2.25 - x[0]*t2
	f3 := 2.625 - x[0]*t3

	h00 := 2 * (t1*t1 + t2*t2 + t3*t3)
	h01 := 2 * (f1 + x[1]*(2*f2+3*x[1]*f3) - x[0]*(t1+x[1]*(2*t2+3*x[1]*t3)))
	h11 := 2 * x[0] * (x[0] + 2*f2 + x[1]*(6*f3+x[0]*x[1]*(4+9*x[1]*x[1])))
	dst.SetSym(0, 0, h00)
	dst.SetSym(0, 1, h01)
	dst.SetSym(1, 1, h11)
}

func (Beale) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{3, 0.5},
			F:      0,
			Global: true,
		},
	}
}

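// A minimal usage sketch for the test functions in this file, assuming the
// optimize package of this module and its Problem/Minimize API; the wiring
// below is illustrative only and not part of this package:
//
//  p := optimize.Problem{
//  	Func: functions.Beale{}.Func,
//  	Grad: functions.Beale{}.Grad,
//  }
//  // Start from the easy standard starting point [1, 1].
//  result, err := optimize.Minimize(p, []float64{1, 1}, nil, nil)
//  if err != nil {
//  	log.Fatal(err)
//  }
//  fmt.Println(result.X) // Should approach the global minimum [3, 0.5].
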
// BiggsEXP2 implements Biggs' EXP2 function.
//
// Standard starting point:
//  [1, 2]
//
// Reference:
//  Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//  of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP2 struct{}

func (BiggsEXP2) Func(x []float64) (sum float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - 5*math.Exp(-x[1]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP2) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - 5*math.Exp(-x[1]*z) - y

		dfdx0 := -z * math.Exp(-x[0]*z)
		dfdx1 := 5 * z * math.Exp(-x[1]*z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
	}
}

func (BiggsEXP2) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10},
			F:      0,
			Global: true,
		},
	}
}

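// The analytic gradients in this file can be sanity-checked against a
// finite-difference approximation. A sketch assuming the diff/fd package of
// this module (illustrative only):
//
//  x := []float64{1, 2} // Standard starting point for BiggsEXP2.
//  analytic := make([]float64, len(x))
//  functions.BiggsEXP2{}.Grad(analytic, x)
//  numeric := fd.Gradient(nil, functions.BiggsEXP2{}.Func, x, nil)
//  fmt.Println(floats.EqualApprox(analytic, numeric, 1e-6)) // Expect true.
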
// BiggsEXP3 implements Biggs' EXP3 function.
//
// Standard starting point:
//  [1, 2, 1]
//
// Reference:
//  Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//  of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP3 struct{}

func (BiggsEXP3) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - x[2]*math.Exp(-x[1]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP3) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - x[2]*math.Exp(-x[1]*z) - y

		dfdx0 := -z * math.Exp(-x[0]*z)
		dfdx1 := x[2] * z * math.Exp(-x[1]*z)
		dfdx2 := -math.Exp(-x[1] * z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
	}
}

func (BiggsEXP3) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 5},
			F:      0,
			Global: true,
		},
	}
}

// BiggsEXP4 implements Biggs' EXP4 function.
//
// Standard starting point:
//  [1, 2, 1, 1]
//
// Reference:
//  Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//  of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP4 struct{}

func (BiggsEXP4) Func(x []float64) (sum float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}

	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP4) Grad(grad, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) - y

		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
		dfdx2 := math.Exp(-x[0] * z)
		dfdx3 := -math.Exp(-x[1] * z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
		grad[3] += 2 * f * dfdx3
	}
}

func (BiggsEXP4) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1, 5},
			F:      0,
			Global: true,
		},
	}
}

// BiggsEXP5 implements Biggs' EXP5 function.
//
// Standard starting point:
//  [1, 2, 1, 1, 1]
//
// Reference:
//  Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//  of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP5 struct{}

func (BiggsEXP5) Func(x []float64) (sum float64) {
	if len(x) != 5 {
		panic("dimension of the problem must be 5")
	}

	for i := 1; i <= 11; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + 3*math.Exp(-x[4]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP5) Grad(grad, x []float64) {
	if len(x) != 5 {
		panic("dimension of the problem must be 5")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 11; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + 3*math.Exp(-x[4]*z) - y

		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
		dfdx2 := math.Exp(-x[0] * z)
		dfdx3 := -math.Exp(-x[1] * z)
		dfdx4 := -3 * z * math.Exp(-x[4]*z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
		grad[3] += 2 * f * dfdx3
		grad[4] += 2 * f * dfdx4
	}
}

func (BiggsEXP5) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1, 5, 4},
			F:      0,
			Global: true,
		},
	}
}

// BiggsEXP6 implements Biggs' EXP6 function.
//
// Standard starting point:
//  [1, 2, 1, 1, 1, 1]
//
// References:
//  - Biggs, M.C.: Minimization algorithms making use of non-quadratic
//    properties of the objective function. IMA J Appl Math 8 (1971), 315-327;
//    doi:10.1093/imamat/8.3.315
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type BiggsEXP6 struct{}

func (BiggsEXP6) Func(x []float64) (sum float64) {
	if len(x) != 6 {
		panic("dimension of the problem must be 6")
	}

	for i := 1; i <= 13; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + x[5]*math.Exp(-x[4]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP6) Grad(grad, x []float64) {
	if len(x) != 6 {
		panic("dimension of the problem must be 6")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 13; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + x[5]*math.Exp(-x[4]*z) - y

		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
		dfdx2 := math.Exp(-x[0] * z)
		dfdx3 := -math.Exp(-x[1] * z)
		dfdx4 := -z * x[5] * math.Exp(-x[4]*z)
		dfdx5 := math.Exp(-x[4] * z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
		grad[3] += 2 * f * dfdx3
		grad[4] += 2 * f * dfdx4
		grad[5] += 2 * f * dfdx5
	}
}

func (BiggsEXP6) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1, 5, 4, 3},
			F:      0,
			Global: true,
		},
		{
			X: []float64{1.7114159947956764, 17.68319817846745, 1.1631436609697268,
				5.1865615510738605, 1.7114159947949301, 1.1631436609697998},
			F:      0.005655649925499929,
			Global: false,
		},
		{
			// The limit point has x[1] → ∞, x[3] → -∞, x[4] = x[0] and
			// x[5] = x[2]; the values 1000 and -1000 below stand in for
			// the infinite limits.
			X:      []float64{1.22755594752403, 1000, 0.83270306333466, -1000, 1.22755594752403, 0.83270306333466},
			F:      0.306366772624790,
			Global: false,
		},
	}
}

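// BiggsEXP6 is multimodal, so a descent method may legitimately stop at one
// of the nonzero local minima above. Comparing function values tells which
// basin a result landed in (illustrative only):
//
//  x := []float64{1, 10, 1, 5, 4, 3} // The global minimizer.
//  fmt.Println(functions.BiggsEXP6{}.Func(x))
//  // 0 here; the local minima above give ≈ 0.00566 and ≈ 0.3064.
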
// Box3D implements Box's three-dimensional function.
//
// Standard starting point:
//  [0, 10, 20]
//
// References:
//  - Box, M.J.: A comparison of several current optimization methods, and the
//    use of transformations in constrained problems. Comput J 9 (1966), 67-77
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Box3D struct{}

func (Box3D) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 10; i++ {
		c := -float64(i) / 10
		y := math.Exp(c) - math.Exp(10*c)
		f := math.Exp(c*x[0]) - math.Exp(c*x[1]) - x[2]*y
		sum += f * f
	}
	return sum
}

func (Box3D) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	grad[0] = 0
	grad[1] = 0
	grad[2] = 0
	for i := 1; i <= 10; i++ {
		c := -float64(i) / 10
		y := math.Exp(c) - math.Exp(10*c)
		f := math.Exp(c*x[0]) - math.Exp(c*x[1]) - x[2]*y
		grad[0] += 2 * f * c * math.Exp(c*x[0])
		grad[1] += -2 * f * c * math.Exp(c*x[1])
		grad[2] += -2 * f * y
	}
}

func (Box3D) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{10, 1, -1},
			F:      0,
			Global: true,
		},
		{
			// Any point on the line {a, a, 0}.
			X:      []float64{1, 1, 0},
			F:      0,
			Global: true,
		},
	}
}

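// Every point {a, a, 0} minimizes Box3D globally: with x[0] == x[1] the two
// exponentials cancel, and x[2] == 0 removes the y term, so each residual is
// identically zero. A direct check (illustrative only):
//
//  for _, a := range []float64{-2, 0.5, 7} {
//  	fmt.Println(functions.Box3D{}.Func([]float64{a, a, 0})) // 0 for every a.
//  }
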
// BraninHoo implements the Branin-Hoo function. BraninHoo is a 2-dimensional
// test function with three global minima. It is typically evaluated in the domain
// x_0 ∈ [-5, 10], x_1 ∈ [0, 15].
//  f(x) = (x_1 - (5.1/(4π^2))*x_0^2 + (5/π)*x_0 - 6)^2 + 10*(1-1/(8π))cos(x_0) + 10
// It has a minimum value of 0.397887 at x^* = {(-π, 12.275), (π, 2.275), (9.424778, 2.475)}
//
// Reference:
//  https://www.sfu.ca/~ssurjano/branin.html (obtained June 2017)
type BraninHoo struct{}

func (BraninHoo) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("functions: dimension of the problem must be 2")
	}
	a, b, c, r, s, t := 1.0, 5.1/(4*math.Pi*math.Pi), 5/math.Pi, 6.0, 10.0, 1/(8*math.Pi)

	term := x[1] - b*x[0]*x[0] + c*x[0] - r
	return a*term*term + s*(1-t)*math.Cos(x[0]) + s
}

func (BraninHoo) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{-math.Pi, 12.275},
			F:      0.397887,
			Global: true,
		},
		{
			X:      []float64{math.Pi, 2.275},
			F:      0.397887,
			Global: true,
		},
		{
			X:      []float64{9.424778, 2.475},
			F:      0.397887,
			Global: true,
		},
	}
}

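// BraninHoo provides no analytic Grad method, so gradient-based minimizers
// need a numerical gradient. A sketch assuming the diff/fd package of this
// module (illustrative only):
//
//  p := optimize.Problem{
//  	Func: functions.BraninHoo{}.Func,
//  	Grad: func(grad, x []float64) {
//  		fd.Gradient(grad, functions.BraninHoo{}.Func, x, nil)
//  	},
//  }
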
// BrownBadlyScaled implements Brown's badly scaled function.
//
// Standard starting point:
//  [1, 1]
//
// Reference:
//  More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//  optimization software. ACM Trans Math Softw 7 (1981), 17-41
type BrownBadlyScaled struct{}

func (BrownBadlyScaled) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	f1 := x[0] - 1e6
	f2 := x[1] - 2e-6
	f3 := x[0]*x[1] - 2
	return f1*f1 + f2*f2 + f3*f3
}

func (BrownBadlyScaled) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	f1 := x[0] - 1e6
	f2 := x[1] - 2e-6
	f3 := float64(x[0]*x[1]) - 2 // Prevent fused multiply subtract.
	grad[0] = 2*f1 + 2*f3*x[1]
	grad[1] = 2*f2 + 2*f3*x[0]
}

func (BrownBadlyScaled) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != dst.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	h00 := 2 + 2*x[1]*x[1]
	h01 := 4*x[0]*x[1] - 4
	h11 := 2 + 2*x[0]*x[0]
	dst.SetSym(0, 0, h00)
	dst.SetSym(0, 1, h01)
	dst.SetSym(1, 1, h11)
}

func (BrownBadlyScaled) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1e6, 2e-6},
			F:      0,
			Global: true,
		},
	}
}

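// The bad scaling is visible already at the standard starting point [1, 1]:
// the two gradient components differ by roughly twelve orders of magnitude,
// which defeats methods without scaling safeguards. Illustrative check:
//
//  grad := make([]float64, 2)
//  functions.BrownBadlyScaled{}.Grad(grad, []float64{1, 1})
//  fmt.Println(grad[0], grad[1]) // ≈ -2e6 versus ≈ -4e-6.
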
// BrownAndDennis implements the Brown and Dennis function.
//
// Standard starting point:
//  [25, 5, -5, -1]
//
// References:
//  - Brown, K.M., Dennis, J.E.: New computational algorithms for minimizing a
//    sum of squares of nonlinear functions. Research Report Number 71-6, Yale
//    University (1971)
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type BrownAndDennis struct{}

func (BrownAndDennis) Func(x []float64) (sum float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}

	for i := 1; i <= 20; i++ {
		c := float64(i) / 5
		f1 := x[0] + c*x[1] - math.Exp(c)
		f2 := x[2] + x[3]*math.Sin(c) - math.Cos(c)
		f := f1*f1 + f2*f2
		sum += f * f
	}
	return sum
}

func (BrownAndDennis) Grad(grad, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 20; i++ {
		c := float64(i) / 5
		f1 := x[0] + c*x[1] - math.Exp(c)
		f2 := x[2] + x[3]*math.Sin(c) - math.Cos(c)
		f := f1*f1 + f2*f2
		grad[0] += 4 * f * f1
		grad[1] += 4 * f * f1 * c
		grad[2] += 4 * f * f2
		grad[3] += 4 * f * f2 * math.Sin(c)
	}
}

func (BrownAndDennis) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != dst.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	for i := 0; i < 4; i++ {
		for j := i; j < 4; j++ {
			dst.SetSym(i, j, 0)
		}
	}
	for i := 1; i <= 20; i++ {
		d1 := float64(i) / 5
		d2 := math.Sin(d1)
		t1 := x[0] + d1*x[1] - math.Exp(d1)
		t2 := x[2] + d2*x[3] - math.Cos(d1)
		t := t1*t1 + t2*t2
		s3 := 2 * t1 * t2
		r1 := t + 2*t1*t1
		r2 := t + 2*t2*t2
		dst.SetSym(0, 0, dst.At(0, 0)+r1)
		dst.SetSym(0, 1, dst.At(0, 1)+d1*r1)
		dst.SetSym(1, 1, dst.At(1, 1)+d1*d1*r1)
		dst.SetSym(0, 2, dst.At(0, 2)+s3)
		dst.SetSym(1, 2, dst.At(1, 2)+d1*s3)
		dst.SetSym(2, 2, dst.At(2, 2)+r2)
		dst.SetSym(0, 3, dst.At(0, 3)+d2*s3)
		dst.SetSym(1, 3, dst.At(1, 3)+d1*d2*s3)
		dst.SetSym(2, 3, dst.At(2, 3)+d2*r2)
		dst.SetSym(3, 3, dst.At(3, 3)+d2*d2*r2)
	}
	for i := 0; i < 4; i++ {
		for j := i; j < 4; j++ {
			dst.SetSym(i, j, 4*dst.At(i, j))
		}
	}
}

func (BrownAndDennis) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{-11.594439904762162, 13.203630051207202, -0.4034394881768612, 0.2367787744557347},
			F:      85822.20162635634,
			Global: true,
		},
	}
}

// ExtendedPowellSingular implements the extended Powell singular function.
// Its Hessian matrix is singular at the minimizer.
//
// Standard starting point:
//  [3, -1, 0, 3, 3, -1, 0, 3, ..., 3, -1, 0, 3]
//
// References:
//  - Spedicato E.: Computational experience with quasi-Newton algorithms for
//    minimization problems of moderately large size. Towards Global
//    Optimization 2 (1978), 209-219
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type ExtendedPowellSingular struct{}

func (ExtendedPowellSingular) Func(x []float64) (sum float64) {
	if len(x)%4 != 0 {
		panic("dimension of the problem must be a multiple of 4")
	}

	for i := 0; i < len(x); i += 4 {
		f1 := x[i] + 10*x[i+1]
		f2 := x[i+2] - x[i+3]
		t := x[i+1] - 2*x[i+2]
		f3 := t * t
		t = x[i] - x[i+3]
		f4 := t * t
		sum += f1*f1 + 5*f2*f2 + f3*f3 + 10*f4*f4
	}
	return sum
}

func (ExtendedPowellSingular) Grad(grad, x []float64) {
	if len(x)%4 != 0 {
		panic("dimension of the problem must be a multiple of 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := 0; i < len(x); i += 4 {
		f1 := x[i] + 10*x[i+1]
		f2 := x[i+2] - x[i+3]
		t1 := x[i+1] - 2*x[i+2]
		f3 := t1 * t1
		t2 := x[i] - x[i+3]
		f4 := t2 * t2
		grad[i] = 2*f1 + 40*f4*t2
		grad[i+1] = 20*f1 + 4*f3*t1
		grad[i+2] = 10*f2 - 8*f3*t1
		grad[i+3] = -10*f2 - 40*f4*t2
	}
}

func (ExtendedPowellSingular) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0, 0, 0, 0},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{0, 0, 0, 0, 0, 0, 0, 0},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			F:      0,
			Global: true,
		},
	}
}

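// The standard starting point repeats the block [3, -1, 0, 3]. A small helper
// to build it for any dimension that is a multiple of 4 (illustrative, not
// part of this package):
//
//  func powellSingularStart(dim int) []float64 {
//  	x := make([]float64, dim)
//  	for i := 0; i < dim; i += 4 {
//  		x[i], x[i+1], x[i+2], x[i+3] = 3, -1, 0, 3
//  	}
//  	return x
//  }
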
// ExtendedRosenbrock implements the extended, multidimensional Rosenbrock
// function.
//
// Standard starting point:
//  Easy: [-1.2, 1, -1.2, 1, ...]
//  Hard: any point far from the minimum
//
// References:
//  - Rosenbrock, H.H.: An Automatic Method for Finding the Greatest or Least
//    Value of a Function. Computer J 3 (1960), 175-184
//  - http://en.wikipedia.org/wiki/Rosenbrock_function
type ExtendedRosenbrock struct{}

func (ExtendedRosenbrock) Func(x []float64) (sum float64) {
	for i := 0; i < len(x)-1; i++ {
		a := 1 - x[i]
		b := x[i+1] - x[i]*x[i]
		sum += a*a + 100*b*b
	}
	return sum
}

func (ExtendedRosenbrock) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	dim := len(x)
	for i := range grad {
		grad[i] = 0
	}
	// Prevent fused multiply add and fused multiply subtract.
	for i := 0; i < dim-1; i++ {
		grad[i] -= float64(2 * (1 - x[i]))
		grad[i] -= float64(400 * (x[i+1] - float64(x[i]*x[i])) * x[i])
	}
	for i := 1; i < dim; i++ {
		grad[i] += float64(200 * (x[i] - float64(x[i-1]*x[i-1])))
	}
}

func (ExtendedRosenbrock) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X: []float64{-0.7756592265653526, 0.6130933654850433,
				0.38206284633839305, 0.14597201855219452},
			F:      3.701428610430017,
			Global: false,
		},
		{
			X:      []float64{1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X: []float64{-0.9620510206947502, 0.9357393959767103,
				0.8807136041943204, 0.7778776758544063, 0.6050936785926526},
			F:      3.930839434133027,
			Global: false,
		},
		{
			X: []float64{-0.9865749795709938, 0.9833982288361819, 0.972106670053092,
				0.9474374368264362, 0.8986511848517299, 0.8075739520354182},
			F:      3.973940500930295,
			Global: false,
		},
		{
			X: []float64{-0.9917225725614055, 0.9935553935033712, 0.992173321594692,
				0.9868987626903134, 0.975164756608872, 0.9514319827049906, 0.9052228177139495},
			F:      3.9836005364248543,
			Global: false,
		},
		{
			X:      []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
	}
}

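// The easy standard starting point alternates -1.2 and 1. A small helper to
// build it for any dimension (illustrative, not part of this package):
//
//  func rosenbrockStart(dim int) []float64 {
//  	x := make([]float64, dim)
//  	for i := range x {
//  		if i%2 == 0 {
//  			x[i] = -1.2
//  		} else {
//  			x[i] = 1
//  		}
//  	}
//  	return x
//  }
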
// Gaussian implements the Gaussian function.
// The function has one global minimum and a number of false local minima
// caused by finite floating point precision.
//
// Standard starting point:
//  [0.4, 1, 0]
//
// Reference:
//  More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization
//  software. ACM Trans Math Softw 7 (1981), 17-41
type Gaussian struct{}

func (Gaussian) y(i int) (yi float64) {
	switch i {
	case 1, 15:
		yi = 0.0009
	case 2, 14:
		yi = 0.0044
	case 3, 13:
		yi = 0.0175
	case 4, 12:
		yi = 0.0540
	case 5, 11:
		yi = 0.1295
	case 6, 10:
		yi = 0.2420
	case 7, 9:
		yi = 0.3521
	case 8:
		yi = 0.3989
	}
	return yi
}

func (g Gaussian) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 15; i++ {
		c := 0.5 * float64(8-i)
		b := c - x[2]
		d := b * b
		e := math.Exp(-0.5 * x[1] * d)
		f := x[0]*e - g.y(i)
		sum += f * f
	}
	return sum
}

func (g Gaussian) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	grad[0] = 0
	grad[1] = 0
	grad[2] = 0
	for i := 1; i <= 15; i++ {
		c := 0.5 * float64(8-i)
		b := c - x[2]
		d := b * b
		e := math.Exp(-0.5 * x[1] * d)
		f := x[0]*e - g.y(i)
		grad[0] += 2 * f * e
		grad[1] -= f * e * d * x[0]
		grad[2] += 2 * f * e * x[0] * x[1] * b
	}
}

func (Gaussian) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0.398956137837997, 1.0000190844805048, 0},
			F:      1.12793276961912e-08,
			Global: true,
		},
	}
}

// GulfResearchAndDevelopment implements the Gulf Research and Development function.
//
// Standard starting point:
//  [5, 2.5, 0.15]
//
// References:
//  - Cox, R.A.: Comparison of the performance of seven optimization algorithms
//    on twelve unconstrained minimization problems. Ref. 1335CNO4, Gulf
//    Research and Development Company, Pittsburg (1969)
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type GulfResearchAndDevelopment struct{}

func (GulfResearchAndDevelopment) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 99; i++ {
		arg := float64(i) / 100
		r := math.Pow(-50*math.Log(arg), 2.0/3.0) + 25 - x[1]
		t1 := math.Pow(math.Abs(r), x[2]) / x[0]
		t2 := math.Exp(-t1)
		t := t2 - arg
		sum += t * t
	}
	return sum
}

func (GulfResearchAndDevelopment) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 99; i++ {
		arg := float64(i) / 100
		r := math.Pow(-50*math.Log(arg), 2.0/3.0) + 25 - x[1]
		t1 := math.Pow(math.Abs(r), x[2]) / x[0]
		t2 := math.Exp(-t1)
		t := t2 - arg
		s1 := t1 * t2 * t
		grad[0] += s1
		grad[1] += s1 / r
		grad[2] -= s1 * math.Log(math.Abs(r))
	}
	grad[0] *= 2 / x[0]
	grad[1] *= 2 * x[2]
	grad[2] *= 2
}

func (GulfResearchAndDevelopment) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{50, 25, 1.5},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{99.89529935174151, 60.61453902799833, 9.161242695144592},
			F:      32.8345,
			Global: false,
		},
		{
			X:      []float64{201.662589489426, 60.61633150468155, 10.224891158488965},
			F:      32.8345,
			Global: false,
		},
	}
}

// HelicalValley implements the helical valley function of Fletcher and Powell.
// The function is not defined at x[0] = 0.
//
// Standard starting point:
//  [-1, 0, 0]
//
// References:
//  - Fletcher, R., Powell, M.J.D.: A rapidly convergent descent method for
//    minimization. Comput J 6 (1963), 163-168
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type HelicalValley struct{}

func (HelicalValley) Func(x []float64) float64 {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if x[0] == 0 {
		panic("function not defined at x[0] = 0")
	}

	theta := 0.5 * math.Atan(x[1]/x[0]) / math.Pi
	if x[0] < 0 {
		theta += 0.5
	}
	f1 := 10 * (x[2] - 10*theta)
	f2 := 10 * (math.Hypot(x[0], x[1]) - 1)
	f3 := x[2]
	return f1*f1 + f2*f2 + f3*f3
}

func (HelicalValley) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	if x[0] == 0 {
		panic("function not defined at x[0] = 0")
	}

	theta := 0.5 * math.Atan(x[1]/x[0]) / math.Pi
	if x[0] < 0 {
		theta += 0.5
	}
	h := math.Hypot(x[0], x[1])
	r := 1 / h
	q := r * r / math.Pi
	s := x[2] - 10*theta
	grad[0] = 200 * (5*s*q*x[1] + (h-1)*r*x[0])
	grad[1] = 200 * (-5*s*q*x[0] + (h-1)*r*x[1])
	grad[2] = 2 * (100*s + x[2])
}

func (HelicalValley) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 0, 0},
			F:      0,
			Global: true,
		},
	}
}

// Linear implements a linear function.
type Linear struct{}

func (Linear) Func(x []float64) float64 {
	return floats.Sum(x)
}

func (Linear) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 1
	}
}

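// Linear is unbounded below, which is why it has no Minima method; it is
// useful mainly as a probe for how a minimizer behaves on a problem without a
// minimum. Illustrative check:
//
//  fmt.Println(functions.Linear{}.Func([]float64{-1e3, -1e3})) // -2000
//  fmt.Println(functions.Linear{}.Func([]float64{-1e6, -1e6})) // -2e6; f decreases without bound.
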
// PenaltyI implements the first penalty function by Gill, Murray and Pitfield.
//
// Standard starting point:
//  [1, ..., n]
//
// References:
//  - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised
//    quasi-Newton algorithms for unconstrained optimization. Report NAC 11,
//    National Phys Lab (1972), 82-83
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type PenaltyI struct{}

func (PenaltyI) Func(x []float64) (sum float64) {
	for _, v := range x {
		sum += (v - 1) * (v - 1)
	}
	sum *= 1e-5

	var s float64
	for _, v := range x {
		s += v * v
	}
	sum += (s - 0.25) * (s - 0.25)
	return sum
}

func (PenaltyI) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	s := -0.25
	for _, v := range x {
		s += v * v
	}
	for i, v := range x {
		grad[i] = 2 * (2*s*v + 1e-5*(v-1))
	}
}

func (PenaltyI) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0.2500074995875379, 0.2500074995875379, 0.2500074995875379, 0.2500074995875379},
			F:      2.2499775008999372e-05,
			Global: true,
		},
		{
			X: []float64{0.15812230111311634, 0.15812230111311634, 0.15812230111311634,
				0.15812230111311634, 0.15812230111311634, 0.15812230111311634,
				0.15812230111311634, 0.15812230111311634, 0.15812230111311634, 0.15812230111311634},
			F:      7.087651467090369e-05,
			Global: true,
		},
	}
}

// PenaltyII implements the second penalty function by Gill, Murray and Pitfield.
//
// Standard starting point:
//  [0.5, ..., 0.5]
//
// References:
//  - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised
//    quasi-Newton algorithms for unconstrained optimization. Report NAC 11,
//    National Phys Lab (1972), 82-83
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type PenaltyII struct{}

func (PenaltyII) Func(x []float64) (sum float64) {
	dim := len(x)
	s := -1.0
	for i, v := range x {
		s += float64(dim-i) * v * v
	}
	for i := 1; i < dim; i++ {
		yi := math.Exp(float64(i+1)/10) + math.Exp(float64(i)/10)
		f := math.Exp(x[i]/10) + math.Exp(x[i-1]/10) - yi
		sum += f * f
	}
	for i := 1; i < dim; i++ {
		f := math.Exp(x[i]/10) - math.Exp(-1.0/10)
		sum += f * f
	}
	sum *= 1e-5
	sum += (x[0] - 0.2) * (x[0] - 0.2)
	sum += s * s
	return sum
}

func (PenaltyII) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	dim := len(x)
	s := -1.0
	for i, v := range x {
		s += float64(dim-i) * v * v
	}
	for i, v := range x {
		grad[i] = 4 * s * float64(dim-i) * v
	}
	for i := 1; i < dim; i++ {
		yi := math.Exp(float64(i+1)/10) + math.Exp(float64(i)/10)
		f := math.Exp(x[i]/10) + math.Exp(x[i-1]/10) - yi
		grad[i] += 1e-5 * f * math.Exp(x[i]/10) / 5
		grad[i-1] += 1e-5 * f * math.Exp(x[i-1]/10) / 5
	}
	for i := 1; i < dim; i++ {
		f := math.Exp(x[i]/10) - math.Exp(-1.0/10)
		grad[i] += 1e-5 * f * math.Exp(x[i]/10) / 5
	}
	grad[0] += 2 * (x[0] - 0.2)
}

func (PenaltyII) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0.19999933335, 0.19131670128566283, 0.4801014860897, 0.5188454026659},
			F:      9.376293007355449e-06,
			Global: true,
		},
		{
			X: []float64{0.19998360520892217, 0.010350644318663525,
				0.01960493546891094, 0.03208906550305253, 0.04993267593895693,
				0.07651399534454084, 0.11862407118600789, 0.1921448731780023,
				0.3473205862372022, 0.36916437893066273},
			F:      0.00029366053745674594,
			Global: true,
		},
	}
}

// PowellBadlyScaled implements Powell's badly scaled function.
// The function is very flat near the minimum. A satisfactory solution is one
// that gives f(x) ≅ 1e-13.
//
// Standard starting point:
//  [0, 1]
//
// References:
//  - Powell, M.J.D.: A Hybrid Method for Nonlinear Equations. Numerical
//    Methods for Nonlinear Algebraic Equations, P. Rabinowitz (ed.), Gordon
//    and Breach (1970)
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type PowellBadlyScaled struct{}

func (PowellBadlyScaled) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	f1 := 1e4*x[0]*x[1] - 1
	f2 := math.Exp(-x[0]) + math.Exp(-x[1]) - 1.0001
	return f1*f1 + f2*f2
}

func (PowellBadlyScaled) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	f1 := 1e4*x[0]*x[1] - 1
	f2 := math.Exp(-x[0]) + math.Exp(-x[1]) - 1.0001
	grad[0] = 2 * (1e4*f1*x[1] - f2*math.Exp(-x[0]))
	grad[1] = 2 * (1e4*f1*x[0] - f2*math.Exp(-x[1]))
}

func (PowellBadlyScaled) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != dst.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	t1 := 1e4*x[0]*x[1] - 1
	s1 := math.Exp(-x[0])
	s2 := math.Exp(-x[1])
	t2 := s1 + s2 - 1.0001

	h00 := 2 * (1e8*x[1]*x[1] + s1*(s1+t2))
	h01 := 2 * (1e4*(1+2*t1) + s1*s2)
	h11 := 2 * (1e8*x[0]*x[0] + s2*(s2+t2))
	dst.SetSym(0, 0, h00)
	dst.SetSym(0, 1, h01)
	dst.SetSym(1, 1, h11)
}

func (PowellBadlyScaled) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1.0981593296997149e-05, 9.106146739867375},
			F:      0,
			Global: true,
		},
	}
}

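// The Hess methods in this file write into a *mat.SymDense whose order must
// equal the problem dimension. Minimal usage sketch (illustrative only):
//
//  x := []float64{0, 1} // Standard starting point.
//  hess := mat.NewSymDense(len(x), nil)
//  functions.PowellBadlyScaled{}.Hess(hess, x)
//  fmt.Println(hess.At(0, 1)) // Mixed partial derivative at x.
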
// Trigonometric implements the trigonometric function.
//
// Standard starting point:
//  [1/dim, ..., 1/dim]
//
// References:
//  - Spedicato E.: Computational experience with quasi-Newton algorithms for
//    minimization problems of moderately large size. Towards Global
//    Optimization 2 (1978), 209-219
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Trigonometric struct{}

func (Trigonometric) Func(x []float64) (sum float64) {
	var s1 float64
	for _, v := range x {
		s1 += math.Cos(v)
	}
	for i, v := range x {
		f := float64(len(x)+i+1) - float64(i+1)*math.Cos(v) - math.Sin(v) - s1
		sum += f * f
	}
	return sum
}

func (Trigonometric) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	var s1 float64
	for _, v := range x {
		s1 += math.Cos(v)
	}

	var s2 float64
	for i, v := range x {
		f := float64(len(x)+i+1) - float64(i+1)*math.Cos(v) - math.Sin(v) - s1
		s2 += f
		grad[i] = 2 * f * (float64(i+1)*math.Sin(v) - math.Cos(v))
	}

	for i, v := range x {
		grad[i] += 2 * s2 * math.Sin(v)
	}
}

func (Trigonometric) Minima() []Minimum {
	return []Minimum{
		{
			X: []float64{0.04296456438227447, 0.043976287478192246,
				0.045093397949095684, 0.04633891624617569, 0.047744381782831,
				0.04935473251330618, 0.05123734850076505, 0.19520946391410446,
				0.1649776652761741, 0.06014857783799575},
			F:      0,
			Global: true,
		},
		{
			// TODO(vladimir-ch): If we knew the location of this minimum more
			// accurately, we could decrease defaultGradTol.
			X: []float64{0.05515090434047145, 0.05684061730812344,
				0.05876400231100774, 0.060990608903034337, 0.06362621381044778,
				0.06684318087364617, 0.2081615177172172, 0.16436309604419047,
				0.08500689695564931, 0.09143145386293675},
			F:      2.795056121876575e-05,
			Global: false,
		},
	}
}

// VariablyDimensioned implements a variably dimensioned function.
//
// Standard starting point:
//  [..., (dim-i)/dim, ...], i=1,...,dim
//
// Reference:
//  More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization
//  software. ACM Trans Math Softw 7 (1981), 17-41
type VariablyDimensioned struct{}

func (VariablyDimensioned) Func(x []float64) (sum float64) {
	for _, v := range x {
		t := v - 1
		sum += t * t
	}

	var s float64
	for i, v := range x {
		s += float64(i+1) * (v - 1)
	}
	s *= s
	sum += s
	s *= s
	sum += s
	return sum
}

func (VariablyDimensioned) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	var s float64
	for i, v := range x {
		s += float64(i+1) * (v - 1)
	}
	for i, v := range x {
		grad[i] = 2 * (v - 1 + s*float64(i+1)*(1+2*s*s))
	}
}

func (VariablyDimensioned) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
	}
}

// Watson implements Watson's function.
// Dimension of the problem should be 2 <= dim <= 31. For dim == 9, the
// problem of minimizing the function is very ill conditioned.
//
// Standard starting point:
//  [0, ..., 0]
//
// References:
//  - Kowalik, J.S., Osborne, M.R.: Methods for Unconstrained Optimization
//    Problems. Elsevier North-Holland, New York, 1968
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Watson struct{}

func (Watson) Func(x []float64) (sum float64) {
	for i := 1; i <= 29; i++ {
		d1 := float64(i) / 29

		d2 := 1.0
		var s1 float64
		for j := 1; j < len(x); j++ {
			s1 += float64(j) * d2 * x[j]
			d2 *= d1
		}

		d2 = 1.0
		var s2 float64
		for _, v := range x {
			s2 += d2 * v
			d2 *= d1
		}

		t := s1 - s2*s2 - 1
		sum += t * t
	}
	t := x[1] - x[0]*x[0] - 1
	sum += x[0]*x[0] + t*t
	return sum
}

func (Watson) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 29; i++ {
		d1 := float64(i) / 29

		d2 := 1.0
		var s1 float64
		for j := 1; j < len(x); j++ {
			s1 += float64(j) * d2 * x[j]
			d2 *= d1
		}

		d2 = 1.0
		var s2 float64
		for _, v := range x {
			s2 += d2 * v
			d2 *= d1
		}

		t := s1 - s2*s2 - 1
		s3 := 2 * d1 * s2
		d2 = 2 / d1
		for j := range x {
			grad[j] += d2 * (float64(j) - s3) * t
			d2 *= d1
		}
	}
	t := x[1] - x[0]*x[0] - 1
	grad[0] += x[0] * (2 - 4*t)
	grad[1] += 2 * t
}

func (Watson) Hess(dst *mat.SymDense, x []float64) {
	dim := len(x)
	if len(x) != dst.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	for j := 0; j < dim; j++ {
		for k := j; k < dim; k++ {
			dst.SetSym(j, k, 0)
		}
	}
	for i := 1; i <= 29; i++ {
		d1 := float64(i) / 29
		d2 := 1.0
		var s1 float64
		for j := 1; j < dim; j++ {
			s1 += float64(j) * d2 * x[j]
			d2 *= d1
		}

		d2 = 1.0
		var s2 float64
		for _, v := range x {
			s2 += d2 * v
			d2 *= d1
		}

		t := s1 - s2*s2 - 1
		s3 := 2 * d1 * s2
		d2 = 2 / d1
		th := 2 * d1 * d1 * t
		for j := 0; j < dim; j++ {
			v := float64(j) - s3
			d3 := 1 / d1
			for k := 0; k <= j; k++ {
				dst.SetSym(k, j, dst.At(k, j)+d2*d3*(v*(float64(k)-s3)-th))
				d3 *= d1
			}
			d2 *= d1
		}
	}
	t1 := x[1] - x[0]*x[0] - 1
	dst.SetSym(0, 0, dst.At(0, 0)+8*x[0]*x[0]+2-4*t1)
	dst.SetSym(0, 1, dst.At(0, 1)-4*x[0])
	dst.SetSym(1, 1, dst.At(1, 1)+2)
}

func (Watson) Minima() []Minimum {
	return []Minimum{
		{
			X: []float64{-0.01572508644590686, 1.012434869244884, -0.23299162372002916,
				1.2604300800978554, -1.51372891341701, 0.9929964286340117},
			F:      0.0022876700535523838,
			Global: true,
		},
		{
			X: []float64{-1.5307036521992127e-05, 0.9997897039319495, 0.01476396369355022,
				0.14634232829939883, 1.0008211030046426, -2.617731140519101, 4.104403164479245,
				-3.1436122785568514, 1.0526264080103074},
			F:      1.399760138096796e-06,
			Global: true,
		},
		// TODO(vladimir-ch): More, Garbow, Hillstrom list just the value, but
		// not the location. Our minimizers find a minimum, but the value is
		// different.
		// {
		// 	// For dim == 12
		// 	F:      4.72238e-10,
		// 	Global: true,
		// },
		// TODO(vladimir-ch): netlib/uncon report a value of 2.48631d-20 for dim == 20.
	}
}

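// The minima listed above are for dim == 6 and dim == 9. A tabulated minimum
// can be verified directly against Func (illustrative only):
//
//  m := functions.Watson{}.Minima()[0]
//  fmt.Println(functions.Watson{}.Func(m.X) - m.F) // ≈ 0
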
// Wood implements Wood's function.
//
// Standard starting point:
//  [-3, -1, -3, -1]
//
// References:
//  - Colville, A.R.: A comparative study of nonlinear programming codes.
//    Report 320-2949, IBM New York Scientific Center (1968)
//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Wood struct{}

func (Wood) Func(x []float64) (sum float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}

	f1 := x[1] - x[0]*x[0]
	f2 := 1 - x[0]
	f3 := x[3] - x[2]*x[2]
	f4 := 1 - x[2]
	f5 := x[1] + x[3] - 2
	f6 := x[1] - x[3]
	return 100*f1*f1 + f2*f2 + 90*f3*f3 + f4*f4 + 10*f5*f5 + 0.1*f6*f6
}

func (Wood) Grad(grad, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	f1 := x[1] - x[0]*x[0]
	f2 := 1 - x[0]
	f3 := x[3] - x[2]*x[2]
	f4 := 1 - x[2]
	f5 := x[1] + x[3] - 2
	f6 := x[1] - x[3]
	grad[0] = -2 * (200*f1*x[0] + f2)
	grad[1] = 2 * (100*f1 + 10*f5 + 0.1*f6)
	grad[2] = -2 * (180*f3*x[2] + f4)
	grad[3] = 2 * (90*f3 + 10*f5 - 0.1*f6)
}

func (Wood) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != dst.Symmetric() {
		panic("incorrect size of the Hessian")
	}

	dst.SetSym(0, 0, 400*(3*x[0]*x[0]-x[1])+2)
	dst.SetSym(0, 1, -400*x[0])
	dst.SetSym(1, 1, 220.2)
	dst.SetSym(0, 2, 0)
	dst.SetSym(1, 2, 0)
	dst.SetSym(2, 2, 360*(3*x[2]*x[2]-x[3])+2)
	dst.SetSym(0, 3, 0)
	dst.SetSym(1, 3, 19.8)
	dst.SetSym(2, 3, -360*x[2])
	dst.SetSym(3, 3, 200.2)
}

func (Wood) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 1, 1, 1},
			F:      0,
			Global: true,
		},
	}
}

// ConcaveRight implements a univariate function that is concave to the right
// of the minimizer, which is located at x = sqrt(2).
//
// References:
//  More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//  ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.1)
type ConcaveRight struct{}

func (ConcaveRight) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	return -x[0] / (x[0]*x[0] + 2)
}

func (ConcaveRight) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	xSqr := x[0] * x[0]
	grad[0] = (xSqr - 2) / (xSqr + 2) / (xSqr + 2)
}

// ConcaveLeft implements a univariate function that is concave to the left of
// the minimizer, which is located at x = 399/250 = 1.596.
//
// References:
//  More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//  ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.2)
type ConcaveLeft struct{}

func (ConcaveLeft) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	return math.Pow(x[0]+0.004, 4) * (x[0] - 1.996)
}

func (ConcaveLeft) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	grad[0] = math.Pow(x[0]+0.004, 3) * (5*x[0] - 7.98)
}

// Plassmann implements a univariate oscillatory function where the value of L
// controls the number of oscillations. The value of Beta controls the size of
// the derivative at zero and the size of the interval where the strong Wolfe
// conditions can hold. For small values of Beta this function represents a
// difficult test problem for line searchers, also because the information
// based on the derivative is unreliable due to the oscillations.
//
// References:
//  More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//  ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.3)
type Plassmann struct {
	L    float64 // Number of oscillations for |x-1| ≥ Beta.
	Beta float64 // Size of the derivative at zero, f'(0) = -Beta.
}

func (f Plassmann) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	a := x[0]
	b := f.Beta
	l := f.L
	r := 2 * (1 - b) / l / math.Pi * math.Sin(l*math.Pi/2*a)
	switch {
	case a <= 1-b:
		r += 1 - a
	case 1-b < a && a <= 1+b:
		r += 0.5 * ((a-1)*(a-1)/b + b)
	default: // a > 1+b
		r += a - 1
	}
	return r
}

func (f Plassmann) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	a := x[0]
	b := f.Beta
	l := f.L
	grad[0] = (1 - b) * math.Cos(l*math.Pi/2*a)
	switch {
	case a <= 1-b:
		grad[0]--
	case 1-b < a && a <= 1+b:
		grad[0] += (a - 1) / b
	default: // a > 1+b
		grad[0]++
	}
}

// YanaiOzawaKaneko is a univariate convex function where the values of Beta1
// and Beta2 control the curvature around the minimum. Far away from the
// minimum the function approximates an absolute value function. Near the
// minimum, the function can either be sharply curved or flat, controlled by
// the parameter values.
//
// References:
//  - More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//    ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.4)
//  - Yanai, H., Ozawa, M., and Kaneko, S.: Interpolation methods in one dimensional
//    optimization. Computing 27 (1981), 155–163
type YanaiOzawaKaneko struct {
	Beta1 float64
	Beta2 float64
}

func (f YanaiOzawaKaneko) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	a := x[0]
	b1 := f.Beta1
	b2 := f.Beta2
	g1 := math.Sqrt(1+b1*b1) - b1
	g2 := math.Sqrt(1+b2*b2) - b2
	return g1*math.Sqrt((a-1)*(a-1)+b2*b2) + g2*math.Sqrt(a*a+b1*b1)
}

func (f YanaiOzawaKaneko) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	a := x[0]
	b1 := f.Beta1
	b2 := f.Beta2
	g1 := math.Sqrt(1+b1*b1) - b1
	g2 := math.Sqrt(1+b2*b2) - b2
	grad[0] = g1*(a-1)/math.Sqrt(b2*b2+(a-1)*(a-1)) + g2*a/math.Sqrt(b1*b1+a*a)
}
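
// The univariate functions above (ConcaveRight, ConcaveLeft, Plassmann and
// YanaiOzawaKaneko) are line-search test cases from the More-Thuente
// reference: they take slices of length 1, and their struct parameters shape
// the difficulty. Illustrative use:
//
//  f := functions.Plassmann{L: 20, Beta: 0.01} // Many oscillations, f'(0) = -0.01.
//  grad := make([]float64, 1)
//  for _, a := range []float64{0.5, 1, 1.5} {
//  	f.Grad(grad, []float64{a})
//  	fmt.Println(f.Func([]float64{a}), grad[0])
//  }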