gonum.org/v1/gonum@v0.14.0/optimize/functions/functions.go

// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package functions

import (
	"math"

	"gonum.org/v1/gonum/floats"
	"gonum.org/v1/gonum/mat"
)

// Beale implements the Beale function.
//
// Standard starting points:
//
//	Easy: [1, 1]
//	Hard: [1, 4]
//
// References:
//   - Beale, E.: On an Iterative Method for Finding a Local Minimum of a
//     Function of More than One Variable. Technical Report 25, Statistical
//     Techniques Research Group, Princeton University (1958)
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Beale struct{}

func (Beale) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	f1 := 1.5 - x[0]*(1-x[1])
	f2 := 2.25 - x[0]*(1-x[1]*x[1])
	f3 := 2.625 - x[0]*(1-x[1]*x[1]*x[1])
	return f1*f1 + f2*f2 + f3*f3
}

func (Beale) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	t1 := 1 - x[1]
	t2 := 1 - x[1]*x[1]
	t3 := 1 - x[1]*x[1]*x[1]

	f1 := 1.5 - x[0]*t1
	f2 := 2.25 - x[0]*t2
	f3 := 2.625 - x[0]*t3

	grad[0] = -2 * (f1*t1 + f2*t2 + f3*t3)
	grad[1] = 2 * x[0] * (f1 + 2*f2*x[1] + 3*f3*x[1]*x[1])
}

func (Beale) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != dst.SymmetricDim() {
		panic("incorrect size of the Hessian")
	}

	t1 := 1 - x[1]
	t2 := 1 - x[1]*x[1]
	t3 := 1 - x[1]*x[1]*x[1]
	// The residuals match Func: fi = ci - x[0]*ti.
	f1 := 1.5 - x[0]*t1
	f2 := 2.25 - x[0]*t2
	f3 := 2.625 - x[0]*t3

	h00 := 2 * (t1*t1 + t2*t2 + t3*t3)
	h01 := 2 * (f1 + x[1]*(2*f2+3*x[1]*f3) - x[0]*(t1+x[1]*(2*t2+3*x[1]*t3)))
	h11 := 2 * x[0] * (x[0] + 2*f2 + x[1]*(6*f3+x[0]*x[1]*(4+9*x[1]*x[1])))
	dst.SetSym(0, 0, h00)
	dst.SetSym(0, 1, h01)
	dst.SetSym(1, 1, h11)
}

func (Beale) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{3, 0.5},
			F:      0,
			Global: true,
		},
	}
}
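
// The Func/Grad/Hess methods above follow the shapes used by the
// gonum.org/v1/gonum/optimize package. As a minimal sketch (assuming the
// gonum/diff/fd package for finite differences), the analytic gradient of
// Beale can be cross-checked against a numerical estimate at the hard
// starting point [1, 4]:
//
//	package main
//
//	import (
//		"fmt"
//
//		"gonum.org/v1/gonum/diff/fd"
//		"gonum.org/v1/gonum/optimize/functions"
//	)
//
//	func main() {
//		x := []float64{1, 4} // the hard standard starting point
//		grad := make([]float64, len(x))
//		functions.Beale{}.Grad(grad, x) // analytic gradient
//
//		// Numerical gradient; fd.Gradient allocates when dst is nil.
//		numGrad := fd.Gradient(nil, functions.Beale{}.Func, x, nil)
//		fmt.Println(grad, numGrad) // the two should agree to several digits
//	}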

// BiggsEXP2 implements the Biggs EXP2 function.
//
// Standard starting point:
//
//	[1, 2]
//
// Reference:
//
//	Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//	of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP2 struct{}

func (BiggsEXP2) Func(x []float64) (sum float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - 5*math.Exp(-x[1]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP2) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - 5*math.Exp(-x[1]*z) - y

		dfdx0 := -z * math.Exp(-x[0]*z)
		dfdx1 := 5 * z * math.Exp(-x[1]*z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
	}
}

func (BiggsEXP2) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10},
			F:      0,
			Global: true,
		},
	}
}

// BiggsEXP3 implements the Biggs EXP3 function.
//
// Standard starting point:
//
//	[1, 2, 1]
//
// Reference:
//
//	Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//	of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP3 struct{}

func (BiggsEXP3) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - x[2]*math.Exp(-x[1]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP3) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := math.Exp(-x[0]*z) - x[2]*math.Exp(-x[1]*z) - y

		dfdx0 := -z * math.Exp(-x[0]*z)
		dfdx1 := x[2] * z * math.Exp(-x[1]*z)
		dfdx2 := -math.Exp(-x[1] * z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
	}
}

func (BiggsEXP3) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 5},
			F:      0,
			Global: true,
		},
	}
}

// BiggsEXP4 implements the Biggs EXP4 function.
//
// Standard starting point:
//
//	[1, 2, 1, 1]
//
// Reference:
//
//	Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//	of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP4 struct{}

func (BiggsEXP4) Func(x []float64) (sum float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}

	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP4) Grad(grad, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 10; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) - y

		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
		dfdx2 := math.Exp(-x[0] * z)
		dfdx3 := -math.Exp(-x[1] * z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
		grad[3] += 2 * f * dfdx3
	}
}

func (BiggsEXP4) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1, 5},
			F:      0,
			Global: true,
		},
	}
}

// BiggsEXP5 implements the Biggs EXP5 function.
//
// Standard starting point:
//
//	[1, 2, 1, 1, 1]
//
// Reference:
//
//	Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
//	of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
type BiggsEXP5 struct{}

func (BiggsEXP5) Func(x []float64) (sum float64) {
	if len(x) != 5 {
		panic("dimension of the problem must be 5")
	}

	for i := 1; i <= 11; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + 3*math.Exp(-x[4]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP5) Grad(grad, x []float64) {
	if len(x) != 5 {
		panic("dimension of the problem must be 5")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 11; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + 3*math.Exp(-x[4]*z) - y

		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
		dfdx2 := math.Exp(-x[0] * z)
		dfdx3 := -math.Exp(-x[1] * z)
		dfdx4 := -3 * z * math.Exp(-x[4]*z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
		grad[3] += 2 * f * dfdx3
		grad[4] += 2 * f * dfdx4
	}
}

func (BiggsEXP5) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1, 5, 4},
			F:      0,
			Global: true,
		},
	}
}

// BiggsEXP6 implements the Biggs EXP6 function.
//
// Standard starting point:
//
//	[1, 2, 1, 1, 1, 1]
//
// References:
//   - Biggs, M.C.: Minimization algorithms making use of non-quadratic
//     properties of the objective function. IMA J Appl Math 8 (1971), 315-327;
//     doi:10.1093/imamat/8.3.315
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type BiggsEXP6 struct{}

func (BiggsEXP6) Func(x []float64) (sum float64) {
	if len(x) != 6 {
		panic("dimension of the problem must be 6")
	}

	for i := 1; i <= 13; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + x[5]*math.Exp(-x[4]*z) - y
		sum += f * f
	}
	return sum
}

func (BiggsEXP6) Grad(grad, x []float64) {
	if len(x) != 6 {
		panic("dimension of the problem must be 6")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 13; i++ {
		z := float64(i) / 10
		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + x[5]*math.Exp(-x[4]*z) - y

		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
		dfdx2 := math.Exp(-x[0] * z)
		dfdx3 := -math.Exp(-x[1] * z)
		dfdx4 := -z * x[5] * math.Exp(-x[4]*z)
		dfdx5 := math.Exp(-x[4] * z)

		grad[0] += 2 * f * dfdx0
		grad[1] += 2 * f * dfdx1
		grad[2] += 2 * f * dfdx2
		grad[3] += 2 * f * dfdx3
		grad[4] += 2 * f * dfdx4
		grad[5] += 2 * f * dfdx5
	}
}

func (BiggsEXP6) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1, 5, 4, 3},
			F:      0,
			Global: true,
		},
		{
			X: []float64{1.7114159947956764, 17.68319817846745, 1.1631436609697268,
				5.1865615510738605, 1.7114159947949301, 1.1631436609697998},
			F:      0.005655649925499929,
			Global: false,
		},
		{
			// This minimum has X[1] ≫ 0 and X[3] ≪ 0 (any sufficiently large
			// magnitudes give essentially the same value), with X[4] = X[0]
			// and X[5] = X[2]; 1000 and -1000 stand in for such values.
			X:      []float64{1.22755594752403, 1000, 0.83270306333466, -1000, 1.22755594752403, 0.83270306333466},
			F:      0.306366772624790,
			Global: false,
		},
	}
}

// Box3D implements Box's three-dimensional function.
//
// Standard starting point:
//
//	[0, 10, 20]
//
// References:
//   - Box, M.J.: A comparison of several current optimization methods, and the
//     use of transformations in constrained problems. Comput J 9 (1966), 67-77
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Box3D struct{}

func (Box3D) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 10; i++ {
		c := -float64(i) / 10
		y := math.Exp(c) - math.Exp(10*c)
		f := math.Exp(c*x[0]) - math.Exp(c*x[1]) - x[2]*y
		sum += f * f
	}
	return sum
}

func (Box3D) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	grad[0] = 0
	grad[1] = 0
	grad[2] = 0
	for i := 1; i <= 10; i++ {
		c := -float64(i) / 10
		y := math.Exp(c) - math.Exp(10*c)
		f := math.Exp(c*x[0]) - math.Exp(c*x[1]) - x[2]*y
		grad[0] += 2 * f * c * math.Exp(c*x[0])
		grad[1] += -2 * f * c * math.Exp(c*x[1])
		grad[2] += -2 * f * y
	}
}

func (Box3D) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 10, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{10, 1, -1},
			F:      0,
			Global: true,
		},
		{
			// Any point on the line {a, a, 0}.
			X:      []float64{1, 1, 0},
			F:      0,
			Global: true,
		},
	}
}

// BraninHoo implements the Branin-Hoo function. BraninHoo is a 2-dimensional
// test function with three global minima. It is typically evaluated in the domain
// x_0 ∈ [-5, 10], x_1 ∈ [0, 15].
//
//	f(x) = (x_1 - (5.1/(4π^2))*x_0^2 + (5/π)*x_0 - 6)^2 + 10*(1-1/(8π))cos(x_0) + 10
//
// It has a minimum value of 0.397887 at x^* = {(-π, 12.275), (π, 2.275), (9.424778, 2.475)}.
//
// Reference:
//
//	https://www.sfu.ca/~ssurjano/branin.html (obtained June 2017)
type BraninHoo struct{}

func (BraninHoo) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("functions: dimension of the problem must be 2")
	}
	a, b, c, r, s, t := 1.0, 5.1/(4*math.Pi*math.Pi), 5/math.Pi, 6.0, 10.0, 1/(8*math.Pi)

	term := x[1] - b*x[0]*x[0] + c*x[0] - r
	return a*term*term + s*(1-t)*math.Cos(x[0]) + s
}

func (BraninHoo) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{-math.Pi, 12.275},
			F:      0.397887,
			Global: true,
		},
		{
			X:      []float64{math.Pi, 2.275},
			F:      0.397887,
			Global: true,
		},
		{
			X:      []float64{9.424778, 2.475},
			F:      0.397887,
			Global: true,
		},
	}
}
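
// BraninHoo provides only Func, so it is a natural target for derivative-free
// methods. A minimal sketch (assuming the gonum/optimize API), starting from
// an arbitrary point in the usual domain:
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"gonum.org/v1/gonum/optimize"
//		"gonum.org/v1/gonum/optimize/functions"
//	)
//
//	func main() {
//		p := optimize.Problem{Func: functions.BraninHoo{}.Func}
//		x0 := []float64{2, 8} // arbitrary start inside [-5,10] × [0,15]
//		res, err := optimize.Minimize(p, x0, nil, &optimize.NelderMead{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		// res.X should be near one of the three global minima, res.F ≈ 0.397887.
//		fmt.Println(res.X, res.F)
//	}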

// BrownBadlyScaled implements Brown's badly scaled function.
//
// Standard starting point:
//
//	[1, 1]
//
// References:
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type BrownBadlyScaled struct{}

func (BrownBadlyScaled) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	f1 := x[0] - 1e6
	f2 := x[1] - 2e-6
	f3 := x[0]*x[1] - 2
	return f1*f1 + f2*f2 + f3*f3
}

func (BrownBadlyScaled) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	f1 := x[0] - 1e6
	f2 := x[1] - 2e-6
	f3 := float64(x[0]*x[1]) - 2 // Prevent fused multiply subtract.
	grad[0] = 2*f1 + 2*f3*x[1]
	grad[1] = 2*f2 + 2*f3*x[0]
}

func (BrownBadlyScaled) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != dst.SymmetricDim() {
		panic("incorrect size of the Hessian")
	}

	h00 := 2 + 2*x[1]*x[1]
	h01 := 4*x[0]*x[1] - 4
	h11 := 2 + 2*x[0]*x[0]
	dst.SetSym(0, 0, h00)
	dst.SetSym(0, 1, h01)
	dst.SetSym(1, 1, h11)
}

func (BrownBadlyScaled) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1e6, 2e-6},
			F:      0,
			Global: true,
		},
	}
}
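
// Because BrownBadlyScaled provides Hess in addition to Func and Grad, it can
// exercise second-order methods. A minimal sketch (assuming the
// gonum/optimize API):
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"gonum.org/v1/gonum/optimize"
//		"gonum.org/v1/gonum/optimize/functions"
//	)
//
//	func main() {
//		f := functions.BrownBadlyScaled{}
//		p := optimize.Problem{Func: f.Func, Grad: f.Grad, Hess: f.Hess}
//		x0 := []float64{1, 1} // standard starting point
//		res, err := optimize.Minimize(p, x0, nil, &optimize.Newton{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(res.X) // expected near [1e6, 2e-6]
//	}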

// BrownAndDennis implements the Brown and Dennis function.
//
// Standard starting point:
//
//	[25, 5, -5, -1]
//
// References:
//   - Brown, K.M., Dennis, J.E.: New computational algorithms for minimizing a
//     sum of squares of nonlinear functions. Research Report Number 71-6, Yale
//     University (1971)
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type BrownAndDennis struct{}

func (BrownAndDennis) Func(x []float64) (sum float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}

	for i := 1; i <= 20; i++ {
		c := float64(i) / 5
		f1 := x[0] + c*x[1] - math.Exp(c)
		f2 := x[2] + x[3]*math.Sin(c) - math.Cos(c)
		f := f1*f1 + f2*f2
		sum += f * f
	}
	return sum
}

func (BrownAndDennis) Grad(grad, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 20; i++ {
		c := float64(i) / 5
		f1 := x[0] + c*x[1] - math.Exp(c)
		f2 := x[2] + x[3]*math.Sin(c) - math.Cos(c)
		f := f1*f1 + f2*f2
		grad[0] += 4 * f * f1
		grad[1] += 4 * f * f1 * c
		grad[2] += 4 * f * f2
		grad[3] += 4 * f * f2 * math.Sin(c)
	}
}

func (BrownAndDennis) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != dst.SymmetricDim() {
		panic("incorrect size of the Hessian")
	}

	for i := 0; i < 4; i++ {
		for j := i; j < 4; j++ {
			dst.SetSym(i, j, 0)
		}
	}
	for i := 1; i <= 20; i++ {
		d1 := float64(i) / 5
		d2 := math.Sin(d1)
		t1 := x[0] + d1*x[1] - math.Exp(d1)
		t2 := x[2] + d2*x[3] - math.Cos(d1)
		t := t1*t1 + t2*t2
		s3 := 2 * t1 * t2
		r1 := t + 2*t1*t1
		r2 := t + 2*t2*t2
		dst.SetSym(0, 0, dst.At(0, 0)+r1)
		dst.SetSym(0, 1, dst.At(0, 1)+d1*r1)
		dst.SetSym(1, 1, dst.At(1, 1)+d1*d1*r1)
		dst.SetSym(0, 2, dst.At(0, 2)+s3)
		dst.SetSym(1, 2, dst.At(1, 2)+d1*s3)
		dst.SetSym(2, 2, dst.At(2, 2)+r2)
		dst.SetSym(0, 3, dst.At(0, 3)+d2*s3)
		dst.SetSym(1, 3, dst.At(1, 3)+d1*d2*s3)
		dst.SetSym(2, 3, dst.At(2, 3)+d2*r2)
		dst.SetSym(3, 3, dst.At(3, 3)+d2*d2*r2)
	}
	for i := 0; i < 4; i++ {
		for j := i; j < 4; j++ {
			dst.SetSym(i, j, 4*dst.At(i, j))
		}
	}
}

func (BrownAndDennis) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{-11.594439904762162, 13.203630051207202, -0.4034394881768612, 0.2367787744557347},
			F:      85822.20162635634,
			Global: true,
		},
	}
}

// ExtendedPowellSingular implements the extended Powell singular function.
// Its Hessian matrix is singular at the minimizer.
//
// Standard starting point:
//
//	[3, -1, 0, 3, 3, -1, 0, 3, ..., 3, -1, 0, 3]
//
// References:
//   - Spedicato E.: Computational experience with quasi-Newton algorithms for
//     minimization problems of moderately large size. Towards Global
//     Optimization 2 (1978), 209-219
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type ExtendedPowellSingular struct{}

func (ExtendedPowellSingular) Func(x []float64) (sum float64) {
	if len(x)%4 != 0 {
		panic("dimension of the problem must be a multiple of 4")
	}

	for i := 0; i < len(x); i += 4 {
		f1 := x[i] + 10*x[i+1]
		f2 := x[i+2] - x[i+3]
		t := x[i+1] - 2*x[i+2]
		f3 := t * t
		t = x[i] - x[i+3]
		f4 := t * t
		sum += f1*f1 + 5*f2*f2 + f3*f3 + 10*f4*f4
	}
	return sum
}

func (ExtendedPowellSingular) Grad(grad, x []float64) {
	if len(x)%4 != 0 {
		panic("dimension of the problem must be a multiple of 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := 0; i < len(x); i += 4 {
		f1 := x[i] + 10*x[i+1]
		f2 := x[i+2] - x[i+3]
		t1 := x[i+1] - 2*x[i+2]
		f3 := t1 * t1
		t2 := x[i] - x[i+3]
		f4 := t2 * t2
		grad[i] = 2*f1 + 40*f4*t2
		grad[i+1] = 20*f1 + 4*f3*t1
		grad[i+2] = 10*f2 - 8*f3*t1
		grad[i+3] = -10*f2 - 40*f4*t2
	}
}

func (ExtendedPowellSingular) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0, 0, 0, 0},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{0, 0, 0, 0, 0, 0, 0, 0},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			F:      0,
			Global: true,
		},
	}
}

// ExtendedRosenbrock implements the extended, multidimensional Rosenbrock
// function.
//
// Standard starting point:
//
//	Easy: [-1.2, 1, -1.2, 1, ...]
//	Hard: any point far from the minimum
//
// References:
//   - Rosenbrock, H.H.: An Automatic Method for Finding the Greatest or Least
//     Value of a Function. Computer J 3 (1960), 175-184
//   - http://en.wikipedia.org/wiki/Rosenbrock_function
type ExtendedRosenbrock struct{}

func (ExtendedRosenbrock) Func(x []float64) (sum float64) {
	for i := 0; i < len(x)-1; i++ {
		a := 1 - x[i]
		b := x[i+1] - x[i]*x[i]
		sum += a*a + 100*b*b
	}
	return sum
}

func (ExtendedRosenbrock) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	dim := len(x)
	for i := range grad {
		grad[i] = 0
	}
	// Prevent fused multiply add and fused multiply subtract.
	for i := 0; i < dim-1; i++ {
		grad[i] -= float64(2 * (1 - x[i]))
		grad[i] -= float64(400 * (x[i+1] - float64(x[i]*x[i])) * x[i])
	}
	for i := 1; i < dim; i++ {
		grad[i] += float64(200 * (x[i] - float64(x[i-1]*x[i-1])))
	}
}

func (ExtendedRosenbrock) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X: []float64{-0.7756592265653526, 0.6130933654850433,
				0.38206284633839305, 0.14597201855219452},
			F:      3.701428610430017,
			Global: false,
		},
		{
			X:      []float64{1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X: []float64{-0.9620510206947502, 0.9357393959767103,
				0.8807136041943204, 0.7778776758544063, 0.6050936785926526},
			F:      3.930839434133027,
			Global: false,
		},
		{
			X: []float64{-0.9865749795709938, 0.9833982288361819, 0.972106670053092,
				0.9474374368264362, 0.8986511848517299, 0.8075739520354182},
			F:      3.973940500930295,
			Global: false,
		},
		{
			X: []float64{-0.9917225725614055, 0.9935553935033712, 0.992173321594692,
				0.9868987626903134, 0.975164756608872, 0.9514319827049906, 0.9052228177139495},
			F:      3.9836005364248543,
			Global: false,
		},
		{
			X:      []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
	}
}
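
// With Grad available, a quasi-Newton method is the typical choice for the
// extended Rosenbrock function. A minimal sketch (assuming the gonum/optimize
// API), starting from the easy standard starting point:
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"gonum.org/v1/gonum/optimize"
//		"gonum.org/v1/gonum/optimize/functions"
//	)
//
//	func main() {
//		f := functions.ExtendedRosenbrock{}
//		p := optimize.Problem{Func: f.Func, Grad: f.Grad}
//		x0 := []float64{-1.2, 1, -1.2, 1} // easy standard starting point
//		res, err := optimize.Minimize(p, x0, nil, &optimize.LBFGS{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(res.X, res.F) // expected near [1, 1, 1, 1] with F ≈ 0
//	}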

// Gaussian implements the Gaussian function.
// The function has one global minimum and a number of false local minima
// caused by finite floating-point precision.
//
// Standard starting point:
//
//	[0.4, 1, 0]
//
// Reference:
//
//	More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization
//	software. ACM Trans Math Softw 7 (1981), 17-41
type Gaussian struct{}

func (Gaussian) y(i int) (yi float64) {
	switch i {
	case 1, 15:
		yi = 0.0009
	case 2, 14:
		yi = 0.0044
	case 3, 13:
		yi = 0.0175
	case 4, 12:
		yi = 0.0540
	case 5, 11:
		yi = 0.1295
	case 6, 10:
		yi = 0.2420
	case 7, 9:
		yi = 0.3521
	case 8:
		yi = 0.3989
	}
	return yi
}

func (g Gaussian) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 15; i++ {
		c := 0.5 * float64(8-i)
		b := c - x[2]
		d := b * b
		e := math.Exp(-0.5 * x[1] * d)
		f := x[0]*e - g.y(i)
		sum += f * f
	}
	return sum
}

func (g Gaussian) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	grad[0] = 0
	grad[1] = 0
	grad[2] = 0
	for i := 1; i <= 15; i++ {
		c := 0.5 * float64(8-i)
		b := c - x[2]
		d := b * b
		e := math.Exp(-0.5 * x[1] * d)
		f := x[0]*e - g.y(i)
		grad[0] += 2 * f * e
		grad[1] -= f * e * d * x[0]
		grad[2] += 2 * f * e * x[0] * x[1] * b
	}
}

func (Gaussian) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0.398956137837997, 1.0000190844805048, 0},
			F:      1.12793276961912e-08,
			Global: true,
		},
	}
}

// GulfResearchAndDevelopment implements the Gulf Research and Development function.
//
// Standard starting point:
//
//	[5, 2.5, 0.15]
//
// References:
//   - Cox, R.A.: Comparison of the performance of seven optimization algorithms
//     on twelve unconstrained minimization problems. Ref. 1335CNO4, Gulf
//     Research and Development Company, Pittsburgh (1969)
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type GulfResearchAndDevelopment struct{}

func (GulfResearchAndDevelopment) Func(x []float64) (sum float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}

	for i := 1; i <= 99; i++ {
		arg := float64(i) / 100
		r := math.Pow(-50*math.Log(arg), 2.0/3.0) + 25 - x[1]
		t1 := math.Pow(math.Abs(r), x[2]) / x[0]
		t2 := math.Exp(-t1)
		t := t2 - arg
		sum += t * t
	}
	return sum
}

func (GulfResearchAndDevelopment) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 99; i++ {
		arg := float64(i) / 100
		r := math.Pow(-50*math.Log(arg), 2.0/3.0) + 25 - x[1]
		t1 := math.Pow(math.Abs(r), x[2]) / x[0]
		t2 := math.Exp(-t1)
		t := t2 - arg
		s1 := t1 * t2 * t
		grad[0] += s1
		grad[1] += s1 / r
		grad[2] -= s1 * math.Log(math.Abs(r))
	}
	grad[0] *= 2 / x[0]
	grad[1] *= 2 * x[2]
	grad[2] *= 2
}

func (GulfResearchAndDevelopment) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{50, 25, 1.5},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{99.89529935174151, 60.61453902799833, 9.161242695144592},
			F:      32.8345,
			Global: false,
		},
		{
			X:      []float64{201.662589489426, 60.61633150468155, 10.224891158488965},
			F:      32.8345,
			Global: false,
		},
	}
}

// HelicalValley implements the helical valley function of Fletcher and Powell.
// The function is not defined at x[0] = 0.
//
// Standard starting point:
//
//	[-1, 0, 0]
//
// References:
//   - Fletcher, R., Powell, M.J.D.: A rapidly convergent descent method for
//     minimization. Comput J 6 (1963), 163-168
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type HelicalValley struct{}

func (HelicalValley) Func(x []float64) float64 {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if x[0] == 0 {
		panic("function not defined at x[0] = 0")
	}

	theta := 0.5 * math.Atan(x[1]/x[0]) / math.Pi
	if x[0] < 0 {
		theta += 0.5
	}
	f1 := 10 * (x[2] - 10*theta)
	f2 := 10 * (math.Hypot(x[0], x[1]) - 1)
	f3 := x[2]
	return f1*f1 + f2*f2 + f3*f3
}

func (HelicalValley) Grad(grad, x []float64) {
	if len(x) != 3 {
		panic("dimension of the problem must be 3")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	if x[0] == 0 {
		panic("function not defined at x[0] = 0")
	}

	theta := 0.5 * math.Atan(x[1]/x[0]) / math.Pi
	if x[0] < 0 {
		theta += 0.5
	}
	h := math.Hypot(x[0], x[1])
	r := 1 / h
	q := r * r / math.Pi
	s := x[2] - 10*theta
	grad[0] = 200 * (5*s*q*x[1] + (h-1)*r*x[0])
	grad[1] = 200 * (-5*s*q*x[0] + (h-1)*r*x[1])
	grad[2] = 2 * (100*s + x[2])
}

func (HelicalValley) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 0, 0},
			F:      0,
			Global: true,
		},
	}
}

// Linear implements a linear function.
type Linear struct{}

func (Linear) Func(x []float64) float64 {
	return floats.Sum(x)
}

func (Linear) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 1
	}
}

// PenaltyI implements the first penalty function by Gill, Murray and Pitfield.
//
// Standard starting point:
//
//	[1, ..., n]
//
// References:
//   - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised
//     quasi-Newton algorithms for unconstrained optimization. Report NAC 11,
//     National Phys Lab (1972), 82-83
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type PenaltyI struct{}

func (PenaltyI) Func(x []float64) (sum float64) {
	for _, v := range x {
		sum += (v - 1) * (v - 1)
	}
	sum *= 1e-5

	var s float64
	for _, v := range x {
		s += v * v
	}
	sum += (s - 0.25) * (s - 0.25)
	return sum
}

func (PenaltyI) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	s := -0.25
	for _, v := range x {
		s += v * v
	}
	for i, v := range x {
		grad[i] = 2 * (2*s*v + 1e-5*(v-1))
	}
}

func (PenaltyI) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0.2500074995875379, 0.2500074995875379, 0.2500074995875379, 0.2500074995875379},
			F:      2.2499775008999372e-05,
			Global: true,
		},
		{
			X: []float64{0.15812230111311634, 0.15812230111311634, 0.15812230111311634,
				0.15812230111311634, 0.15812230111311634, 0.15812230111311634,
				0.15812230111311634, 0.15812230111311634, 0.15812230111311634, 0.15812230111311634},
			F:      7.087651467090369e-05,
			Global: true,
		},
	}
}

// PenaltyII implements the second penalty function by Gill, Murray and Pitfield.
//
// Standard starting point:
//
//	[0.5, ..., 0.5]
//
// References:
//   - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised
//     quasi-Newton algorithms for unconstrained optimization. Report NAC 11,
//     National Phys Lab (1972), 82-83
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type PenaltyII struct{}

func (PenaltyII) Func(x []float64) (sum float64) {
	dim := len(x)
	s := -1.0
	for i, v := range x {
		s += float64(dim-i) * v * v
	}
	for i := 1; i < dim; i++ {
		yi := math.Exp(float64(i+1)/10) + math.Exp(float64(i)/10)
		f := math.Exp(x[i]/10) + math.Exp(x[i-1]/10) - yi
		sum += f * f
	}
	for i := 1; i < dim; i++ {
		f := math.Exp(x[i]/10) - math.Exp(-1.0/10)
		sum += f * f
	}
	sum *= 1e-5
	sum += (x[0] - 0.2) * (x[0] - 0.2)
	sum += s * s
	return sum
}

func (PenaltyII) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	dim := len(x)
	s := -1.0
	for i, v := range x {
		s += float64(dim-i) * v * v
	}
	for i, v := range x {
		grad[i] = 4 * s * float64(dim-i) * v
	}
	for i := 1; i < dim; i++ {
		yi := math.Exp(float64(i+1)/10) + math.Exp(float64(i)/10)
		f := math.Exp(x[i]/10) + math.Exp(x[i-1]/10) - yi
		grad[i] += 1e-5 * f * math.Exp(x[i]/10) / 5
		grad[i-1] += 1e-5 * f * math.Exp(x[i-1]/10) / 5
	}
	for i := 1; i < dim; i++ {
		f := math.Exp(x[i]/10) - math.Exp(-1.0/10)
		grad[i] += 1e-5 * f * math.Exp(x[i]/10) / 5
	}
	grad[0] += 2 * (x[0] - 0.2)
}

func (PenaltyII) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{0.19999933335, 0.19131670128566283, 0.4801014860897, 0.5188454026659},
			F:      9.376293007355449e-06,
			Global: true,
		},
		{
			X: []float64{0.19998360520892217, 0.010350644318663525,
				0.01960493546891094, 0.03208906550305253, 0.04993267593895693,
				0.07651399534454084, 0.11862407118600789, 0.1921448731780023,
				0.3473205862372022, 0.36916437893066273},
			F:      0.00029366053745674594,
			Global: true,
		},
	}
}

// PowellBadlyScaled implements Powell's badly scaled function.
// The function is very flat near the minimum. A satisfactory solution is one
// that gives f(x) ≅ 1e-13.
//
// Standard starting point:
//
//	[0, 1]
//
// References:
//   - Powell, M.J.D.: A Hybrid Method for Nonlinear Equations. Numerical
//     Methods for Nonlinear Algebraic Equations, P. Rabinowitz (ed.), Gordon
//     and Breach (1970)
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type PowellBadlyScaled struct{}

func (PowellBadlyScaled) Func(x []float64) float64 {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}

	f1 := 1e4*x[0]*x[1] - 1
	f2 := math.Exp(-x[0]) + math.Exp(-x[1]) - 1.0001
	return f1*f1 + f2*f2
}

func (PowellBadlyScaled) Grad(grad, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	f1 := 1e4*x[0]*x[1] - 1
	f2 := math.Exp(-x[0]) + math.Exp(-x[1]) - 1.0001
	grad[0] = 2 * (1e4*f1*x[1] - f2*math.Exp(-x[0]))
	grad[1] = 2 * (1e4*f1*x[0] - f2*math.Exp(-x[1]))
}

func (PowellBadlyScaled) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 2 {
		panic("dimension of the problem must be 2")
	}
	if len(x) != dst.SymmetricDim() {
		panic("incorrect size of the Hessian")
	}

	t1 := 1e4*x[0]*x[1] - 1
	s1 := math.Exp(-x[0])
	s2 := math.Exp(-x[1])
	t2 := s1 + s2 - 1.0001

	h00 := 2 * (1e8*x[1]*x[1] + s1*(s1+t2))
	h01 := 2 * (1e4*(1+2*t1) + s1*s2)
	h11 := 2 * (1e8*x[0]*x[0] + s2*(s2+t2))
	dst.SetSym(0, 0, h00)
	dst.SetSym(0, 1, h01)
	dst.SetSym(1, 1, h11)
}

func (PowellBadlyScaled) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1.0981593296997149e-05, 9.106146739867375},
			F:      0,
			Global: true,
		},
	}
}

// Trigonometric implements the trigonometric function.
//
// Standard starting point:
//
//	[1/dim, ..., 1/dim]
//
// References:
//   - Spedicato E.: Computational experience with quasi-Newton algorithms for
//     minimization problems of moderately large size. Towards Global
//     Optimization 2 (1978), 209-219
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Trigonometric struct{}

func (Trigonometric) Func(x []float64) (sum float64) {
	var s1 float64
	for _, v := range x {
		s1 += math.Cos(v)
	}
	for i, v := range x {
		f := float64(len(x)+i+1) - float64(i+1)*math.Cos(v) - math.Sin(v) - s1
		sum += f * f
	}
	return sum
}

func (Trigonometric) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	var s1 float64
	for _, v := range x {
		s1 += math.Cos(v)
	}

	var s2 float64
	for i, v := range x {
		f := float64(len(x)+i+1) - float64(i+1)*math.Cos(v) - math.Sin(v) - s1
		s2 += f
		grad[i] = 2 * f * (float64(i+1)*math.Sin(v) - math.Cos(v))
	}

	for i, v := range x {
		grad[i] += 2 * s2 * math.Sin(v)
	}
}

func (Trigonometric) Minima() []Minimum {
	return []Minimum{
		{
			X: []float64{0.04296456438227447, 0.043976287478192246,
				0.045093397949095684, 0.04633891624617569, 0.047744381782831,
				0.04935473251330618, 0.05123734850076505, 0.19520946391410446,
				0.1649776652761741, 0.06014857783799575},
			F:      0,
			Global: true,
		},
		{
			// TODO(vladimir-ch): If we knew the location of this minimum more
			// accurately, we could decrease defaultGradTol.
			X: []float64{0.05515090434047145, 0.05684061730812344,
				0.05876400231100774, 0.060990608903034337, 0.06362621381044778,
				0.06684318087364617, 0.2081615177172172, 0.16436309604419047,
				0.08500689695564931, 0.09143145386293675},
			F:      2.795056121876575e-05,
			Global: false,
		},
	}
}

// VariablyDimensioned implements a variably dimensioned function.
//
// Standard starting point:
//
//	[..., (dim-i)/dim, ...], i=1,...,dim
//
// References:
//
//	More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization
//	software. ACM Trans Math Softw 7 (1981), 17-41
type VariablyDimensioned struct{}

func (VariablyDimensioned) Func(x []float64) (sum float64) {
	for _, v := range x {
		t := v - 1
		sum += t * t
	}

	var s float64
	for i, v := range x {
		s += float64(i+1) * (v - 1)
	}
	s *= s
	sum += s
	s *= s
	sum += s
	return sum
}

func (VariablyDimensioned) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	var s float64
	for i, v := range x {
		s += float64(i+1) * (v - 1)
	}
	for i, v := range x {
		grad[i] = 2 * (v - 1 + s*float64(i+1)*(1+2*s*s))
	}
}

func (VariablyDimensioned) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
		{
			X:      []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
			F:      0,
			Global: true,
		},
	}
}

// Watson implements Watson's function.
// The dimension of the problem should satisfy 2 <= dim <= 31. For dim == 9,
// the problem of minimizing the function is very ill-conditioned.
//
// Standard starting point:
//
//	[0, ..., 0]
//
// References:
//   - Kowalik, J.S., Osborne, M.R.: Methods for Unconstrained Optimization
//     Problems. Elsevier North-Holland, New York, 1968
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Watson struct{}

func (Watson) Func(x []float64) (sum float64) {
	for i := 1; i <= 29; i++ {
		d1 := float64(i) / 29

		d2 := 1.0
		var s1 float64
		for j := 1; j < len(x); j++ {
			s1 += float64(j) * d2 * x[j]
			d2 *= d1
		}

		d2 = 1.0
		var s2 float64
		for _, v := range x {
			s2 += d2 * v
			d2 *= d1
		}

		t := s1 - s2*s2 - 1
		sum += t * t
	}
	t := x[1] - x[0]*x[0] - 1
	sum += x[0]*x[0] + t*t
	return sum
}

func (Watson) Grad(grad, x []float64) {
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	for i := range grad {
		grad[i] = 0
	}
	for i := 1; i <= 29; i++ {
		d1 := float64(i) / 29

		d2 := 1.0
		var s1 float64
		for j := 1; j < len(x); j++ {
			s1 += float64(j) * d2 * x[j]
			d2 *= d1
		}

		d2 = 1.0
		var s2 float64
		for _, v := range x {
			s2 += d2 * v
			d2 *= d1
		}

		t := s1 - s2*s2 - 1
		s3 := 2 * d1 * s2
		d2 = 2 / d1
		for j := range x {
			grad[j] += d2 * (float64(j) - s3) * t
			d2 *= d1
		}
	}
	t := x[1] - x[0]*x[0] - 1
	grad[0] += x[0] * (2 - 4*t)
	grad[1] += 2 * t
}

func (Watson) Hess(dst *mat.SymDense, x []float64) {
	dim := len(x)
	if len(x) != dst.SymmetricDim() {
		panic("incorrect size of the Hessian")
	}

	for j := 0; j < dim; j++ {
		for k := j; k < dim; k++ {
			dst.SetSym(j, k, 0)
		}
	}
	for i := 1; i <= 29; i++ {
		d1 := float64(i) / 29
		d2 := 1.0
		var s1 float64
		for j := 1; j < dim; j++ {
			s1 += float64(j) * d2 * x[j]
			d2 *= d1
		}

		d2 = 1.0
		var s2 float64
		for _, v := range x {
			s2 += d2 * v
			d2 *= d1
		}

		t := s1 - s2*s2 - 1
		s3 := 2 * d1 * s2
		d2 = 2 / d1
		th := 2 * d1 * d1 * t
		for j := 0; j < dim; j++ {
			v := float64(j) - s3
			d3 := 1 / d1
			for k := 0; k <= j; k++ {
				dst.SetSym(k, j, dst.At(k, j)+d2*d3*(v*(float64(k)-s3)-th))
				d3 *= d1
			}
			d2 *= d1
		}
	}
	t1 := x[1] - x[0]*x[0] - 1
	dst.SetSym(0, 0, dst.At(0, 0)+8*x[0]*x[0]+2-4*t1)
	dst.SetSym(0, 1, dst.At(0, 1)-4*x[0])
	dst.SetSym(1, 1, dst.At(1, 1)+2)
}

func (Watson) Minima() []Minimum {
	return []Minimum{
		{
			X: []float64{-0.01572508644590686, 1.012434869244884, -0.23299162372002916,
				1.2604300800978554, -1.51372891341701, 0.9929964286340117},
			F:      0.0022876700535523838,
			Global: true,
		},
		{
			X: []float64{-1.5307036521992127e-05, 0.9997897039319495, 0.01476396369355022,
				0.14634232829939883, 1.0008211030046426, -2.617731140519101, 4.104403164479245,
				-3.1436122785568514, 1.0526264080103074},
			F:      1.399760138096796e-06,
			Global: true,
		},
		// TODO(vladimir-ch): More, Garbow, Hillstrom list just the value, but
		// not the location. Our minimizers find a minimum, but the value is
		// different.
		// {
		// 	// For dim == 12
		// 	F:      4.72238e-10,
		// 	Global: true,
		// },
		// TODO(vladimir-ch): netlib/uncon report a value of 2.48631d-20 for dim == 20.
	}
}

// Wood implements the Wood function.
//
// Standard starting point:
//
//	[-3, -1, -3, -1]
//
// References:
//   - Colville, A.R.: A comparative study of nonlinear programming codes.
//     Report 320-2949, IBM New York Scientific Center (1968)
//   - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
//     optimization software. ACM Trans Math Softw 7 (1981), 17-41
type Wood struct{}

func (Wood) Func(x []float64) (sum float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}

	f1 := x[1] - x[0]*x[0]
	f2 := 1 - x[0]
	f3 := x[3] - x[2]*x[2]
	f4 := 1 - x[2]
	f5 := x[1] + x[3] - 2
	f6 := x[1] - x[3]
	return 100*f1*f1 + f2*f2 + 90*f3*f3 + f4*f4 + 10*f5*f5 + 0.1*f6*f6
}

func (Wood) Grad(grad, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}

	f1 := x[1] - x[0]*x[0]
	f2 := 1 - x[0]
	f3 := x[3] - x[2]*x[2]
	f4 := 1 - x[2]
	f5 := x[1] + x[3] - 2
	f6 := x[1] - x[3]
	grad[0] = -2 * (200*f1*x[0] + f2)
	grad[1] = 2 * (100*f1 + 10*f5 + 0.1*f6)
	grad[2] = -2 * (180*f3*x[2] + f4)
	grad[3] = 2 * (90*f3 + 10*f5 - 0.1*f6)
}

func (Wood) Hess(dst *mat.SymDense, x []float64) {
	if len(x) != 4 {
		panic("dimension of the problem must be 4")
	}
	if len(x) != dst.SymmetricDim() {
		panic("incorrect size of the Hessian")
	}

	dst.SetSym(0, 0, 400*(3*x[0]*x[0]-x[1])+2)
	dst.SetSym(0, 1, -400*x[0])
	dst.SetSym(1, 1, 220.2)
	dst.SetSym(0, 2, 0)
	dst.SetSym(1, 2, 0)
	dst.SetSym(2, 2, 360*(3*x[2]*x[2]-x[3])+2)
	dst.SetSym(0, 3, 0)
	dst.SetSym(1, 3, 19.8)
	dst.SetSym(2, 3, -360*x[2])
	dst.SetSym(3, 3, 200.2)
}

func (Wood) Minima() []Minimum {
	return []Minimum{
		{
			X:      []float64{1, 1, 1, 1},
			F:      0,
			Global: true,
		},
	}
}

// ConcaveRight implements a univariate function that is concave to the right
// of the minimizer, which is located at x=sqrt(2).
//
// References:
//
//	More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//	ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.1)
type ConcaveRight struct{}

func (ConcaveRight) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	return -x[0] / (x[0]*x[0] + 2)
}

func (ConcaveRight) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	xSqr := x[0] * x[0]
	grad[0] = (xSqr - 2) / (xSqr + 2) / (xSqr + 2)
}

// ConcaveLeft implements a univariate function that is concave to the left of
// the minimizer, which is located at x=399/250=1.596.
//
// References:
//
//	More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//	ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.2)
type ConcaveLeft struct{}

func (ConcaveLeft) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	return math.Pow(x[0]+0.004, 4) * (x[0] - 1.996)
}

func (ConcaveLeft) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	grad[0] = math.Pow(x[0]+0.004, 3) * (5*x[0] - 7.98)
}

// Plassmann implements a univariate oscillatory function where the value of L
// controls the number of oscillations. The value of Beta controls the size of
// the derivative at zero and the size of the interval where the strong Wolfe
// conditions can hold. For small values of Beta this function represents a
// difficult test problem for line searches, also because the information
// based on the derivative is unreliable due to the oscillations.
//
// References:
//
//	More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//	ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.3)
type Plassmann struct {
	L    float64 // Number of oscillations for |x-1| ≥ Beta.
	Beta float64 // Size of the derivative at zero, f'(0) = -Beta.
}

func (f Plassmann) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	a := x[0]
	b := f.Beta
	l := f.L
	r := 2 * (1 - b) / l / math.Pi * math.Sin(l*math.Pi/2*a)
	switch {
	case a <= 1-b:
		r += 1 - a
	case 1-b < a && a <= 1+b:
		r += 0.5 * ((a-1)*(a-1)/b + b)
	default: // a > 1+b
		r += a - 1
	}
	return r
}

func (f Plassmann) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	a := x[0]
	b := f.Beta
	l := f.L
	grad[0] = (1 - b) * math.Cos(l*math.Pi/2*a)
	switch {
	case a <= 1-b:
		grad[0]--
	case 1-b < a && a <= 1+b:
		grad[0] += (a - 1) / b
	default: // a > 1+b
		grad[0]++
	}
}

// YanaiOzawaKaneko is a univariate convex function where the values of Beta1
// and Beta2 control the curvature around the minimum. Far away from the
// minimum the function approximates an absolute value function. Near the
// minimum, the function can either be sharply curved or flat, controlled by
// the parameter values.
//
// References:
//   - More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
//     ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.4)
//   - Yanai, H., Ozawa, M., and Kaneko, S.: Interpolation methods in one dimensional
//     optimization. Computing 27 (1981), 155–163
type YanaiOzawaKaneko struct {
	Beta1 float64
	Beta2 float64
}

func (f YanaiOzawaKaneko) Func(x []float64) float64 {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	a := x[0]
	b1 := f.Beta1
	b2 := f.Beta2
	g1 := math.Sqrt(1+b1*b1) - b1
	g2 := math.Sqrt(1+b2*b2) - b2
	return g1*math.Sqrt((a-1)*(a-1)+b2*b2) + g2*math.Sqrt(a*a+b1*b1)
}

func (f YanaiOzawaKaneko) Grad(grad, x []float64) {
	if len(x) != 1 {
		panic("dimension of the problem must be 1")
	}
	if len(x) != len(grad) {
		panic("incorrect size of the gradient")
	}
	a := x[0]
	b1 := f.Beta1
	b2 := f.Beta2
	g1 := math.Sqrt(1+b1*b1) - b1
	g2 := math.Sqrt(1+b2*b2) - b2
	grad[0] = g1*(a-1)/math.Sqrt(b2*b2+(a-1)*(a-1)) + g2*a/math.Sqrt(b1*b1+a*a)
}