/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.exception.NotPositiveException;
import org.apache.commons.math3.exception.NotStrictlyPositiveException;
import org.apache.commons.math3.exception.OutOfRangeException;
import org.apache.commons.math3.exception.TooManyEvaluationsException;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.EigenDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.optim.ConvergenceChecker;
import org.apache.commons.math3.optim.OptimizationData;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.math3.random.RandomGenerator;
import org.apache.commons.math3.util.MathArrays;

/**
 * <p>An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.</p>
 *
 * <p>In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to {@code N = 10} variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.</p>
 *
 * <p>The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. To observe the advantage of CMA compared
 * to a conventional evolution strategy, it will usually take about
 * {@code 30 N} function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * {@code 30 N} and {@code 300 N}<sup>2</sup>
 * function evaluations.</p>
 *
 * <p>This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.</p>
 *
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
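 *
 * <p>A minimal usage sketch (the objective function, bounds, and all
 * parameter values below are illustrative assumptions, not recommended
 * settings):</p>
 * <pre>{@code
 * CMAESOptimizer optimizer
 *     = new CMAESOptimizer(30000, 0, true, 0, 0,
 *                          new MersenneTwister(), false, null);
 * PointValuePair result
 *     = optimizer.optimize(new MaxEval(100000),
 *                          new ObjectiveFunction(new MultivariateFunction() {
 *                                  public double value(double[] x) {
 *                                      // sphere function, for illustration only
 *                                      double sum = 0;
 *                                      for (double xi : x) {
 *                                          sum += xi * xi;
 *                                      }
 *                                      return sum;
 *                                  }
 *                              }),
 *                          GoalType.MINIMIZE,
 *                          new InitialGuess(new double[] { 1, 1 }),
 *                          new SimpleBounds(new double[] { -10, -10 },
 *                                           new double[] { 10, 10 }),
 *                          new CMAESOptimizer.Sigma(new double[] { 0.5, 0.5 }),
 *                          new CMAESOptimizer.PopulationSize(10));
 * }</pre>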
 *
 * @version $Id: CMAESOptimizer.java 1400108 2012-10-19 14:20:16Z erans $
 * @since 3.0
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed.
     * Speed decreases, as a rule, at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
     * pos. def. and is numerically faster. Active CMA usually speeds up the
     * adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often a new random offspring is generated in case it is
     * not feasible / beyond the defined limits, default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension &gt; 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the goal is minimization (as opposed to maximization). */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if x-changes are larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if x-changes are smaller than stopTolX. */
    private double stopTolX;
    /** Stop if function value changes are smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if the range of the recent function value history is smaller than stopTolHistFun. */
    private double stopTolHistFun;

    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Diagonal of sqrt(D), stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;
    /** Size of history queue of best values. */
    private int historySize;

    /** Random generator. */
    private final RandomGenerator random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory = new ArrayList<Double>();
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory = new ArrayList<RealMatrix>();
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory = new ArrayList<Double>();
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory = new ArrayList<RealMatrix>();

    /**
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Stop (successfully) if the objective function value
     * becomes smaller than {@code stopFitness}.
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param random Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     *
     * @since 3.1
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          RandomGenerator random,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = checkFeasableCount;
        this.random = random;
        this.generateStatistics = generateStatistics;
    }

    /**
     * @return History of sigma values.
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /**
     * @return History of mean matrix.
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /**
     * @return History of fitness values.
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /**
     * @return History of D matrix.
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * Input sigma values.
     * They define the initial coordinate-wise standard deviations for
     * sampling new search points around the initial guess.
     * It is suggested to set them to the estimated distance from the
     * initial to the desired optimum.
     * Small values induce the search to be more local (and very small
     * values are more likely to find a local optimum close to the initial
     * guess).
     * Too small values might however lead to early termination.
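     * <p>For instance, if the optimum is believed to lie about one unit away
     * from the initial guess in each coordinate, a sketch could be (the
     * values are illustrative):</p>
     * <pre>{@code
     * Sigma s = new Sigma(new double[] { 1.0, 1.0 });
     * }</pre>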
     */
    public static class Sigma implements OptimizationData {
        /** Sigma values. */
        private final double[] sigma;

        /**
         * @param s Sigma values.
         * @throws NotPositiveException if any of the array entries is smaller
         * than zero.
         */
        public Sigma(double[] s)
            throws NotPositiveException {
            for (int i = 0; i < s.length; i++) {
                if (s[i] < 0) {
                    throw new NotPositiveException(s[i]);
                }
            }

            sigma = s.clone();
        }

        /**
         * @return the sigma values.
         */
        public double[] getSigma() {
            return sigma.clone();
        }
    }

    /**
     * Population size.
     * The number of offspring is the primary strategy parameter.
     * In the absence of better clues, a good default could be an
     * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
     * number of optimized parameters.
     * Increasing the population size improves global search properties
     * at the expense of speed (which in general decreases at most
     * linearly with increasing population size).
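     * <p>A sketch of the suggested default, assuming {@code n} holds the
     * problem dimension (the population size is not computed automatically
     * by this class; it must be supplied via this data object):</p>
     * <pre>{@code
     * int lambda = 4 + (int) (3 * Math.log(n));
     * }</pre>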
     */
    public static class PopulationSize implements OptimizationData {
        /** Population size. */
        private final int lambda;

        /**
         * @param size Population size.
         * @throws NotStrictlyPositiveException if {@code size <= 0}.
         */
        public PopulationSize(int size)
            throws NotStrictlyPositiveException {
            if (size <= 0) {
                throw new NotStrictlyPositiveException(size);
            }
            lambda = size;
        }

        /**
         * @return the population size.
         */
        public int getPopulationSize() {
            return lambda;
        }
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link org.apache.commons.math3.optim.MaxEval}</li>
     *  <li>{@link org.apache.commons.math3.optim.InitialGuess}</li>
     *  <li>{@link org.apache.commons.math3.optim.SimpleBounds}</li>
     *  <li>{@link org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction}</li>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     * @return {@inheritDoc}
     * @throws TooManyEvaluationsException if the maximal number of
     * evaluations is exceeded.
     * @throws DimensionMismatchException if the initial guess, target, and weight
     * arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData)
        throws TooManyEvaluationsException,
               DimensionMismatchException {
        // Retrieve settings.
        parseOptimizationData(optData);
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        checkParameters();
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        double bestValue = fitfun.value(guess);
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------

        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i < checkFeasableCount + 1; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for this column
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    fitness[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (TooManyEvaluationsException e) {
                    break generationLoop;
                }
            }
            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= Math.exp(Math.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null &&
                    lastResult != null) {
                    if (getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                        break generationLoop;
                    }
                }
            }
            // handle termination criteria
            // Break, if fitness is good enough
            if (stopFitness != 0) { // only if stopFitness is defined
                if (bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                    break generationLoop;
                }
            }
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * Math.max(Math.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                Math.max(historyWorst, worstFitness) -
                Math.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // Stop if the condition number of the covariance matrix exceeds 1e14
            // (diagD holds standard deviations, i.e. the square roots of the
            // eigenvalues of C, hence the ratio is checked against 1e7).
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma = sigma * Math.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && Math.max(historyWorst, bestFitness) -
                Math.min(historyBest, bestFitness) == 0) {
                sigma = sigma * Math.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            fitfun.setValueRange(worstFitness - bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    private void parseOptimizationData(OptimizationData... optData) {
        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        final double[] init = getStartPoint();
        final double[] lB = getLowerBound();
        final double[] uB = getUpperBound();

        if (inputSigma != null) {
            if (inputSigma.length != init.length) {
                throw new DimensionMismatchException(inputSigma.length, init.length);
            }
            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
     *
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new NotStrictlyPositiveException(lambda);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = Math.log(mu + 0.5);
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
                (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * Math.max(0, Math.sqrt((mueff - 1) /
                                               (dimension + 1)) - 1)) *
            Math.max(0.3,
                     1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = Math.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                          ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = Math.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = Math.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = Math.sqrt(dimension) *
            (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution path for C
        ps = zeros(dimension, 1); // evolution path for sigma
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension); // B defines the coordinate system
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
     *
     * @param zmean Weighted row matrix of the Gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        Math.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
        final boolean hsig = normps /
            Math.sqrt(1 - Math.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(Math.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly &gt; 0.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the Gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
            .add((times(diagC, square(bestArz).multiply(weights))) // plus rank mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the Gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiply(pc.transpose())
                .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C - active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (Math.pow(dimension + 2, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small population
                // sizes are most critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, MathArrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply( // plus rank mu update
                                              ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(Cneg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
        // The decomposition is refreshed only about every
        // 1 / ((ccov1 + ccovmu + negccov) * dimension * 10) iterations; the
        // grouping follows the Matlab code:
        // mod(countiter, 1/(ccov1+ccovmu+negccov)/N/10) < 1.
        if (ccov1 + ccovmu + negccov > 0 &&
            (iterations % (1. / (ccov1 + ccovmu + negccov) / dimension / 10.)) < 1) {
            // to achieve O(N^2)
            C = triu(C, 0).add(triu(C, 1).transpose());
            // enforce symmetry to prevent complex numbers
            final EigenDecomposition eig = new EigenDecomposition(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value into a history queue.
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        for (int i = vals.length - 1; i > 0; i--) {
            vals[i] = vals[i - 1];
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return an array of indices into {@code doubles} such that the
     * corresponding values are in ascending order.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Used to sort fitness values. Sorting is always in ascending
     * (lowest value first) order.
     */
    private static class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index of the value in the original (unsorted) array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index of the value in the original (unsorted) array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }
            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) ((1438542 ^ (bits >>> 32) ^ bits) & 0xffffffff);
        }
    }

    /**
     * Normalizes fitness values to the range [0,1]. Adds a penalty to the
     * fitness value if out of range. The penalty is adjusted by calling
     * setValueRange().
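     * The penalty is the coordinate-wise distance between the raw point and
     * its repaired (bound-clipped) counterpart, summed over all coordinates
     * and scaled by the current value range.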
     */
    private class FitnessFunction {
        /** Determines the penalty for boundary violations. */
        private double valueRange;
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor.
         */
        public FitnessFunction() {
            valueRange = 1;
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public double value(final double[] point) {
            double value;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = CMAESOptimizer.this.computeObjectiveValue(repaired) +
                    penalty(point, repaired);
            } else {
                value = CMAESOptimizer.this.computeObjectiveValue(point);
            }
            return isMinimize ? value : -value;
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param valueRange Adjusts the penalty computation.
         */
        public void setValueRange(double valueRange) {
            this.valueRange = valueRange;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds.
         */
        private double penalty(final double[] x, final double[] repaired) {
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = Math.abs(x[i] - repaired[i]);
                penalty += diff * valueRange;
            }
            return isMinimize ? penalty : -penalty;
        }
    }

    // -----Matrix utility functions similar to the Matlab built-in functions------

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = Math.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = Math.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param k Diagonal position.
     * @return Upper triangular part of matrix.
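     * Entry (r, c) is kept when {@code r <= c - k} and zeroed otherwise;
     * e.g. {@code triu(m, 0)} keeps the main diagonal and everything above
     * it, while {@code triu(m, 1)} keeps only the part strictly above the
     * diagonal (mirroring Matlab's {@code triu}).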
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix containing, for each column, the sum of the entries
     * in that column (i.e. the sum over rows, as in Matlab's {@code sum}).
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the diagonal n-by-n matrix if m is a column matrix, or the column
     * matrix representing the diagonal if m is an n-by-n matrix.
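     * For example, {@code diag} of the column vector {@code [1; 2]} is the
     * 2-by-2 matrix {@code [1 0; 0 2]}, and {@code diag} of that matrix is
     * the column vector {@code [1; 2]} again (mirroring Matlab's
     * {@code diag}).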
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix with ones on the diagonal and zeros elsewhere.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
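     * For example, replicating a 1-by-2 matrix with {@code n = 2} and
     * {@code m = 2} yields a 2-by-4 matrix containing four tiled copies of
     * the input (mirroring Matlab's {@code repmat}).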
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
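     * For example, {@code sequence(1, 5, 1)} is the column matrix
     * {@code [1; 2; 3; 4; 5]} (mirroring Matlab's colon operator
     * {@code start:step:end}).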
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
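     * For example, the inverse of {@code [2, 0, 1]} is {@code [1, 2, 0]}:
     * value 0 appears at position 1, value 1 at position 2, and value 2 at
     * position 0.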
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in reverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.nextGaussian();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a size-by-popSize matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.nextGaussian();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}