github.com/pachyderm/pachyderm@v1.13.4/examples/ml/housing-prices/utils.py

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve


def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
                        n_jobs=None, train_sizes=np.linspace(0.1, 1.0, 5)):
    """
    Generate three plots: the test and training learning curve, the training
    samples vs. fit times curve, and the fit times vs. score curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    axes : array of 3 axes, optional (default=None)
        Axes to use for plotting the curves.

    ylim : tuple, shape (ymin, ymax), optional
        Defines the minimum and maximum y values plotted.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

          - None, to use the default 5-fold cross-validation,
          - integer, to specify the number of folds,
          - :term:`CV splitter`,
          - an iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.

        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.

    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (which is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be large enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
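
    Examples
    --------
    An illustrative call; ``X`` and ``y`` here stand in for the caller's own
    data, and any estimator with ``fit``/``predict`` will do.

    >>> from sklearn.linear_model import LinearRegression
    >>> plot_learning_curve(LinearRegression(), "Learning curve",
    ...                     X, y, cv=5)  # doctest: +SKIP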
    """
    if axes is None:
        _, axes = plt.subplots(1, 3, figsize=(20, 5))

    axes[0].set_title(title)
    if ylim is not None:
        axes[0].set_ylim(*ylim)
    axes[0].set_xlabel("Training examples")
    axes[0].set_ylabel("Score")

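    # Run the estimator over increasing training-set sizes, collecting train
    # and test scores plus fit times for each CV split (score times are
    # discarded).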
    train_sizes, train_scores, test_scores, fit_times, _ = \
        learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
                       train_sizes=train_sizes,
                       return_times=True)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    fit_times_mean = np.mean(fit_times, axis=1)
    fit_times_std = np.std(fit_times, axis=1)

    # Plot learning curve
    axes[0].grid()
    axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
                         train_scores_mean + train_scores_std, alpha=0.1,
                         color="r")
    axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
                         test_scores_mean + test_scores_std, alpha=0.1,
                         color="g")
    axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
                 label="Training score")
    axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
                 label="Cross-validation score")
    axes[0].legend(loc="best")

    # Plot n_samples vs fit_times
    axes[1].grid()
    axes[1].plot(train_sizes, fit_times_mean, 'o-')
    axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
                         fit_times_mean + fit_times_std, alpha=0.1)
    axes[1].set_xlabel("Training examples")
    axes[1].set_ylabel("fit_times")
    axes[1].set_title("Scalability of the model")

    # Plot fit_time vs score
    axes[2].grid()
    axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
    axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
                         test_scores_mean + test_scores_std, alpha=0.1)
    axes[2].set_xlabel("fit_times")
    axes[2].set_ylabel("Score")
    axes[2].set_title("Performance of the model")

    return plt
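

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes
# scikit-learn's bundled California housing data and a plain LinearRegression
# estimator purely for illustration; the surrounding pipeline would supply
# its own data and model.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sklearn.datasets import fetch_california_housing
    from sklearn.linear_model import LinearRegression
    from sklearn.model_selection import ShuffleSplit

    # Load features and targets as plain arrays (downloads on first use).
    X, y = fetch_california_housing(return_X_y=True)

    # Five random train/test splits, holding out 20% of the samples each time.
    cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)

    plot_learning_curve(LinearRegression(),
                        "Learning curves (linear regression)",
                        X, y, cv=cv, n_jobs=4)
    plt.savefig("learning_curve.png")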