| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| 
	justincely/rolodex | 
	setup.py | 
	1 | 
	2102 | 
	from setuptools import setup, find_packages
setup(
    name = 'cos_monitoring',
    version = '0.0.1',
    description = 'Provide utilities and monitoring of COS data',
    author = 'Justin Ely',
    author_email = '[email protected]',
    keywords = ['astronomy'],
    classifiers = ['Programming Language :: Python',
                   'Programming Language :: Python :: 3',
                   'Development Status :: 1 - Planning',
                   'Intended Audience :: Science/Research',
                   'Topic :: Scientific/Engineering :: Astronomy',
                   'Topic :: Scientific/Engineering :: Physics',
                   'Topic :: Software Development :: Libraries :: Python Modules'],
    packages = find_packages(),
    requires = ['numpy', 'scipy', 'astropy', 'matplotlib'],
    entry_points = {'console_scripts': ['clean_slate=cos_monitoring.database:clean_slate',
                                        'cm_ingest=cos_monitoring.database:ingest_all',
                                        'cm_monitors=cos_monitoring.database:run_all_monitors',
                                        'create_master_csv=scripts.create_master_csv:main',
                                        'cosmo_retrieval=cos_monitoring.retrieval:run_cosmo_retrieval',
                                        'cm_reports=cos_monitoring.database.report:query_all',
                                        'cm_delete=cos_monitoring.database.database:cm_delete',
                                        'cm_describe=cos_monitoring.database.database:cm_describe',
                                        'cm_tot_gain=cos_monitoring.cci.gainmap:make_all_gainmaps_entry'],
    },
    install_requires = ['setuptools',
                        'numpy>=1.11.1',
                        'astropy>=1.0.1',
                        'sqlalchemy>=1.0.12',
                        'pymysql',
                        'matplotlib',
                        'scipy',
                        'fitsio',
                        'psutil',
                        'beautifulsoup4',
                        'pyfastcopy']
    )
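# Hedged note (not part of the original setup.py): after installation, e.g.
# `pip install .`, each console_scripts entry above becomes a shell command,
# so running `cm_monitors` would invoke cos_monitoring.database:run_all_monitors.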
 | 
	bsd-3-clause | 
| 
	pycroscopy/pycroscopy | 
	pycroscopy/processing/svd_utils.py | 
	1 | 
	20291 | 
	# -*- coding: utf-8 -*-
"""
USID utilities for performing randomized singular value decomposition and reconstructing results
Created on Mon Mar 28 09:45:08 2016
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import
import time
from multiprocessing import cpu_count
import numpy as np
from sklearn.utils import gen_batches
from sklearn.utils.extmath import randomized_svd
from sidpy.hdf.reg_ref import get_indices_for_region_ref, create_region_reference
from sidpy.hdf.hdf_utils import get_attr, write_simple_attrs, copy_attributes
from sidpy.proc.comp_utils import get_available_memory
from sidpy.base.string_utils import format_time
from sidpy.hdf.dtype_utils import check_dtype, stack_real_to_target_dtype
from pyUSID.processing.process import Process
from .proc_utils import get_component_slice
from pyUSID.io.hdf_utils import find_results_groups,  \
    reshape_to_n_dims, write_main_dataset, create_results_group, \
    create_indexed_group, find_dataset
from pyUSID import Dimension
from pyUSID.io.anc_build_utils import calc_chunks
from pyUSID import USIDataset
import h5py
from matplotlib import pyplot as plt
from pyUSID.viz import plot_utils
class SVD(Process):
    """
    This class provides a file-wrapper around the :meth:`sklearn.utils.extmath.randomized_svd` function.
    In other words, it extracts and then reformats the data present in the provided :class:`pyUSID.USIDataset` object,
    performs the randomized SVD operation and writes the results back to the USID HDF5 file after
    formatting the results in a USID-compliant manner.
    """
    def __init__(self, h5_main, num_components=None, **kwargs):
        """
        Perform the SVD decomposition on the selected dataset and write the results to h5 file.
        Parameters
        ----------
        h5_main : :class:`pyUSID.USIDataset` object
            USID Main HDF5 dataset that will be decomposed
        num_components : int, optional
            Number of components to decompose h5_main into.  Default None.
        h5_target_group : h5py.Group, optional. Default = None
            Location where to look for existing results and to place newly
            computed results. Use this kwarg if the results need to be written
            to a different HDF5 file. By default, this value is set to the
            parent group containing `h5_main`
        kwargs
            Arguments to be sent to Process
        """
        super(SVD, self).__init__(h5_main, 'SVD', **kwargs)
        '''
        Calculate the size of the main data in memory and compare to max_mem
        We use the minimum of the actual dtype's itemsize and float32 since we
        don't want to read it in yet and do the proper type conversions.
        '''
        n_samples, n_features = h5_main.shape
        self.data_transform_func, is_complex, is_compound, n_features, type_mult = check_dtype(h5_main)
        if num_components is None:
            num_components = min(n_samples, n_features)
        else:
            num_components = min(n_samples, n_features, num_components)
        self.num_components = num_components
        # Check that we can actually compute the SVD with the selected number of components
        self._check_available_mem()
        self.parms_dict = {'num_components': num_components}
        self.duplicate_h5_groups, self.partial_h5_groups = self._check_for_duplicates()
        # supercharge h5_main!
        self.h5_main = USIDataset(self.h5_main)
        self.__u = None
        self.__v = None
        self.__s = None
    def test(self, override=False):
        """
        Applies randomised SVD to the dataset. This function does NOT write results to the hdf5 file. Call compute() to
        write to the file. Handles complex, compound datasets such that the V matrix is of the same data-type as the
        input matrix.
        Parameters
        ----------
        override : bool, optional. default = False
            Set to true to recompute results if prior results are available. Else, returns existing results
        Returns
        -------
        U : :class:`numpy.ndarray`
            Abundance matrix
        S : :class:`numpy.ndarray`
            variance vector
        V : :class:`numpy.ndarray`
            eigenvector matrix
        """
        '''
        Check if a number of components has been set and ensure that the number is less than
        the minimum axis length of the data.  If both conditions are met, use fsvd.  If not
        use the regular svd.
        C.Smith -- We might need to put a lower limit on num_comps in the future.  I don't
                   know enough about svd to be sure.
        '''
        if not override:
            if isinstance(self.duplicate_h5_groups, list) and len(self.duplicate_h5_groups) > 0:
                self.h5_results_grp = self.duplicate_h5_groups[-1]
                print('Returning previously computed results from: {}'.format(self.h5_results_grp.name))
                print('set the "override" flag to True to recompute results')
                return reshape_to_n_dims(self.h5_results_grp['U'])[0], self.h5_results_grp['S'][()], \
                       reshape_to_n_dims(self.h5_results_grp['V'])[0]
        self.h5_results_grp = None
        t1 = time.time()
        self.__u, self.__s, self.__v = randomized_svd(self.data_transform_func(self.h5_main), self.num_components,
                                                      n_iter=3)
        self.__v = stack_real_to_target_dtype(self.__v, self.h5_main.dtype)
        print('Took {} to compute randomized SVD'.format(format_time(time.time() - t1)))
        u_mat, success = reshape_to_n_dims(self.__u, h5_pos=self.h5_main.h5_pos_inds,
                                           h5_spec=np.expand_dims(np.arange(self.__u.shape[1]), axis=0))
        if not success:
            raise ValueError('Could not reshape U to an N-dimensional dataset! Error: ' + str(success))
        # When the source dataset has a singular valued spectroscopic dimension
        # stack_real_to_target causes V to lose all its dimensions
        if self.__v.ndim == 0:
            # However, we want V to be 2D:
            self.__v = np.atleast_2d(self.__v)
        v_mat, success = reshape_to_n_dims(self.__v, h5_pos=np.expand_dims(np.arange(self.__u.shape[1]), axis=1),
                                           h5_spec=self.h5_main.h5_spec_inds)
        if not success:
            raise ValueError('Could not reshape V to an N-dimensional dataset! Error: ' + str(success))
        return u_mat, self.__s, v_mat
    def compute(self, override=False):
        """
        Computes SVD (by calling test() if it has not already been called) and writes results to file.
        Consider calling test() to check results before writing to file. Results are deleted from memory
        upon writing to the HDF5 file
        Parameters
        ----------
        override : bool, optional. default = False
            Set to true to recompute results if prior results are available. Else, returns existing results
        Returns
        -------
         h5_results_grp : :class:`h5py.Group`  object
            HDF5 Group containing all the results
        """
        if self.__u is None and self.__v is None and self.__s is None:
            self.test(override=override)
        if self.h5_results_grp is None:
            self._write_results_chunk()
            self.delete_results()
        h5_group = self.h5_results_grp
        return h5_group
    def delete_results(self):
        """
        Deletes results from memory.
        """
        del self.__u, self.__s, self.__v
        self.__u = None
        self.__v = None
        self.__s = None
    def _write_results_chunk(self):
        """
        Writes the provided SVD results to file
        Parameters
        ----------
        """
        comp_dim = Dimension('Principal Component', 'a. u.', len(self.__s))
        h5_svd_group = create_results_group(self.h5_main, self.process_name,
                                            h5_parent_group=self._h5_target_group)
        self.h5_results_grp = h5_svd_group
        self._write_source_dset_provenance()
        
        write_simple_attrs(h5_svd_group, self.parms_dict)
        write_simple_attrs(h5_svd_group, {'svd_method': 'sklearn-randomized'})
        h5_u = write_main_dataset(h5_svd_group, np.float32(self.__u), 'U', 'Abundance', 'a.u.', None, comp_dim,
                                  h5_pos_inds=self.h5_main.h5_pos_inds, h5_pos_vals=self.h5_main.h5_pos_vals,
                                  dtype=np.float32, chunks=calc_chunks(self.__u.shape, np.float32(0).itemsize))
        # print(get_attr(self.h5_main, 'quantity')[0])
        h5_v = write_main_dataset(h5_svd_group, self.__v, 'V', get_attr(self.h5_main, 'quantity')[0],
                                  'a.u.', comp_dim, None, h5_spec_inds=self.h5_main.h5_spec_inds,
                                  h5_spec_vals=self.h5_main.h5_spec_vals,
                                  chunks=calc_chunks(self.__v.shape, self.h5_main.dtype.itemsize))
        # No point making this 1D dataset a main dataset
        h5_s = h5_svd_group.create_dataset('S', data=np.float32(self.__s))
        '''
        Check h5_main for plot group references.
        Copy them into V if they exist
        '''
        for key in self.h5_main.attrs.keys():
            if '_Plot_Group' not in key:
                continue
            ref_inds = get_indices_for_region_ref(self.h5_main, self.h5_main.attrs[key], return_method='corners')
            ref_inds = ref_inds.reshape([-1, 2, 2])
            ref_inds[:, 1, 0] = h5_v.shape[0] - 1
            svd_ref = create_region_reference(h5_v, ref_inds)
            h5_v.attrs[key] = svd_ref
        # Marking completion:
        self._status_dset_name = 'completed_positions'
        self._h5_status_dset = h5_svd_group.create_dataset(self._status_dset_name,
                                                           data=np.ones(self.h5_main.shape[0], dtype=np.uint8))
        # keeping legacy option:
        h5_svd_group.attrs['last_pixel'] = self.h5_main.shape[0]
    def _check_available_mem(self):
        """
        Check that there is enough memory to perform the SVD decomposition.
        Returns
        -------
        sufficient_mem : bool
            True if enough memory was found, False otherwise.
        """
        if self.verbose:
            print('Checking memory availability.')
        n_samples, n_features = self.h5_main.shape
        s_mem_per_comp = np.float32(0).itemsize
        u_mem_per_comp = np.float32(0).itemsize * n_samples
        v_mem_per_comp = self.h5_main.dtype.itemsize * n_features
        mem_per_comp = s_mem_per_comp + u_mem_per_comp + v_mem_per_comp
        max_mem = get_available_memory()
        avail_mem = 0.75 * max_mem
        free_mem = avail_mem - self.h5_main.__sizeof__()
        if free_mem <= 0:
            error_message = 'Cannot load main dataset into memory.\n' + \
                            'Available memory is {}.  Dataset needs {}.'.format(avail_mem,
                                                                                self.h5_main.__sizeof__())
            raise MemoryError(error_message)
        if self.verbose:
            print('Memory available for SVD is {}.'.format(free_mem))
            print('Memory needed per component is {}.'.format(mem_per_comp))
        cant_svd = (free_mem - self.num_components * mem_per_comp) <= 0
        if cant_svd:
            max_comps = int(np.floor(free_mem / mem_per_comp))
            error_message = 'Not enough free memory for performing SVD with requested number of parameters.\n' + \
                            'Maximum possible parameters is {}.'.format(max_comps)
            raise MemoryError(error_message)
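# Hedged usage sketch (not part of the original module; the HDF5 path below is
# illustrative, assuming a USID-formatted file):
#
#   import h5py
#   from pyUSID import USIDataset
#
#   with h5py.File('my_data.h5', mode='r+') as h5_f:
#       h5_main = USIDataset(h5_f['Measurement_000/Channel_000/Raw_Data'])
#       svd = SVD(h5_main, num_components=32)
#       u_mat, s_vec, v_mat = svd.test()   # compute in memory only
#       h5_results_grp = svd.compute()     # write U, S, V back to the file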
###############################################################################
def simplified_kpca(kpca, source_data):
    """
    Performs kernel PCA on the provided dataset and returns the familiar
    eigenvector, eigenvalue, and scree matrices.
    Note that the positions in the eigenvalues may need to be transposed
    Parameters
    ----------
    kpca : KernelPCA object
        configured Kernel PCA object ready to perform analysis
    source_data : 2D numpy array
        Data arranged as [iteration, features] example - [position, time]
    Returns
    -------
    eigenvalues : 2D numpy array
        Eigenvalues in the original space arranged as [component,iteration]
    scree : 1D numpy array
        S component
    eigenvector : 2D numpy array
        Eigenvectors in the original space arranged as [component,features]
    """
    X_kpca = kpca.fit(source_data.T)
    eigenvectors = X_kpca.alphas_.T
    eigenvalues = X_kpca.fit_transform(source_data)
    # kpca_explained_variance = np.var(kpca.fit_transform(source_data), axis=0)
    # information_content = kpca_explained_variance / np.sum(kpca_explained_variance)
    scree = kpca.lambdas_
    return eigenvalues, scree, eigenvectors
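# Hedged example (assumes scikit-learn's KernelPCA with the legacy alphas_ /
# lambdas_ attributes; data shape and parameters are illustrative):
#
#   from sklearn.decomposition import KernelPCA
#   kpca = KernelPCA(n_components=8, kernel='rbf')
#   eigenvalues, scree, eigenvectors = simplified_kpca(kpca, source_data)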
def rebuild_svd(h5_main, components=None, cores=None, max_RAM_mb=1024):
    """
    Rebuild the Image from the SVD results on the windows
    Optionally, use only a subset of the components.
    Parameters
    ----------
    h5_main : hdf5 Dataset
        dataset which SVD was performed on
    components : {int, iterable of int, slice} optional
        Defines which components to keep
        Default - None, all components kept
        Input Types
        integer : Components less than the input will be kept
        length 2 iterable of integers : Integers define start and stop of component slice to retain
        other iterable of integers or slice : Selection of component indices to retain
    cores : int, optional
        How many cores should be used to rebuild
        Default - None, all but 2 cores will be used, min 1
    max_RAM_mb : int, optional
        Maximum amount of memory to use when rebuilding, in MB.
        Default - 1024 MB
    Returns
    -------
    rebuilt_data : HDF5 Dataset
        the rebuilt dataset
    """
    comp_slice, num_comps = get_component_slice(components, total_components=h5_main.shape[1])
    if isinstance(comp_slice, np.ndarray):
        comp_slice = list(comp_slice)
    dset_name = h5_main.name.split('/')[-1]
    # Ensuring that at least one core is available for use / 2 cores are available for other use
    max_cores = max(1, cpu_count() - 2)
    #         print('max_cores',max_cores)
    if cores is not None:
        cores = min(round(abs(cores)), max_cores)
    else:
        cores = max_cores
    max_memory = min(max_RAM_mb * 1024 ** 2, 0.75 * get_available_memory())
    if cores != 1:
        max_memory = int(max_memory / 2)
    '''
    Get the handles for the SVD results
    '''
    try:
        h5_svd_group = find_results_groups(h5_main, 'SVD')[-1]
        h5_S = h5_svd_group['S']
        h5_U = h5_svd_group['U']
        h5_V = h5_svd_group['V']
    except KeyError:
        raise KeyError('SVD Results for {dset} were not found.'.format(dset=dset_name))
    except:
        raise
    func, is_complex, is_compound, n_features, type_mult = check_dtype(h5_V)
    '''
    Calculate the size of a single batch that will fit in the available memory
    '''
    n_comps = h5_S[comp_slice].size
    mem_per_pix = (h5_U.dtype.itemsize + h5_V.dtype.itemsize * h5_V.shape[1]) * n_comps
    fixed_mem = h5_main.size * h5_main.dtype.itemsize
    if cores is None:
        free_mem = max_memory - fixed_mem
    else:
        free_mem = max_memory * 2 - fixed_mem
    batch_size = int(round(float(free_mem) / mem_per_pix))
    batch_slices = gen_batches(h5_U.shape[0], batch_size)
    print('Reconstructing in batches of {} positions.'.format(batch_size))
    print('Batches should be {} MB each.'.format(mem_per_pix * batch_size / 1024.0 ** 2))
    '''
    Loop over all batches.
    '''
    ds_V = np.dot(np.diag(h5_S[comp_slice]), func(h5_V[comp_slice, :]))
    rebuild = np.zeros((h5_main.shape[0], ds_V.shape[1]))
    for ibatch, batch in enumerate(batch_slices):
        rebuild[batch, :] += np.dot(h5_U[batch, comp_slice], ds_V)
    rebuild = stack_real_to_target_dtype(rebuild, h5_V.dtype)
    print('Completed reconstruction of data from SVD results.  Writing to file.')
    '''
    Create the Group and dataset to hold the rebuild data
    '''
    rebuilt_grp = create_indexed_group(h5_svd_group, 'Rebuilt_Data')
    h5_rebuilt = write_main_dataset(rebuilt_grp, rebuild, 'Rebuilt_Data',
                                    get_attr(h5_main, 'quantity'), get_attr(h5_main, 'units'),
                                    None, None,
                                    h5_pos_inds=h5_main.h5_pos_inds, h5_pos_vals=h5_main.h5_pos_vals,
                                    h5_spec_inds=h5_main.h5_spec_inds, h5_spec_vals=h5_main.h5_spec_vals,
                                    chunks=h5_main.chunks, compression=h5_main.compression)
    if isinstance(comp_slice, slice):
        rebuilt_grp.attrs['components_used'] = '{}-{}'.format(comp_slice.start, comp_slice.stop)
    else:
        rebuilt_grp.attrs['components_used'] = components
    copy_attributes(h5_main, h5_rebuilt, skip_refs=False)
    h5_main.file.flush()
    print('Done writing reconstructed data to file.')
    return h5_rebuilt
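# Hedged usage sketch (argument values are illustrative):
#
#   h5_rebuilt = rebuild_svd(h5_main, components=16, cores=2, max_RAM_mb=2048)
#   # reconstructs the data using only the first 16 SVD components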
def plot_svd(h5_main, savefig=False, num_plots = 16, **kwargs):
    '''
    Replots the SVD showing the scree, abundance maps, and eigenvectors.
    If h5_main is a Dataset, it will default to the most recent SVD group from that
    Dataset.
    If h5_main is the results group, then it will plot the values for that group.
    
    Parameters
    ----------   
    h5_main : USIDataset or h5py Dataset or h5py Group
    
    savefig : bool, optional
        Saves the figures to disk with some default names
    
    num_plots : int
        Default number of eigenvectors and abundance plots to show
    
    kwargs : dict, optional
        keyword arguments for svd filtering
        
    Returns
    -------
    None
    '''
    
    if isinstance(h5_main, h5py.Group):
        _U = find_dataset(h5_main, 'U')[-1]
        _V = find_dataset(h5_main, 'V')[-1]
        units = 'arbitrary (a.u.)'
        h5_spec_vals = np.arange(_V.shape[1])
        h5_svd_group = _U.parent
    else:
        h5_svd_group = find_results_groups(h5_main, 'SVD')[-1]
        units = h5_main.attrs['quantity']
        h5_spec_vals = h5_main.get_spec_values('Time')
    
    h5_U = h5_svd_group['U']
    h5_V = h5_svd_group['V']
    h5_S = h5_svd_group['S']
    
    _U = USIDataset(h5_U)
    [num_rows, num_cols] = _U.pos_dim_sizes
    
    abun_maps = np.reshape(h5_U[:, :num_plots], (num_rows, num_cols, -1))
    eigen_vecs = h5_V[:num_plots, :]
    
    skree_sum = np.zeros(h5_S.shape)
    for i in range(h5_S.shape[0]):
        skree_sum[i] = np.sum(h5_S[:i])/np.sum(h5_S)
    plt.figure()
    plt.plot(skree_sum, 'bo')
    plt.title('Cumulative Variance')
    plt.xlabel('Total Components')
    plt.ylabel('Total variance ratio (a.u.)')
    
    if savefig:
        plt.savefig('Cumulative_variance_plot.png')
    
    fig_skree, axes = plot_utils.plot_scree(h5_S, title='Scree plot')
    fig_skree.tight_layout()
    if savefig:
        plt.savefig('Scree_plot.png')
    
    fig_abun, axes = plot_utils.plot_map_stack(abun_maps, num_comps=num_plots, title='SVD Abundance Maps',
                                                  color_bar_mode='single', cmap='inferno', reverse_dims=True, 
                                                  fig_mult=(3.5,3.5), facecolor='white', **kwargs)
    fig_abun.tight_layout()
    if savefig:
        plt.savefig('Abundance_maps.png')
    
    fig_eigvec, axes = plot_utils.plot_curves(h5_spec_vals*1e3, eigen_vecs, use_rainbow_plots=False, 
                                                 x_label='Time (ms)', y_label=units, 
                                                 num_plots=num_plots, subtitle_prefix='Component', 
                                                 title='SVD Eigenvectors', evenly_spaced=False, 
                                                 **kwargs)
    fig_eigvec.tight_layout()
    if savefig:
        plt.savefig('Eigenvectors.png')
    
    return  | 
	mit | 
| 
	juliojsb/sarviewer | 
	plotters/matplotlib/swap.py | 
	1 | 
	2062 | 
	#!/usr/bin/env python2
"""
Author        :Julio Sanz
Website       :www.elarraydejota.com
Email         :[email protected]
Description   :Script to create a graph about swap usage
Dependencies  :Python 2.x, matplotlib
Usage         :python swap.py
License       :GPLv3
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt 
import csv
from datetime import datetime
import matplotlib.dates
# ======================
# VARIABLES
# ======================
# Aesthetic parameters
plt.rcParams.update({'font.size': 8})
plt.rcParams['lines.linewidth'] = 1.5
time_format = matplotlib.dates.DateFormatter('%H:%M:%S')
plt.gca().xaxis.set_major_formatter(time_format)
plt.gcf().autofmt_xdate()
# Time (column 0)
x = []
# Data arrays
swap_free = []
swap_used = []
# ======================
# FUNCTIONS
# ======================
def generate_graph():
    with open('../../data/swap.dat', 'r') as csvfile:
        data_source = csv.reader(csvfile, delimiter=' ', skipinitialspace=True)
        for row in data_source:
            # [0] column is a time column
            # Convert to datetime data type
            a = datetime.strptime((row[0]),'%H:%M:%S')
            x.append((a))
            # The remaining columns contain data
            swap_free.append(str(int(row[1])/1024))
            swap_used.append(str(int(row[2])/1024))
            
    # Plot lines
    plt.plot(x,swap_used, label='Used', color='r', antialiased=True)
    plt.plot(x,swap_free, label='Free', color='g', antialiased=True)
    
    # Graph properties
    plt.xlabel('Time',fontstyle='italic')
    plt.ylabel('SWAP (MB)',fontstyle='italic')
    plt.title('SWAP usage graph')
    plt.grid(linewidth=0.4, antialiased=True)
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2, fancybox=True, shadow=True)
    plt.autoscale(True)
    
    # Graph saved to PNG file
    plt.savefig('../../graphs/swap.png', bbox_inches='tight')
    #plt.show()
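# Hedged note (not part of the original script): ../../data/swap.dat is expected
# to hold space-separated rows such as
#   10:30:01 2048000 1024000
# i.e. HH:MM:SS, free swap in KB, used swap in KB, which generate_graph()
# converts to MB before plotting.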
# ======================
# MAIN
# ======================
if __name__ == '__main__':
    generate_graph() | 
	gpl-3.0 | 
| 
	ryfeus/lambda-packs | 
	Sklearn_scipy_numpy/source/sklearn/feature_selection/rfe.py | 
	6 | 
	17502 | 
	# Authors: Alexandre Gramfort <[email protected]>
#          Vincent Michel <[email protected]>
#          Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
    """Feature ranking with recursive feature elimination.
    Given an external estimator that assigns weights to features (e.g., the
    coefficients of a linear model), the goal of recursive feature elimination
    (RFE) is to select features by recursively considering smaller and smaller
    sets of features. First, the estimator is trained on the initial set of
    features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
    That procedure is recursively repeated on the pruned set until the desired
    number of features to select is eventually reached.
    Read more in the :ref:`User Guide <rfe>`.
    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important features
        must correspond to high absolute values in the `coef_` array.
        For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.
    n_features_to_select : int or None (default=None)
        The number of features to select. If `None`, half of the features
        are selected.
    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.
    estimator_params : dict
        Parameters for the external estimator.
        This attribute is deprecated as of version 0.16 and will be removed in
        0.18. Use estimator initialisation or set_params method instead.
    verbose : int, default=0
        Controls verbosity of output.
    Attributes
    ----------
    n_features_ : int
        The number of selected features.
    support_ : array of shape [n_features]
        The mask of selected features.
    ranking_ : array of shape [n_features]
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.
    estimator_ : object
        The external estimator fit on the reduced dataset.
    Examples
    --------
    The following example shows how to retrieve the 5 informative
    features in the Friedman #1 dataset.
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFE
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFE(estimator, 5, step=1)
    >>> selector = selector.fit(X, y)
    >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True,  True,  True,
            False, False, False, False, False], dtype=bool)
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, n_features_to_select=None, step=1,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        self.estimator_params = estimator_params
        self.verbose = verbose
    @property
    def _estimator_type(self):
        return self.estimator._estimator_type
    def fit(self, X, y):
        """Fit the RFE model and then the underlying estimator on the selected
           features.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The training input samples.
        y : array-like, shape = [n_samples]
            The target values.
        """
        return self._fit(X, y)
    def _fit(self, X, y, step_score=None):
        X, y = check_X_y(X, y, "csc")
        # Initialization
        n_features = X.shape[1]
        if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select
        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")
        if self.estimator_params is not None:
            warnings.warn("The parameter 'estimator_params' is deprecated as "
                          "of version 0.16 and will be removed in 0.18. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
        support_ = np.ones(n_features, dtype=np.bool)
        ranking_ = np.ones(n_features, dtype=np.int)
        if step_score:
            self.scores_ = []
        # Elimination
        while np.sum(support_) > n_features_to_select:
            # Remaining features
            features = np.arange(n_features)[support_]
            # Rank the remaining features
            estimator = clone(self.estimator)
            if self.estimator_params:
                estimator.set_params(**self.estimator_params)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))
            estimator.fit(X[:, features], y)
            # Get coefs
            if hasattr(estimator, 'coef_'):
                coefs = estimator.coef_
            elif hasattr(estimator, 'feature_importances_'):
                coefs = estimator.feature_importances_
            else:
                raise RuntimeError('The classifier does not expose '
                                   '"coef_" or "feature_importances_" '
                                   'attributes')
            # Get ranks
            if coefs.ndim > 1:
                ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(coefs))
            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)
            # Eliminate the worst features
            threshold = min(step, np.sum(support_) - n_features_to_select)
            # Compute step score on the previous selection iteration
            # because 'estimator' must use features
            # that have not been eliminated yet
            if step_score:
                self.scores_.append(step_score(estimator, features))
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1
        # Set final attributes
        features = np.arange(n_features)[support_]
        self.estimator_ = clone(self.estimator)
        if self.estimator_params:
            self.estimator_.set_params(**self.estimator_params)
        self.estimator_.fit(X[:, features], y)
        # Compute step score when only n_features_to_select features left
        if step_score:
            self.scores_.append(step_score(self.estimator_, features))
        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self
    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Reduce X to the selected features and then predict using the
           underlying estimator.
        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : array of shape [n_samples]
            The predicted target values.
        """
        return self.estimator_.predict(self.transform(X))
    @if_delegate_has_method(delegate='estimator')
    def score(self, X, y):
        """Reduce X to the selected features and then return the score of the
           underlying estimator.
        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.
        y : array of shape [n_samples]
            The target values.
        """
        return self.estimator_.score(self.transform(X), y)
    def _get_support_mask(self):
        return self.support_
    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        return self.estimator_.decision_function(self.transform(X))
    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        return self.estimator_.predict_proba(self.transform(X))
    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
    """Feature ranking with recursive feature elimination and cross-validated
    selection of the best number of features.
    Read more in the :ref:`User Guide <rfe>`.
    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important features
        must correspond to high absolute values in the `coef_` array.
        For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.
    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    estimator_params : dict
        Parameters for the external estimator.
        This attribute is deprecated as of version 0.16 and will be removed in
        0.18. Use estimator initialisation or set_params method instead.
    verbose : int, default=0
        Controls verbosity of output.
    Attributes
    ----------
    n_features_ : int
        The number of selected features with cross-validation.
    support_ : array of shape [n_features]
        The mask of selected features.
    ranking_ : array of shape [n_features]
        The feature ranking, such that `ranking_[i]`
        corresponds to the ranking
        position of the i-th feature.
        Selected (i.e., estimated best)
        features are assigned rank 1.
    grid_scores_ : array of shape [n_subsets_of_features]
        The cross-validation scores such that
        ``grid_scores_[i]`` corresponds to
        the CV score of the i-th subset of features.
    estimator_ : object
        The external estimator fit on the reduced dataset.
    Notes
    -----
    The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
    where step is the number of features removed at each iteration.
    Examples
    --------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFECV
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFECV(estimator, step=1, cv=5)
    >>> selector = selector.fit(X, y)
    >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True,  True,  True,
            False, False, False, False, False], dtype=bool)
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, step=1, cv=None, scoring=None,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.step = step
        self.cv = cv
        self.scoring = scoring
        self.estimator_params = estimator_params
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the RFE model and automatically tune the number of selected
           features.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the total number of features.
        y : array-like, shape = [n_samples]
            Target values (integers for classification, real numbers for
            regression).
        """
        X, y = check_X_y(X, y, "csr")
        if self.estimator_params is not None:
            warnings.warn("The parameter 'estimator_params' is deprecated as "
                          "of version 0.16 and will be removed in 0.18. "
                          "The parameter is no longer necessary because the "
                          "value is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
        # Initialization
        cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
        scorer = check_scoring(self.estimator, scoring=self.scoring)
        n_features = X.shape[1]
        n_features_to_select = 1
        # Determine the number of subsets of features
        scores = []
        # Cross-validation
        for n, (train, test) in enumerate(cv):
            X_train, y_train = _safe_split(self.estimator, X, y, train)
            X_test, y_test = _safe_split(self.estimator, X, y, test, train)
            rfe = RFE(estimator=self.estimator,
                      n_features_to_select=n_features_to_select,
                      step=self.step, estimator_params=self.estimator_params,
                      verbose=self.verbose - 1)
            rfe._fit(X_train, y_train, lambda estimator, features:
                     _score(estimator, X_test[:, features], y_test, scorer))
            scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
        scores = np.sum(np.concatenate(scores, 0), 0)
        # The index in 'scores' when 'n_features' features are selected
        n_feature_index = np.ceil((n_features - n_features_to_select) /
                                  float(self.step))
        n_features_to_select = max(n_features_to_select,
                                   n_features - ((n_feature_index -
                                                 np.argmax(scores)) *
                                                 self.step))
        # Re-execute an elimination with best_k over the whole set
        rfe = RFE(estimator=self.estimator,
                  n_features_to_select=n_features_to_select,
                  step=self.step, estimator_params=self.estimator_params)
        rfe.fit(X, y)
        # Set final attributes
        self.support_ = rfe.support_
        self.n_features_ = rfe.n_features_
        self.ranking_ = rfe.ranking_
        self.estimator_ = clone(self.estimator)
        if self.estimator_params:
            self.estimator_.set_params(**self.estimator_params)
        self.estimator_.fit(self.transform(X), y)
        # Fixing a normalization error, n is equal to len(cv) - 1
        # here, the scores are normalized by len(cv)
        self.grid_scores_ = scores / len(cv)
        return self
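# Hedged example (not part of the original module): inspecting grid_scores_
# after fitting, to pick the number of features.
#
#   import matplotlib.pyplot as plt
#   from sklearn.datasets import make_friedman1
#   from sklearn.svm import SVR
#   X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
#   selector = RFECV(SVR(kernel="linear"), step=1, cv=5).fit(X, y)
#   plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_)
#   plt.xlabel("Number of features selected")
#   plt.ylabel("Cross-validated score")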
 | 
	mit | 
| 
	AlexBryner/SalesforceTools | 
	SalesforceScripts.py | 
	1 | 
	12737 | 
	# coding: utf-8 
import numpy as np
import pandas as pd
import time
from datetime import datetime, timedelta, date
from time import sleep, gmtime, strftime
from pandas import DataFrame, Series, read_csv
from salesforce_bulk_api import SalesforceBulkJob
from SalesforceBulkQuery import *
from simple_salesforce import *
###################################################################################################
# Salesforce Credentials
# Creates SimpleSalesforce Login Instance
sf = Salesforce(username='', password='', security_token='', sandbox=False, client_id='')  # fill in credentials; sandbox flag assumed False here
###################################################################################################
def getBlankDF():
    return pd.DataFrame(np.nan, index=[], columns=[])
def NameCaseAsTitles(x):
    if (str(x).isupper() or str(x).islower()) and '@' not in str(x):
        return str(x).title()
    else:
        return x
    
def getDate(days):
    return(datetime.today() - timedelta(days=days)).strftime('%Y-%m-%dT00:00:00z') # YYYY-MM-DDThh:mm:ssz
def SFNulls(df, FillWith='#N/A'):
    """
        Description: Fills 0's and NAN's with "#N/A" which is the value that the Salesforce Bulk API recognizes as Null.
        Parameters:
            df = Pandas.DataFrame
        Recognizes 'float64', 'int64', and 'int32' data types.
    """
    df.apply(lambda s: pd.to_numeric(s, errors='ignore'))
    NumCol = df.columns.values.tolist()
    for col in NumCol:
        df[col] = df[col].replace(0, np.NAN).fillna('%s' % FillWith)
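# Hedged example (illustrative data):
#
#   df = pd.DataFrame({'AnnualRevenue': [0, 150000.0, np.nan]})
#   SFNulls(df)
#   # -> 0 and NaN become the string '#N/A'; 150000.0 is left unchanged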
def SFQuery(SOQL: str, InList=None, LowerHeaders=True, CheckParentChild=True, KeepAttributes=False):
    """
        Description: Queries Salesforce returning all results in a pandas dataframe.  This also sets all possible data types to numbers and sets column headers to lower case. If using InList, this functionality is built with pandas dataframe columns in mind to help simplify filtering from other SOQL results.
        Parameters:
            SOQL = Salesforce SOQL Statement
            InList* = List of items for an "IN" filter. Apex SOQL - "SELECT Id, Name FROM Account Where Id IN :ids"
                SOQL parameter must be written out to the point where the : would be set in a SOQL query in Apex.
                EX: SFQuery("SELECT Id, Name From Contact WHERE FirstName = 'Alex' and Id IN", IdsList)
                InList format - ['id1', 'id2', 'id2', 'id3', 'id3', 'id4', 'id5'] becomes ('id1', 'id2', 'id3', 'id4', 'id5')
                I usually use this with a dataframe column.  
                ex: "SFQuery("Select Id, Name From Contact Where Id In", InList=list(your_dataframe['column_name']))
            LowerHeaders = Returns the DataFrame with column headers lowercased, defaulted to True for previous projects
            CheckParentChild = This checks for relationships by looking for the ordered dictionaries returned by Salesforce.  It loops through to ensure it has reached the end of the line when stepping through multiple parent relationships.  Turn off if queries need to run slightly faster.
            
            InList* - This is not an efficient use of API calls.  There are limitations on the length of the queries, so this is capped at a default of 300 elements.  Nested SELECT statements in the WHERE clause are a more efficient use of API calls, but there are always tradeoffs.  At some point it would make more sense to utilize tuples, but unfortunately Salesforce did not like the format with the trailing comma.
    """
    def basicSOQL(SOQLstr : str):
        # formats the Salesforce ordered dictionary into a pandas dataframe
        try:
            od = sf.query_all("%s" % SOQLstr)
            items = {val: dict(od['records'][val]) for val in range(len(od['records'])) } 
            res = DataFrame.from_dict(items, orient='index')
            if LowerHeaders == True:
                res.columns = map(str.lower, res.columns)
            return res.apply(lambda s: pd.to_numeric(s, errors='ignore'))
        except ValueError:
            pass
    def CreateFilterStr(ListToStr):
        # creates a string from a list 
        # ['id1', 'id2', 'id3', 'id4', 'id5'] -> ('id1', 'id2', 'id3', 'id4', 'id5')
        resStr = "("
        r = 0
        for rl in ListToStr:
            if rl is not None:
                if r == 0:
                    resStr += "'"+str(rl)+"'"
                    r = 1
                elif r == 1:
                    resStr += ",'"+str(rl)+"'"
        resStr += ")"
        return resStr
    def BatchQueryList(toBatchList):
        # filters the list of duplicates then batches the lists in groups
        # [('id1', 'id2', 'id3', 'id4', 'id5'),('id6', 'id7', 'id8', 'id9', 'id10')]
        batchSize = 300
        newList = list(set(toBatchList))
        listSize = len(newList)
        startPoint = 0
        endPoint = batchSize
        res = []
        while startPoint < listSize:
            tempStr = CreateFilterStr(newList[startPoint:endPoint])
            res.append([tempStr])
            startPoint = endPoint
            endPoint += batchSize
        return res
    def InListQuery(SOQL, InList):
        # runs a query for each list from the batched lists and stacks the results
        filterLists = BatchQueryList(InList)
        resDF = None
        i = 0
        for i in range(0,len(filterLists)):
            tempDF = basicSOQL(SOQLstr = "%s %s" % (SOQL, filterLists[i][0]))
            try: resDF = resDF.append(tempDF, ignore_index=True)
            except AttributeError: resDF = tempDF
            i += 1
        return resDF
    def getChildRecords(obj, row):
        if row == None:
            return None
        size = row.get('totalSize')
        records = row.get('records')
        tempDic = {}
        for i in range(0,size):
            tempDic[i] = {}
            for field in records[i].keys():
                try:
                    records[i].get(field).keys()
                    continue
                except AttributeError:
                    pass
                tempDic[i][obj + '.' + field] = records[i].get(field)
        return tempDic
    def getParentRecords(field, row):
        if row == None:
            return None
        else:
            return row.get(field)
    
    rs = None
    if InList == None:
        rs = basicSOQL(SOQL)
    else:
        InList = list(InList)
        rs = InListQuery(SOQL, InList)
    
    # Drops the attributes column passed through by Salesforce
    if CheckParentChild == False and KeepAttributes == False:
        rs = rs.drop(['attributes'], axis=1)
        
    while CheckParentChild:
        CheckParentChild = False
        indexCols = []
        for col in rs:
            obj = None
            relationship = None
            for i in range(len(rs[col])):
                # scans down each column until finding an ordered dict to parse
                if rs[col][i] == None:
                    continue
                try:
                    if rs[col][i].get('type') != None and col == 'attributes':
                        if KeepAttributes == False:
                            rs = rs.drop([col], axis=1)
                        break
                except AttributeError:
                    indexCols.append(col) # will use this later for creating a multi indexed dataframe
                    break
                # Determines whether parent or child query and the object type
                try:
                    obj = rs[col][i].get('attributes').get('type')
                    relationship = 'Parent'
                except:
                    pass
                try:
                    obj = rs[col][i].get('records')[0].get('attributes').get('type')
                    relationship = 'Child'
                except:
                    pass
                break
                
            if relationship == 'Child' and obj != None:
                rs[col] = rs.apply(lambda row: getChildRecords(obj, row[col]), axis=1)
            elif relationship == 'Parent' and obj != None:
                fields = []
                for i in range(len(rs[col])):
                    if rs[col][i] != None:
                        fields.extend(list(rs[col][i].keys()))
                        fields = list(set(fields))
                        
                if KeepAttributes == False:
                    try:
                        fields.remove('attributes')
                    except ValueError:
                        pass
                for field in fields:
                    rs[obj + '.' + field] = rs.apply(lambda row: getParentRecords(field, row[col]), axis=1)
                rs = rs.drop([col], axis=1)
                CheckParentChild = True
        # next I'd like to setup an option for child relationship queries to return a multi indexed dataframe
        # print(indexCols)
    return rs
        
           
def SFFormat(df, SObject, EnforceNulls=False):
    """
        Description: Looks up data types and dynamically formats columns to a correct format for the Bulk Api. Returns error messages for invalid data types or column headers.  If EnforceNulls is true fills all blanks with #N/A, if false will set blanks to ''.
        Parameters:
            df = Pandas.DataFrame
            SObject = Type of object for the upload. Ex: 'Account'
            EnforceNulls = If true will fill blanks with #N/A to set as null in Salesforce
            
        *Currently only formats dates and datetimes
    """
    NoFieldError = ''
    InvalidDataError = ''
    
    df.columns = map(str.lower, df.columns)
    fieldDict = getattr(sf, '%s' % SObject).describe()["fields"]
    numFields = len(fieldDict)
    
    NumCol = df.columns.values.tolist()
    for col in NumCol:
        i = 0
        for x in fieldDict:
            if x['name'].lower() == col:
                dtype = x['type']
                length = x['length']
                try:
                    if dtype == 'date':
                        df[col] = pd.to_datetime(df[col]).dt.strftime('%Y-%m-%d').replace(to_replace='NaT', value='#N/A') 
                    elif dtype == 'datetime':
                        df[col] = pd.to_datetime(df[col]).dt.strftime('%Y-%m-%dT%H:%M:%S').replace(to_replace='NaT', value='#N/A')
                except ValueError: 
                    InvalidDataError += ("Invalid "+dtype+" : "+col+"\n")
                break
            i += 1
            if i >= numFields:
                NoFieldError += (SObject+" does not contain : "+col+"\n")
                
    SFNulls(df)
    if EnforceNulls == False:
        for col in NumCol:
            df[col] = df[col].replace('#N/A','')
    errors = NoFieldError+InvalidDataError
    if len(errors) > 0:
        return(errors)
    else:
        return('No Errors')
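# Hedged example (SObject and DataFrame names are illustrative):
#
#   errors = SFFormat(upload_df, 'Contact', EnforceNulls=False)
#   if errors != 'No Errors':
#       print(errors)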
    
def SFUpload(df, UploadType, Sobject, batchSize=49995, hangtime=0):
    """
        Description: Upload a pandas dataframe through the Salesforce Bulk API in batches of 50k. Can run either an insert or update to the listed Sobject.  Sobject and UploadType must be listed as a string. ex: 'Update', 'Account'  
        Parameters:
            df         = Pandas.DataFrame
            UploadType = Update or Insert
            Sobject    = Salesforce object in the upload. Ex - Accounts, Contact
            batchSize  = Number of rows that the upload will run before submitting the next group of rows in the dataset. Defaults to 49,995 (5 batches of 9999)
            hangtime   = Number of seconds to wait before uploading a new batch. Defaults to 0.
    """
    if len(df) == 0:
        return
    
    startRow = 0
    endRow = batchSize
    while startRow < len(df):
        upload = df[startRow:endRow]
        Headers = upload.columns.tolist()
        Data = upload.to_records(index=False)
        job = SalesforceBulkJob(UploadType, Sobject, salesforce=sf)
        job.upload(Headers,Data)
        startRow = endRow
        endRow = startRow + batchSize
        time.sleep(hangtime)
    
    
def SFBulkQuery(SObject, SOQL):
    """
        Description: Runs a query through the Bulk API.  Creates, tracks, and closes the request and returns the batch result iterator.  Currently there are lots of slightly obnoxious messages to help with tracking the current status.
        Parameters:
            SObject = Salesforce Object, ex: Account, Contact
            SOQL    = Salesforce SOQL Statement for bulk query
    """
    sfbulk = SalesforceBulk(sessionId=sf.session_id, host=sf.sf_instance)
    job = sfbulk.create_query_job(SObject, contentType='CSV')
    batch = sfbulk.query(job, SOQL)
    while not sfbulk.is_batch_done(job, batch):
        time.sleep(10)
    sfbulk.close_job(job)
    res = sfbulk.get_batch_result_iter(job, batch)
    return res
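# Hedged example (query is illustrative; the return value is the raw batch
# result iterator from salesforce_bulk, not a DataFrame):
#
#   results = SFBulkQuery('Account', 'SELECT Id, Name FROM Account')
#   for chunk in results:
#       print(chunk)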
 | 
	mit | 
| 
	Eigenstate/msmbuilder | 
	msmbuilder/commands/implied_timescales.py | 
	12 | 
	5214 | 
	# Author: Robert McGibbon <[email protected]>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
"""Scan the implied timescales of MarkovStateModels with respect to lag time.
This command will build a series of MarkovStateModels at different lag times,
and save a file to disk containing the relaxation timescales of each of the
models.
A plot of these data can then be used to choose the lag time [1].
References
----------
.. [1] Beauchamp, Kyle A., et al. "MSMBuilder2: modeling conformational
   dynamics on the picosecond to millisecond scale." J. Chem. Theory.
   Comput. 7.10 (2011): 3412-3419.
"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import print_function, division, absolute_import
from os.path import splitext
import sys
import json
import pandas as pd
from ..dataset import dataset
from ..cmdline import Command, argument, argument_group, rangetype, FlagAction
from ..msm import MarkovStateModel, implied_timescales
class ImpliedTimescales(Command):
    _group = 'MSM'
    _concrete = True
    description = __doc__
    lag_times = argument('-l', '--lag_times', default='1:10', help='''Range
        of lag times. Specify as 'start:stop' or 'start:stop:step'. The
        endpoints are inclusive.''', type=rangetype)
    inp = argument(
        '-i', '--inp', help='''Path to input dataset, a collection of 1D
        integer sequences (such as the output from clustering)''',
        required=True)
    out = argument('--out', help='''Output file''',
        default='timescales.csv')
    fmt = argument('--fmt', help='Output file format', default='csv',
        choices=('csv', 'json', 'excel'))
    _extensions = {'csv': '.csv', 'json': '.json', 'excel': '.xlsx'}
    n_jobs = argument('--n_jobs', help='Number of parallel processes',
        default=1, type=int)
    p = argument_group('MSM parameters')
    n_timescales = p.add_argument('--n_timescales', default=10, help='''
        The number of dynamical timescales to calculate when diagonalizing
        the transition matrix.''',  type=int)
    reversible_type = p.add_argument('--reversible_type', help='''
        Method by which the reversibility of the transition matrix
        is enforced. 'mle' uses a maximum likelihood method that is
        solved by numerical optimization, and 'transpose'
        uses a more restrictive (but less computationally complex)
        direct symmetrization of the expected number of counts.''',
        choices=('mle', 'transpose'), default='mle')
    ergodic_cutoff = p.add_argument('--ergodic_cutoff', default=1, help='''
        Only the maximal strongly ergodic subgraph of the data is used to build
        an MSM. Ergodicity is determined by ensuring that each state is
        accessible from each other state via one or more paths involving edges
        with a number of observed directed counts greater than or equal to
        ``ergodic_cutoff``. Note that by setting ``ergodic_cutoff`` to 0, this
        trimming is effectively turned off.''',  type=int)
    prior_counts = p.add_argument('--prior_counts', help='''Add a number
        of "pseudo counts" to each entry in the counts matrix. When
        prior_counts == 0 (default), the assigned transition probability
        between two states with no observed transitions will be zero, whereas
        when prior_counts > 0, even these unobserved transitions will be
        given nonzero probability.''', type=float, default=0)
    verbose = p.add_argument('--verbose', default=True,
        help='Enable verbose printout', action=FlagAction)
    def __init__(self, args):
        self.args = args
    def start(self):
        kwargs = {
            'n_timescales': self.args.n_timescales,
            'reversible_type': self.args.reversible_type,
            'ergodic_cutoff': self.args.ergodic_cutoff,
            'prior_counts': self.args.prior_counts,
            'verbose': self.args.verbose,
        }
        with dataset(self.args.inp, mode='r') as ds:
            model = MarkovStateModel(**kwargs)
            lines = implied_timescales(
                ds, lag_times=self.args.lag_times,
                n_timescales=self.args.n_timescales,
                msm=model,
                n_jobs=self.args.n_jobs,
                verbose=self.args.verbose)
        cols = ['Timescale %d' % (d+1) for d in range(len(lines[0]))]
        df = pd.DataFrame(data=lines, columns=cols)
        df['Lag Time'] = self.args.lag_times
        df = df.reindex_axis(sorted(df.columns), axis=1)
        self.write_output(df)
    def write_output(self, df):
        outfile = splitext(self.args.out)[0] + self._extensions[self.args.fmt]
        print('Writing %s' % outfile)
        if self.args.fmt == 'csv':
            df.to_csv(outfile)
        elif self.args.fmt == 'json':
            with open(outfile, 'w') as f:
                json.dump(df.to_dict(orient='records'), f)
        elif self.args.fmt == 'excel':
            df.to_excel(outfile)
        else:
            raise RuntimeError('unknown fmt: %s' % self.args.fmt)
        print('All done!')
 | 
	lgpl-2.1 | 
| 
	deepmind/grid-cells | 
	utils.py | 
	1 | 
	5720 | 
	# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for creating the training graph and plotting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import ensembles  # pylint: disable=g-bad-import-order
np.seterr(invalid="ignore")
def get_place_cell_ensembles(
    env_size, neurons_seed, targets_type, lstm_init_type, n_pc, pc_scale):
  """Create the ensembles for the Place cells."""
  place_cell_ensembles = [
      ensembles.PlaceCellEnsemble(
          n,
          stdev=s,
          pos_min=-env_size / 2.0,
          pos_max=env_size / 2.0,
          seed=neurons_seed,
          soft_targets=targets_type,
          soft_init=lstm_init_type)
      for n, s in zip(n_pc, pc_scale)
  ]
  return place_cell_ensembles
def get_head_direction_ensembles(
    neurons_seed, targets_type, lstm_init_type, n_hdc, hdc_concentration):
  """Create the ensembles for the Head direction cells."""
  head_direction_ensembles = [
      ensembles.HeadDirectionCellEnsemble(
          n,
          concentration=con,
          seed=neurons_seed,
          soft_targets=targets_type,
          soft_init=lstm_init_type)
      for n, con in zip(n_hdc, hdc_concentration)
  ]
  return head_direction_ensembles
def encode_initial_conditions(init_pos, init_hd, place_cell_ensembles,
                              head_direction_ensembles):
  initial_conds = []
  for ens in place_cell_ensembles:
    initial_conds.append(
        tf.squeeze(ens.get_init(init_pos[:, tf.newaxis, :]), axis=1))
  for ens in head_direction_ensembles:
    initial_conds.append(
        tf.squeeze(ens.get_init(init_hd[:, tf.newaxis, :]), axis=1))
  return initial_conds
def encode_targets(target_pos, target_hd, place_cell_ensembles,
                   head_direction_ensembles):
  ensembles_targets = []
  for ens in place_cell_ensembles:
    ensembles_targets.append(ens.get_targets(target_pos))
  for ens in head_direction_ensembles:
    ensembles_targets.append(ens.get_targets(target_hd))
  return ensembles_targets
def clip_all_gradients(g, var, limit):
  # print(var.name)
  return (tf.clip_by_value(g, -limit, limit), var)
def clip_bottleneck_gradient(g, var, limit):
  if ("bottleneck" in var.name or "pc_logits" in var.name):
    return (tf.clip_by_value(g, -limit, limit), var)
  else:
    return (g, var)
def no_clipping(g, var):
  return (g, var)
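# Example (illustrative, TF1-style API): applying one of the clipping helpers
# above to the (gradient, variable) pairs returned by an optimizer.
#
#   grads_and_vars = optimizer.compute_gradients(loss)
#   clipped = [clip_all_gradients(g, v, limit=1e-5) for g, v in grads_and_vars]
#   train_op = optimizer.apply_gradients(clipped)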
def concat_dict(acc, new_data):
  """Dictionary concatenation function."""
  def to_array(kk):
    if isinstance(kk, np.ndarray):
      return kk
    else:
      return np.asarray([kk])
  for k, v in new_data.iteritems():
    if isinstance(v, dict):
      if k in acc:
        acc[k] = concat_dict(acc[k], v)
      else:
        acc[k] = concat_dict(dict(), v)
    else:
      v = to_array(v)
      if k in acc:
        acc[k] = np.concatenate([acc[k], v])
      else:
        acc[k] = np.copy(v)
  return acc
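# Example (illustrative, Python 2 because of ``iteritems`` above): accumulating
# per-step logs into growing arrays.
#
#   acc = {}
#   acc = concat_dict(acc, {'loss': 0.5, 'pos': np.zeros((2, 3))})
#   acc = concat_dict(acc, {'loss': 0.4, 'pos': np.ones((2, 3))})
#   # acc['loss'] -> array([0.5, 0.4]); acc['pos'].shape -> (4, 3)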
def get_scores_and_plot(scorer,
                        data_abs_xy,
                        activations,
                        directory,
                        filename,
                        plot_graphs=True,  # pylint: disable=unused-argument
                        nbins=20,  # pylint: disable=unused-argument
                        cm="jet",
                        sort_by_score_60=True):
  """Plotting function."""
  # Concatenate all trajectories
  xy = data_abs_xy.reshape(-1, data_abs_xy.shape[-1])
  act = activations.reshape(-1, activations.shape[-1])
  n_units = act.shape[1]
  # Get the rate-map for each unit
  s = [
      scorer.calculate_ratemap(xy[:, 0], xy[:, 1], act[:, i])
      for i in xrange(n_units)
  ]
  # Get the scores
  score_60, score_90, max_60_mask, max_90_mask, sac = zip(
      *[scorer.get_scores(rate_map) for rate_map in s])
  # Separations
  # separations = map(np.mean, max_60_mask)
  # Sort by score if desired
  if sort_by_score_60:
    ordering = np.argsort(-np.array(score_60))
  else:
    ordering = range(n_units)
  # Plot
  cols = 16
  rows = int(np.ceil(n_units / cols))
  fig = plt.figure(figsize=(24, rows * 4))
  for i in xrange(n_units):
    rf = plt.subplot(rows * 2, cols, i + 1)
    acr = plt.subplot(rows * 2, cols, n_units + i + 1)
    if i < n_units:
      index = ordering[i]
      title = "%d (%.2f)" % (index, score_60[index])
      # Plot the activation maps
      scorer.plot_ratemap(s[index], ax=rf, title=title, cmap=cm)
      # Plot the autocorrelation of the activation maps
      scorer.plot_sac(
          sac[index],
          mask_params=max_60_mask[index],
          ax=acr,
          title=title,
          cmap=cm)
  # Save
  if not os.path.exists(directory):
    os.makedirs(directory)
  with PdfPages(os.path.join(directory, filename), "w") as f:
    plt.savefig(f, format="pdf")
  plt.close(fig)
  return (np.asarray(score_60), np.asarray(score_90),
          np.asarray(map(np.mean, max_60_mask)),
          np.asarray(map(np.mean, max_90_mask)))
 | 
	apache-2.0 | 
| 
	aayushidwivedi01/spark-tk | 
	regression-tests/sparktkregtests/testcases/frames/lda_groupby_flow_test.py | 
	11 | 
	3240 | 
	# vim: set encoding=utf-8
#  Copyright (c) 2016 Intel Corporation 
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
"""Sample LDA/Groupby example"""
import unittest
from sparktkregtests.lib import sparktk_test
import numpy
class LDAExample(sparktk_test.SparkTKTestCase):
    def test_lda_example(self):
        """LDA demo from examples directory"""
        # this is a full worked example of lda and groupby
        # with known correct values
        data = [['nytimes', 'harry', 3], ['nytimes', 'economy', 35], ['nytimes', 'jobs', 40], ['nytimes', 'magic', 1],
                ['nytimes', 'realestate', 15], ['nytimes', 'movies', 6], ['economist', 'economy', 50],
                ['economist', 'jobs', 35], ['economist', 'realestate', 20], ['economist', 'movies', 1],
                ['economist', 'harry', 1], ['economist', 'magic', 1], ['harrypotter', 'harry', 40],
                ['harrypotter', 'magic', 30], ['harrypotter', 'chamber', 20], ['harrypotter', 'secrets', 30]]
        frame = self.context.frame.create(
            data,
            schema=[('doc_id', str),
                    ('word_id', str),
                    ('word_count', long)])
        model = self.context.models.clustering.lda.train(
                frame, "doc_id", "word_id", "word_count", max_iterations=3, num_topics=2)
        doc_results = model.topics_given_doc_frame
        word_results = model.word_given_topics_frame
        doc_results.rename_columns({'topic_probabilities': 'lda_results_doc'})
        word_results.rename_columns(
            {'topic_probabilities': 'lda_results_word'})
        frame = frame.join_left(
            doc_results, left_on="doc_id", right_on="doc_id")
        frame = frame.join_left(
            word_results, left_on="word_id", right_on="word_id")
        # similar to calling predict on a model
        frame.dot_product(
            ['lda_results_doc'], ['lda_results_word'], 'lda_score')
        word_hist = frame.histogram('word_count', 4)
        lda_hist = frame.histogram('lda_score', 2)
        group_frame = frame.group_by(
            'word_id_L',
            {'word_count': self.context.agg.histogram(
                cutoffs=word_hist.cutoffs,
                include_lowest=True,
                strict_binning=False),
             'lda_score': self.context.agg.histogram(lda_hist.cutoffs)})
        pandas = group_frame.to_pandas()
        for (index, row) in pandas.iterrows():
            if str(row["word_id_L"]) == "magic":
                numpy.testing.assert_equal(list(row["word_count_HISTOGRAM"]), [float(2.0/3.0), 0, float(1.0/3.0), 0])
if __name__ == "__main__":
    unittest.main()
 | 
	apache-2.0 | 
| 
	mattgiguere/scikit-learn | 
	sklearn/utils/arpack.py | 
	265 | 
	64837 | 
	"""
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays.   This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
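#
# Illustrative sketch (wrapper behaviour, not ARPACK itself): for a real
# nonsymmetric problem a complex-conjugate pair of eigenvectors comes back as
# two consecutive real columns zr[:, i] (real part) and zr[:, i + 1]
# (imaginary part), which the wrapper reassembles as
#     z[:, i]     = zr[:, i] + 1.0j * zr[:, i + 1]
#     z[:, i + 1] = z[:, i].conjugate()
# (see _UnsymmetricArpackParams.extract below).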
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found. IPARAM(5) "
       "returns the number of wanted converged Ritz values.",
    2: "No longer an informational error. Deprecated starting "
       "with release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the "
       "Implicitly restarted Arnoldi iteration. One possibility "
       "is to increase the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from LAPACK eigenvalue calculation;",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible.",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found.",
    2: "No longer an informational error. Deprecated starting with "
       "release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the Implicitly "
       "restarted Arnoldi iteration. One possibility is to increase "
       "the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from trid. eigenvalue calculation; "
        "Informational error from LAPACK routine dsteqr .",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible. ",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
    0: "Normal exit.",
    1: "The Schur form computed by LAPACK routine dlahqr "
       "could not be reordered by LAPACK routine dtrsen. "
       "Re-enter subroutine dneupd  with IPARAM(5)NCV and "
       "increase the size of the arrays DR and DI to have "
       "dimension at least dimension NCV and allocate at least NCV "
       "columns for Z. NOTE: Not necessary if Z and V share "
       "the same space. Please notify the authors if this error "
       "occurs.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: "Error return from calculation of a real Schur form. "
        "Informational error from LAPACK routine dlahqr .",
    -9: "Error return from calculation of eigenvectors. "
        "Informational error from LAPACK routine dtrevc.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "HOWMNY = 'S' not yet implemented",
    -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
    -14: "DNAUPD  did not find any eigenvalues to sufficient "
         "accuracy.",
    -15: "DNEUPD got a different count of the number of converged "
         "Ritz values than DNAUPD got.  This indicates the user "
         "probably made an error in passing data from DNAUPD to "
         "DNEUPD or that the data was modified before entering "
         "DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
                    "could not be reordered by LAPACK routine strsen . "
                    "Re-enter subroutine dneupd  with IPARAM(5)=NCV and "
                    "increase the size of the arrays DR and DI to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
                      "converged Ritz values than SNAUPD got.  This indicates "
                      "the user probably made an error in passing data from "
                      "SNAUPD to SNEUPD or that the data was modified before "
                      "entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
                 1: "The Schur form computed by LAPACK routine csheqr "
                    "could not be reordered by LAPACK routine ztrsen. "
                    "Re-enter subroutine zneupd with IPARAM(5)=NCV and "
                    "increase the size of the array D to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.",
                 -1: "N must be positive.",
                 -2: "NEV must be positive.",
                 -3: "NCV-NEV >= 1 and less than or equal to N.",
                 -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
                 -6: "BMAT must be one of 'I' or 'G'.",
                 -7: "Length of private work WORKL array is not sufficient.",
                 -8: "Error return from LAPACK eigenvalue calculation. "
                     "This should never happened.",
                 -9: "Error return from calculation of eigenvectors. "
                     "Informational error from LAPACK routine ztrevc.",
                 -10: "IPARAM(7) must be 1,2,3",
                 -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
                 -12: "HOWMNY = 'S' not yet implemented",
                 -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
                 -14: "ZNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.",
                 -15: "ZNEUPD got a different count of the number of "
                      "converged Ritz values than ZNAUPD got.  This "
                      "indicates the user probably made an error in passing "
                      "data from ZNAUPD to ZNEUPD or that the data was "
                      "modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
                      "converged Ritz values than CNAUPD got.  This indicates "
                      "the user probably made an error in passing data from "
                      "CNAUPD to CNEUPD or that the data was modified before "
                      "entering CNEUPD")
DSEUPD_ERRORS = {
    0: "Normal exit.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: ("Error return from trid. eigenvalue calculation; "
         "Information error from LAPACK routine dsteqr."),
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "NEV and WHICH = 'BE' are incompatible.",
    -14: "DSAUPD  did not find any eigenvalues to sufficient accuracy.",
    -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
    -16: "HOWMNY = 'S' not yet implemented",
    -17: ("DSEUPD  got a different count of the number of converged "
          "Ritz values than DSAUPD  got.  This indicates the user "
          "probably made an error in passing data from DSAUPD  to "
          "DSEUPD  or that the data was modified before entering  "
          "DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD  did not find any eigenvalues "
                      "to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD  got a different count of the number of "
                      "converged "
                      "Ritz values than SSAUPD  got.  This indicates the user "
                      "probably made an error in passing data from SSAUPD  to "
                      "SSEUPD  or that the data was modified before entering  "
                      "SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
                 's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
                 's': SNAUPD_ERRORS,
                 'z': ZNAUPD_ERRORS,
                 'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
                 's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
                 's': SNEUPD_ERRORS,
                 'z': ZNEUPD_ERRORS,
                 'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
    """
    ARPACK error
    """
    def __init__(self, info, infodict=_NAUPD_ERRORS):
        msg = infodict.get(info, "Unknown error")
        RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
    """
    ARPACK iteration did not converge
    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.
    """
    def __init__(self, msg, eigenvalues, eigenvectors):
        ArpackError.__init__(self, -1, {-1: msg})
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
class _ArpackParams(object):
    def __init__(self, n, k, tp, mode=1, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        if k <= 0:
            raise ValueError("k must be positive, k=%d" % k)
        if maxiter is None:
            maxiter = n * 10
        if maxiter <= 0:
            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
        if tp not in 'fdFD':
            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
        if v0 is not None:
            # ARPACK overwrites its initial resid,  make a copy
            self.resid = np.array(v0, copy=True)
            info = 1
        else:
            self.resid = np.zeros(n, tp)
            info = 0
        if sigma is None:
            #sigma not used
            self.sigma = 0
        else:
            self.sigma = sigma
        if ncv is None:
            ncv = 2 * k + 1
        ncv = min(ncv, n)
        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
        self.iparam = np.zeros(11, "int")
        # set solver mode and parameters
        ishfts = 1
        self.mode = mode
        self.iparam[0] = ishfts
        self.iparam[2] = maxiter
        self.iparam[3] = 1
        self.iparam[6] = mode
        self.n = n
        self.tol = tol
        self.k = k
        self.maxiter = maxiter
        self.ncv = ncv
        self.which = which
        self.tp = tp
        self.info = info
        self.converged = False
        self.ido = 0
    def _raise_no_convergence(self):
        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
        k_ok = self.iparam[4]
        num_iter = self.iparam[2]
        try:
            ev, vec = self.extract(True)
        except ArpackError as err:
            msg = "%s [%s]" % (msg, err)
            ev = np.zeros((0,))
            vec = np.zeros((self.n, 0))
            k_ok = 0
        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #  mode = 1:
        #    Solve the standard eigenvalue problem:
        #      A*x = lambda*x :
        #       A - symmetric
        #    Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = None [not used]
        #
        #  mode = 2:
        #    Solve the general eigenvalue problem:
        #      A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive definite
        #    Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #       Minv_matvec = left multiplication by M^-1
        #
        #  mode = 3:
        #    Solve the general eigenvalue problem in shift-invert mode:
        #      A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive semi-definite
        #    Arguments should be
        #       matvec      = None [not used]
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
        #
        #  mode = 4:
        #    Solve the general eigenvalue problem in Buckling mode:
        #      A*x = lambda*AG*x
        #       A  - symmetric positive semi-definite
        #       AG - symmetric indefinite
        #    Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = left multiplication by [A-sigma*AG]^-1
        #
        #  mode = 5:
        #    Solve the general eigenvalue problem in Cayley-transformed mode:
        #      A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive semi-definite
        #    Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
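        #
        # Illustrative wiring for mode = 3 with M = identity (an assumption,
        # not taken from the callers of this class): a factorization of
        # (A - sigma*M) supplies Minv_matvec while matvec stays None.
        #
        #     solve = splu(A_csc - sigma * identity(n, format='csc')).solve
        #     params = _SymmetricArpackParams(n, k, 'd', matvec=None, mode=3,
        #                                     Minv_matvec=solve, sigma=sigma)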
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")
            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode == 3:
            if matvec is not None:
                raise ValueError("matvec must not be specified for mode=3")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=3")
            if M_matvec is None:
                self.OP = Minv_matvec
                self.OPa = Minv_matvec
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(M_matvec(x))
                self.OPa = Minv_matvec
                self.B = M_matvec
                self.bmat = 'G'
        elif mode == 4:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=4")
            if M_matvec is not None:
                raise ValueError("M_matvec must not be specified for mode=4")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=4")
            self.OPa = Minv_matvec
            self.OP = lambda x: self.OPa(matvec(x))
            self.B = matvec
            self.bmat = 'G'
        elif mode == 5:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=5")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=5")
            self.OPa = Minv_matvec
            self.A_matvec = matvec
            if M_matvec is None:
                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(matvec(x)
                                                + sigma * M_matvec(x))
                self.B = M_matvec
                self.bmat = 'G'
        else:
            raise ValueError("mode=%i not implemented" % mode)
        if which not in _SEUPD_WHICH:
            raise ValueError("which must be one of %s"
                             % ' '.join(_SEUPD_WHICH))
        if k >= n:
            raise ValueError("k must be less than rank(A), k=%d" % k)
        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)
        if self.ncv > n or self.ncv <= k:
            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
        self.workd = np.zeros(3 * n, self.tp)
        self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
        ltr = _type_conv[self.tp]
        if ltr not in ["s", "d"]:
            raise ValueError("Input matrix is not real-valued.")
        self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
        self.iterate_infodict = _SAUPD_ERRORS[ltr]
        self.extract_infodict = _SEUPD_ERRORS[ltr]
        self.ipntr = np.zeros(11, "int")
    def iterate(self):
        self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
            self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                self.tol, self.resid, self.v, self.iparam,
                                self.ipntr, self.workd, self.workl, self.info)
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode == 1:
                self.workd[yslice] = self.OP(self.workd[xslice])
            elif self.mode == 2:
                self.workd[xslice] = self.OPb(self.workd[xslice])
                self.workd[yslice] = self.OPa(self.workd[xslice])
            elif self.mode == 5:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                Ax = self.A_matvec(self.workd[xslice])
                self.workd[yslice] = self.OPa(Ax + (self.sigma *
                                                    self.workd[Bxslice]))
            else:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts.  Assure ISHIFT==0")
        else:
            self.converged = True
            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)
    def extract(self, return_eigenvectors):
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]
        if return_eigenvectors:
            return d, z
        else:
            return d
class _UnsymmetricArpackParams(_ArpackParams):
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #  mode = 1:
        #    Solve the standard eigenvalue problem:
        #      A*x = lambda*x
        #       A - square matrix
        #    Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = None [not used]
        #
        #  mode = 2:
        #    Solve the generalized eigenvalue problem:
        #      A*x = lambda*M*x
        #       A - square matrix
        #       M - symmetric, positive semi-definite
        #    Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #       Minv_matvec = left multiplication by M^-1
        #
        #  mode = 3,4:
        #    Solve the general eigenvalue problem in shift-invert mode:
        #      A*x = lambda*M*x
        #       A - square matrix
        #       M - symmetric, positive semi-definite
        #    Arguments should be
        #       matvec      = None [not used]
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
        #    if A is real and mode==3, use the real part of Minv_matvec
        #    if A is real and mode==4, use the imag part of Minv_matvec
        #    if A is complex and mode==3,
        #       use real and imag parts of Minv_matvec
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")
            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode in (3, 4):
            if matvec is None:
                raise ValueError("matvec must be specified "
                                 "for mode in (3,4)")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified "
                                 "for mode in (3,4)")
            self.matvec = matvec
            if tp in 'DF':  # complex type
                if mode == 3:
                    self.OPa = Minv_matvec
                else:
                    raise ValueError("mode=4 invalid for complex A")
            else:  # real type
                if mode == 3:
                    self.OPa = lambda x: np.real(Minv_matvec(x))
                else:
                    self.OPa = lambda x: np.imag(Minv_matvec(x))
            if M_matvec is None:
                self.B = lambda x: x
                self.bmat = 'I'
                self.OP = self.OPa
            else:
                self.B = M_matvec
                self.bmat = 'G'
                self.OP = lambda x: self.OPa(M_matvec(x))
        else:
            raise ValueError("mode=%i not implemented" % mode)
        if which not in _NEUPD_WHICH:
            raise ValueError("Parameter which must be one of %s"
                             % ' '.join(_NEUPD_WHICH))
        if k >= n - 1:
            raise ValueError("k must be less than rank(A)-1, k=%d" % k)
        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)
        if self.ncv > n or self.ncv <= k + 1:
            raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
        self.workd = np.zeros(3 * n, self.tp)
        self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
        ltr = _type_conv[self.tp]
        self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
        self.iterate_infodict = _NAUPD_ERRORS[ltr]
        self.extract_infodict = _NEUPD_ERRORS[ltr]
        self.ipntr = np.zeros(14, "int")
        if self.tp in 'FD':
            self.rwork = np.zeros(self.ncv, self.tp.lower())
        else:
            self.rwork = None
    def iterate(self):
        if self.tp in 'fd':
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr,  self.workd, self.workl,
                                    self.info)
        else:
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.rwork, self.info)
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode in (1, 2):
                self.workd[yslice] = self.OP(self.workd[xslice])
            else:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts.  Assure ISHIFT==0")
        else:
            self.converged = True
            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)
    def extract(self, return_eigenvectors):
        k, n = self.k, self.n
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        sigmar = np.real(self.sigma)
        sigmai = np.imag(self.sigma)
        workev = np.zeros(3 * self.ncv, self.tp)
        if self.tp in 'fd':
            dr = np.zeros(k + 1, self.tp)
            di = np.zeros(k + 1, self.tp)
            zr = np.zeros((n, k + 1), self.tp)
            dr, di, zr, ierr = \
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, sigmar, sigmai,
                    workev, self.bmat, self.which, k, self.tol, self.resid,
                    self.v, self.iparam, self.ipntr, self.workd, self.workl,
                    self.info)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            nreturned = self.iparam[4]  # number of good eigenvalues returned
            # Build complex eigenvalues from real and imaginary parts
            d = dr + 1.0j * di
            # Arrange the eigenvectors: complex eigenvectors are stored as
            # real,imaginary in consecutive columns
            z = zr.astype(self.tp.upper())
            # The ARPACK nonsymmetric real and double interface (s,d)naupd
            # return eigenvalues and eigenvectors in real (float,double)
            # arrays.
            # Efficiency: this should check that return_eigenvectors == True
            #  before going through this construction.
            if sigmai == 0:
                i = 0
                while i <= k:
                    # check if complex
                    if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair with eigenvalues
                        # in consecutive columns
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1
            else:
                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
                # see remark 3 in <s,d>neupd.f
                # Build complex eigenvalues from real and imaginary parts
                i = 0
                while i <= k:
                    if abs(d[i].imag) == 0:
                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                    else:
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            d[i] = ((np.dot(zr[:, i],
                                            self.matvec(zr[:, i]))
                                     + np.dot(zr[:, i + 1],
                                              self.matvec(zr[:, i + 1])))
                                    + 1j * (np.dot(zr[:, i],
                                                   self.matvec(zr[:, i + 1]))
                                            - np.dot(zr[:, i + 1],
                                                     self.matvec(zr[:, i]))))
                            d[i + 1] = d[i].conj()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1
            # Now we have k+1 possible eigenvalues and eigenvectors
            # Return the ones specified by the keyword "which"
            if nreturned <= k:
                # we got at most as many eigenvalues as we wanted
                d = d[:nreturned]
                z = z[:, :nreturned]
            else:
                # we got one extra eigenvalue (likely a cc pair, but which?)
                # cut at approx precision for sorting
                rd = np.round(d, decimals=_ndigits[self.tp])
                if self.which in ['LR', 'SR']:
                    ind = np.argsort(rd.real)
                elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns largest,smallest
                    # abs(imaginary) why?
                    ind = np.argsort(abs(rd.imag))
                else:
                    ind = np.argsort(abs(rd))
                if self.which in ['LR', 'LM', 'LI']:
                    d = d[ind[-k:]]
                    z = z[:, ind[-k:]]
                if self.which in ['SR', 'SM', 'SI']:
                    d = d[ind[:k]]
                    z = z[:, ind[:k]]
        else:
            # complex is so much simpler...
            d, z, ierr =\
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, self.sigma, workev,
                    self.bmat, self.which, k, self.tol, self.resid, self.v,
                    self.iparam, self.ipntr, self.workd, self.workl,
                    self.rwork, ierr)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            k_ok = self.iparam[4]
            d = d[:k_ok]
            z = z[:, :k_ok]
        if return_eigenvectors:
            return d, z
        else:
            return d
def _aslinearoperator_with_dtype(m):
    m = aslinearoperator(m)
    if not hasattr(m, 'dtype'):
        x = np.zeros(m.shape[1])
        m.dtype = (m * x).dtype
    return m
class SpLuInv(LinearOperator):
    """
    SpLuInv:
       helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
    """
    def __init__(self, M):
        self.M_lu = splu(M)
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
    def _matvec(self, x):
        # careful here: splu.solve will throw away imaginary
        # part of x if M is real
        if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
            return (self.M_lu.solve(np.real(x))
                    + 1j * self.M_lu.solve(np.imag(x)))
        else:
            return self.M_lu.solve(x)
class LuInv(LinearOperator):
    """
    LuInv:
       helper class to repeatedly solve M*x=b
       using an LU-decomposition of M
    """
    def __init__(self, M):
        self.M_lu = lu_factor(M)
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
    def _matvec(self, x):
        return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
    """
    IterInv:
       helper class to repeatedly solve M*x=b
       using an iterative method.
    """
    def __init__(self, M, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = np.finfo(M.dtype).eps
        self.M = M
        self.ifunc = ifunc
        self.tol = tol
        if hasattr(M, 'dtype'):
            dtype = M.dtype
        else:
            x = np.zeros(M.shape[1])
            dtype = (M * x).dtype
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
    def _matvec(self, x):
        b, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b
class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """
    def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = np.finfo(A.dtype).eps
        self.A = A
        self.M = M
        self.sigma = sigma
        self.ifunc = ifunc
        self.tol = tol
        x = np.zeros(A.shape[1])
        if M is None:
            dtype = self.mult_func_M_None(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     self.mult_func_M_None,
                                     dtype=dtype)
        else:
            dtype = self.mult_func(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     self.mult_func,
                                     dtype=dtype)
        LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
    def mult_func(self, x):
        return self.A.matvec(x) - self.sigma * self.M.matvec(x)
    def mult_func_M_None(self, x):
        return self.A.matvec(x) - self.sigma * x
    def _matvec(self, x):
        b, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b
def get_inv_matvec(M, symmetric=False, tol=0):
    if isdense(M):
        return LuInv(M).matvec
    elif isspmatrix(M):
        if isspmatrix_csr(M) and symmetric:
            M = M.T
        return SpLuInv(M).matvec
    else:
        return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
    if sigma == 0:
        return get_inv_matvec(A, symmetric=symmetric, tol=tol)
    if M is None:
        #M is the identity matrix
        if isdense(A):
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                A = np.copy(A)
            else:
                A = A + 0j
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        elif isspmatrix(A):
            A = A - sigma * identity(A.shape[0])
            if symmetric and isspmatrix_csr(A):
                A = A.T
            return SpLuInv(A.tocsc()).matvec
        else:
            return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
                             tol=tol).matvec
    else:
        if ((not isdense(A) and not isspmatrix(A)) or
                (not isdense(M) and not isspmatrix(M))):
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             _aslinearoperator_with_dtype(M), sigma,
                             tol=tol).matvec
        elif isdense(A) or isdense(M):
            return LuInv(A - sigma * M).matvec
        else:
            OP = A - sigma * M
            if symmetric and isspmatrix_csr(OP):
                OP = OP.T
            return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
          maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
          OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.
    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].
    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]
    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
    the operation A * x, where A is a real or complex square matrix.
    k : int, default 6
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.
    return_eigenvectors : boolean, default True
        Whether to return the eigenvectors along with the eigenvalues.
    M : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation M*x for the generalized eigenvalue problem
          ``A * x = w * M * x``
        M must represent a real symmetric matrix.  For best results, M should
        be of the same type as A.  Additionally:
         * If sigma==None, M is positive definite
         * If sigma is specified, M is positive semi-definite
        If sigma==None, eigs requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator.  Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b
    sigma : real or complex
        Find eigenvalues near sigma using shift-invert mode.  This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] * x = b`, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues w'[i] where:
         * If A is real and OPpart == 'r' (default),
            w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
         * If A is real and OPpart == 'i',
            w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
         * If A is complex,
            w'[i] = 1/(w[i]-sigma)
    v0 : array
        Starting vector for iteration.
    ncv : integer
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
    which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
        Which `k` eigenvectors and eigenvalues to find:
         - 'LM' : largest magnitude
         - 'SM' : smallest magnitude
         - 'LR' : largest real part
         - 'SR' : smallest real part
         - 'LI' : largest imaginary part
         - 'SI' : smallest imaginary part
        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : integer
        Maximum number of Arnoldi update iterations allowed
    tol : float
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues
    Minv : N x N matrix, array, sparse matrix, or linear operator
        See notes in M, above.
        
    OPinv : N x N matrix, array, sparse matrix, or linear operator
        See notes in sigma, above.
    OPpart : 'r' or 'i'.
        See notes in sigma, above
    Returns
    -------
    w : array
        Array of k eigenvalues.
    v : array
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.
    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A
    Examples
    --------
    Find 6 eigenvectors of the identity matrix:
    >>> from sklearn.utils.arpack import eigs
    >>> id = np.identity(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)
    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.
    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]
    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")
    if sigma is None:
        matvec = _aslinearoperator_with_dtype(A).matvec
        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")
        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        #sigma is not None: shift-invert mode
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")
        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           symmetric=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)
    while not params.converged:
        params.iterate()
    return params.extract(return_eigenvectors)
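# Illustrative sketch (not from the original source): shift-invert via the
# ``sigma`` keyword is the usual way to reach the eigenvalues of smallest
# magnitude, which ARPACK finds poorly in direct mode.  Assuming the public
# ``eigs`` name bound at the bottom of this module:
#
#     >>> import numpy as np
#     >>> A = np.diag(np.arange(1., 14.))     # eigenvalues 1, 2, ..., 13
#     >>> vals, vecs = eigs(A, k=3, sigma=0)  # the 3 eigenvalues nearest 0
#     >>> np.sort(vals.real)
#     array([ 1.,  2.,  3.])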
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
           maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
           OPinv=None, mode='normal'):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex hermitian matrix A.
    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].
    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]
    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation A * x, where A is a real symmetric matrix
        For buckling mode (see below) A must additionally be positive-definite
    k : integer
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation M * x for the generalized eigenvalue problem
          ``A * x = w * M * x``.
        M must represent a real, symmetric matrix.  For best results, M should
        be of the same type as A.  Additionally:
         * If sigma == None, M is symmetric positive definite
         * If sigma is specified, M is symmetric positive semi-definite
         * In buckling mode, M is symmetric indefinite.
        If sigma == None, eigsh requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator.  Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b
    sigma : real
        Find eigenvalues near sigma using shift-invert mode.  This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] x = b`, where M is the identity matrix if
        unspecified.  This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues w'[i] where:
         - if mode == 'normal',
             w'[i] = 1 / (w[i] - sigma)
         - if mode == 'cayley',
             w'[i] = (w[i] + sigma) / (w[i] - sigma)
         - if mode == 'buckling',
             w'[i] = w[i] / (w[i] - sigma)
        (see further discussion in 'mode' below)
    v0 : array
        Starting vector for iteration.
    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k and smaller than n;
        it is recommended that ncv > 2*k
    which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find
         - 'LM' : Largest (in magnitude) eigenvalues
         - 'SM' : Smallest (in magnitude) eigenvalues
         - 'LA' : Largest (algebraic) eigenvalues
         - 'SA' : Smallest (algebraic) eigenvalues
         - 'BE' : Half (k/2) from each end of the spectrum
                  When k is odd, return one more (k/2+1) from the high end
        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : integer
        Maximum number of Arnoldi update iterations allowed
    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.
    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues
    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode.  This argument applies
        only for real-valued A and sigma != None.  For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP * x'[i] = w'[i] * B * x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A * x[i] = w[i] * M * x[i]``.
        The modes are as follows:
          - 'normal'   : OP = [A - sigma * M]^-1 * M
                         B = M
                         w'[i] = 1 / (w[i] - sigma)
          - 'buckling' : OP = [A - sigma * M]^-1 * A
                         B = A
                         w'[i] = w[i] / (w[i] - sigma)
          - 'cayley'   : OP = [A - sigma * M]^-1 * [A + sigma * M]
                         B = M
                         w'[i] = (w[i] + sigma) / (w[i] - sigma)
        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion)
    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]
    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.
    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A
    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.
    Examples
    --------
    >>> from sklearn.utils.arpack import eigsh
    >>> id = np.identity(13)
    >>> vals, vecs = eigsh(id, k=6)
    >>> vals # doctest: +SKIP
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> print(vecs.shape)
    (13, 6)
    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    # complex hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError("mode=%s cannot be used with "
                             "complex matrix A" % mode)
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)
        if return_eigenvectors:
            return ret[0].real, ret[1]
        else:
            return ret.real
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]
    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")
    if sigma is None:
        A = _aslinearoperator_with_dtype(A)
        matvec = A.matvec
        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                OPinv = _aslinearoperator_with_dtype(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = _aslinearoperator_with_dtype(M)
                M_matvec = M.matvec
        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            matvec = _aslinearoperator_with_dtype(A).matvec
            M_matvec = None
        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = _aslinearoperator_with_dtype(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = _aslinearoperator_with_dtype(M).matvec
        # unrecognized mode
        else:
            raise ValueError("unrecognized mode '%s'" % mode)
    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol)
    while not params.converged:
        params.iterate()
    return params.extract(return_eigenvectors)
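# Illustrative sketch (not from the original source): for a real symmetric
# matrix the smallest algebraic eigenvalues can be requested directly with
# ``which='SA'``.  Assuming the public ``eigsh`` name bound at the bottom of
# this module:
#
#     >>> import numpy as np
#     >>> A = np.diag(np.arange(1., 14.))
#     >>> vals, vecs = eigsh(A, k=3, which='SA')
#     >>> np.sort(vals)
#     array([ 1.,  2.,  3.])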
def _svds(A, k=6, ncv=None, tol=0):
    """Compute k singular values/vectors for a sparse matrix using ARPACK.
    Parameters
    ----------
    A : sparse matrix
        Array to compute the SVD on
    k : int, optional
        Number of singular values and vectors to compute.
    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k+1 and smaller than n;
        it is recommended that ncv > 2*k
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    Notes
    -----
    This is a naive implementation using an eigensolver on A.H * A or
    A * A.H, depending on which one is more efficient.
    """
    if not (isinstance(A, np.ndarray) or isspmatrix(A)):
        A = np.asarray(A)
    n, m = A.shape
    if np.issubdtype(A.dtype, np.complexfloating):
        herm = lambda x: x.T.conjugate()
        eigensolver = eigs
    else:
        herm = lambda x: x.T
        eigensolver = eigsh
    if n > m:
        X = A
        XH = herm(A)
    else:
        XH = A
        X = herm(A)
    if hasattr(XH, 'dot'):
        def matvec_XH_X(x):
            return XH.dot(X.dot(x))
    else:
        def matvec_XH_X(x):
            return np.dot(XH, np.dot(X, x))
    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
                          shape=(X.shape[1], X.shape[1]))
    # Ignore deprecation warnings here: dot on matrices is deprecated,
    # but this code is a backport anyhow
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
    s = np.sqrt(eigvals)
    if n > m:
        v = eigvec
        if hasattr(X, 'dot'):
            u = X.dot(v) / s
        else:
            u = np.dot(X, v) / s
        vh = herm(v)
    else:
        u = eigvec
        if hasattr(X, 'dot'):
            vh = herm(X.dot(u) / s)
        else:
            vh = herm(np.dot(X, u) / s)
    return u, s, vh
# check if backport is actually needed:
if LooseVersion(scipy.version.version) >= LooseVersion('0.10'):
    from scipy.sparse.linalg import eigs, eigsh, svds
else:
    eigs, eigsh, svds = _eigs, _eigsh, _svds
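# Illustrative sketch (not from the original source): whichever implementation
# is bound above, ``svds`` returns the k leading singular triplets, so the
# values should agree with ``np.linalg.svd`` on a small dense matrix:
#
#     >>> import numpy as np
#     >>> X = np.arange(20.).reshape(5, 4)
#     >>> u, s, vh = svds(X, k=2)
#     >>> np.allclose(np.sort(s),
#     ...             np.sort(np.linalg.svd(X, compute_uv=False))[-2:])
#     True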
 | 
	bsd-3-clause | 
| 
	looooo/paraBEM | 
	examples/plots/lifting_line.py | 
	1 | 
	1404 | 
	from __future__ import division
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import paraBEM
from paraBEM.liftingline import LiftingLine
from paraBEM.utils import check_path
# WingGeometry
spw = 2
numpos = 50
z_fac_1 = -0.3
z_fac_2 = -0.7
y = np.sin(np.linspace(0, np.pi/2, numpos)) * spw/2
x = [0. for _ in y]
z = [i**2 * z_fac_1 + i**6 * z_fac_2 for i in y]
mirror = lambda xyz: [xyz[0], -xyz[1], xyz[2]]
wing = list(zip(x, y, z))
wing = list(map(mirror, wing))[::-1] + list(wing)[1:]
wing = [paraBEM.Vector3(*i) for i in wing]
# LiftingLine
lifting_line = LiftingLine(wing)
lifting_line.v_inf = paraBEM.Vector3(1, 0, 0)
lifting_line.solve_for_best_gamma(1)
gamma = [i.best_gamma for i in lifting_line.segments]
gamma_max = max(gamma)
# Plot
gamma_el = lambda y: gamma_max * (1 - (y / spw * 2)**2)**(1 / 2)
mids = [[i.mids.x, i.mids.y, i.mids.z] for i in lifting_line.segments]
x, y, z = zip(*mids)
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax1.plot(y, z)
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(y, x, marker="x")
ax3 = fig.add_subplot(3, 1, 3)
y_el = np.linspace(-1, 1, 400)
ax3.plot([-spw/2] + list(y) + [spw/2], [0] + gamma + [0], marker="x")
ax3.plot(y_el, list(map(gamma_el, y_el)))
plt.savefig(check_path("results/2d/liftingline.png"))
total = 0
for i in lifting_line.segments:
    total += i.lift_factor * i.best_gamma
print(total)
 | 
	gpl-3.0 | 
| 
	tdhopper/scikit-learn | 
	examples/svm/plot_svm_scale_c.py | 
	223 | 
	5375 | 
	"""
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
    C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
    - :math:`C` is used to set the amount of regularization
    - :math:`\mathcal{L}` is a `loss` function of our samples
      and our model parameters.
    - :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>` to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under a given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1` penalty. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
    Two separate datasets are used for the two different plots. The reason
    behind this is that the `l1` case works better on sparse data, while `l2`
    is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
#         Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
                                        n_features=n_features, n_informative=5,
                                        random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                       tol=1e-3),
             np.logspace(-2.3, -1.3, 10), X_1, y_1),
            (LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
                       tol=1e-4),
             np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each regressor
    plt.figure(fignum, figsize=(9, 10))
    for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
        param_grid = dict(C=cs)
        # To get nice curve, we need a large number of iterations to
        # reduce the variance
        grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
                            cv=ShuffleSplit(n=n_samples, train_size=train_size,
                                            n_iter=250, random_state=1))
        grid.fit(X, y)
        scores = [x[1] for x in grid.grid_scores_]
        scales = [(1, 'No scaling'),
                  ((n_samples * train_size), '1/n_samples'),
                  ]
        for subplotnum, (scaler, name) in enumerate(scales):
            plt.subplot(2, 1, subplotnum + 1)
            plt.xlabel('C')
            plt.ylabel('CV Score')
            grid_cs = cs * float(scaler)  # scale the C's
            plt.semilogx(grid_cs, scores, label="fraction %.2f" %
                         train_size)
            plt.title('scaling=%s, penalty=%s, loss=%s' %
                      (name, clf.penalty, clf.loss))
    plt.legend(loc="best")
plt.show()
 | 
	bsd-3-clause | 
| 
	DTUWindEnergy/Python4WindEnergy | 
	lesson 3/results/ebra.py | 
	1 | 
	8402 | 
	# -*- coding: utf-8 -*- <nbformat>3.0</nbformat>
# <headingcell level=1>
# Plotting with Matplotlib
# <headingcell level=2>
# Prepare for action
# <codecell>
import numpy as np
import scipy as sp
import sympy
# Pylab combines the pyplot functionality (for plotting) with the numpy
# functionality (for mathematics and for working with arrays) in a single namespace
# aims to provide a closer MATLAB feel (the easy way). Note that this approach
# should only be used when doing some interactive quick and dirty data inspection.
# DO NOT USE THIS FOR SCRIPTS
#from pylab import *
# the convenient Matplotlib plotting interface pyplot (the tidy/right way)
# use this for building scripts. The examples here will all use pyplot.
import matplotlib.pyplot as plt
# for using the matplotlib API directly (the hard and verbose way)
# use this when building applications, and/or backends
import matplotlib as mpl
# <markdowncell>
# How would you like the IPython notebook to show your plots? In order to use the
# matplotlib IPython magic, your IPython notebook should be launched as
# 
#     ipython notebook --matplotlib=inline
# 
# Make plots appear as a pop up window, chose the backend: 'gtk', 'inline', 'osx', 'qt', 'qt4', 'tk', 'wx'
#     
#     %matplotlib qt
#     
# or inline in the notebook (no panning or zooming through the plot). Not working in IPython 0.x
#     
#     %matplotlib inline
#     
# <codecell>
# activate pop up plots
#%matplotlib qt
# or change to inline plots
# %matplotlib inline
# <headingcell level=3>
# Matplotlib documentation
# <markdowncell>
# Finding your own way (aka RTFM). Hint: there is search box available!
# 
# * http://matplotlib.org/contents.html
# 
# The Matplotlib API docs:
# 
# * http://matplotlib.org/api/index.html
# 
# Pyplot, object oriented plotting:
# 
# * http://matplotlib.org/api/pyplot_api.html
# * http://matplotlib.org/api/pyplot_summary.html
# 
# Extensive gallery with examples:
# 
# * http://matplotlib.org/gallery.html
# <headingcell level=3>
# Tutorials for those who want to start playing
# <markdowncell>
# If reading manuals is too much for you, there is a very good tutorial available here:
# 
# * http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-4-Matplotlib.ipynb
# 
# Note that this tutorial uses
# 
#     from pylab import *
# 
# which is usually not adviced in more advanced script environments. When using
#     
#     import matplotlib.pyplot as plt
# 
# you need to preceed all plotting commands as used in the above tutorial with
#     
#     plt.
# <markdowncell>
# Give me more!
# 
# [EuroScipy 2012 Matplotlib tutorial](http://www.loria.fr/~rougier/teaching/matplotlib/). Note that here the author uses ```from pylab import * ```. When using ```import matplotlib.pyplot as plt``` the plotting commands need to be preceded with ```plt.```
# <headingcell level=2>
# Plotting template starting point
# <codecell>
# some sample data
x = np.arange(-10,10,0.1)
# <markdowncell>
# To change the default plot configuration values.
# <codecell>
page_width_cm = 13
dpi = 200
inch = 2.54 # inch in cm
# setting global plot configuration using the RC configuration style
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=12) # tick labels
plt.rc('ytick', labelsize=20) # tick labels
plt.rc('axes', labelsize=20)  # axes labels
# If you don’t need LaTeX, don’t use it. It is slower to plot, and text
# looks just fine without. If you need it, e.g. for symbols, then use it.
#plt.rc('text', usetex=True) #<- P-E: Doesn't work on my Mac
# <codecell>
# create a figure instance, note that figure size is given in inches!
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,6))
# set the big title (note aligment relative to figure)
fig.suptitle("suptitle 16, figure alignment", fontsize=16)
# actual plotting
ax.plot(x, x**2, label="label 12")
# set axes title (note aligment relative to axes)
ax.set_title("title 14, axes alignment", fontsize=14)
# axes labels
ax.set_xlabel('xlabel 12')
ax.set_ylabel(r'$y_{\alpha}$ 12', fontsize=8)
# legend
ax.legend(fontsize=12, loc="best")
# saving the figure in different formats
# fig.savefig('figure-%03i.png' % dpi, dpi=dpi)
# fig.savefig('figure.svg')
# fig.savefig('figure.eps')
# <codecell>
# following steps are only relevant when using figures as pop up windows (with %matplotlib qt)
# to update a figure with has been modified
fig.canvas.draw()
# show a figure
fig.show()
# <headingcell level=2>
# Exercise
# <markdowncell>
# In this section you should try to figure out how to implement several plotting features yourself. Use the previously mentioned resources to find out how. In many cases, google is your friend!
# <markdowncell>
# * add a grid to the plot
# <codecell>
plt.plot(x,x**2)
plt.grid('on')
# <markdowncell>
# * change the location of the legend to different places
# <codecell>
plt.plot(x,x**2, label="label 12")
plt.legend(fontsize=12, loc="upper right")
# <markdowncell>
# * find a way to control the line type and color, marker type and color, control the frequency of the marks (`markevery`). See plot options at: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot 
# <codecell>
stride = max( int(len(x) / 20), 1)
plt.plot(x,x**2, 'ko-',color='forestgreen', markevery=stride,label="label 12") 
plt.legend(fontsize=12, loc="upper center")
# <markdowncell>
# * add different sub-plots
# <codecell>
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
# <markdowncell>
# * size the figure such that when included on an A4 page the fonts are given in their true size
# <codecell>
# matplotlib.rcParams.update({'font.size': 22})
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
fig.set_size_inches(8.2,3) # using A4 width in inches?
fig.set_dpi(100)
for ax in axes:
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(12)
# ax[0].set('xtick', labelsize=12) # tick labels
# .rc('ytick', labelsize=20) # tick labels
# .rc('axes', labelsize=20)  # axes labels
# fig.savefig('figure.pdf')
# <markdowncell>
# * make a contour plot
# <codecell>
X, Y = np.meshgrid(x,x)
plt.figure()
plt.contourf(X,Y,X*Y,linewidth=0.3,cmap=plt.get_cmap('hsv'),levels=np.arange(-1,1,0.1))
plt.show()
# im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd/30),cmap=plt.get_cmap('hsv'),linewidth=0.1)
# <markdowncell>
# * use twinx() to create a second axis on the right for the second plot
# <codecell>
plt.figure()
ax=plt.gca()
ax.plot(x,x**2)
ax2 = ax.twinx()
ax2.plot(x,x**4, 'r')
# <markdowncell>
# * add horizontal and vertical lines using axvline(), axhline()
# <codecell>
plt.figure()
plt.plot(x,x**2)
plt.axvline(2)
plt.axhline(10)
# <markdowncell>
# * autoformat dates for nice printing on the x-axis using fig.autofmt_xdate()
# <codecell>
import datetime
dates = np.array([datetime.datetime.now() + datetime.timedelta(days=i) for i in xrange(24)])
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(dates,xrange(24))
fig.autofmt_xdate()
# <headingcell level=2>
# Advanced exercises
# <markdowncell>
# We are going to play a bit with regression
# <markdowncell>
# * Create a vector x of 1000 equally spaced numbers in $x \in [0, 5\pi]$ (keyword: linspace)
# <codecell>
n=1000
x=np.linspace(0,5*np.pi,n)
# <markdowncell>
# * create a vector y, so that y=sin(x) with some random noise
# <codecell>
y   = np.sin(x) +np.random.rand(n)-0.5
yth = np.sin(x)
# <markdowncell>
# * plot it like this: 
# <codecell>
fig=plt.figure()
ax=plt.gca()
ax.plot(x,y,'b.')
ax.plot(x,yth,'k--',label=r'$y=sin(x)$')
# <markdowncell>
# Try to do a polynomial fit on y(x) with different polynomial degrees (use numpy.polyfit to obtain the coefficients)
# 
# Plot it like this (use np.poly1d(coef)(x) to plot polynomials) 
# <codecell>
for order in xrange(9):
    coeff=np.polyfit(x,y,order)
    ax.plot(x,np.poly1d(coeff)(x),label='deg %d'%order)
# shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# <codecell>
 | 
	apache-2.0 | 
| 
	B3AU/waveTree | 
	sklearn/utils/testing.py | 
	4 | 
	12125 | 
	"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
#          Andreas Muller
#          Mathieu Blondel
#          Olivier Grisel
#          Arnaud Joly
# License: BSD 3 clause
import inspect
import pkgutil
import warnings
import scipy as sp
from functools import wraps
try:
    # Python 2
    from urllib2 import urlopen
    from urllib2 import HTTPError
except ImportError:
    # Python 3+
    from urllib.request import urlopen
    from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
from .fixes import savemat
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
                          ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises", "raises",
           "with_setup", "assert_true", "assert_false", "assert_almost_equal",
           "assert_array_equal", "assert_array_almost_equal",
           "assert_array_less"]
try:
    from nose.tools import assert_in, assert_not_in
except ImportError:
    # Nose < 1.0.0
    def assert_in(x, container):
        assert_true(x in container, msg="%r in %r" % (x, container))
    def assert_not_in(x, container):
        assert_false(x in container, msg="%r in %r" % (x, container))
def _assert_less(a, b, msg=None):
    message = "%r is not lower than %r" % (a, b)
    if msg is not None:
        message += ": " + msg
    assert a < b, message
def _assert_greater(a, b, msg=None):
    message = "%r is not greater than %r" % (a, b)
    if msg is not None:
        message += ": " + msg
    assert a > b, message
# To remove when we support numpy 1.7
def assert_warns(warning_class, func, *args, **kw):
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        if w[0].category is not warning_class:
            raise AssertionError("First warning for %s is not a "
                                 "%s (is %s)"
                                 % (func.__name__, warning_class, w[0]))
    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    # XXX: once we may depend on python >= 2.6, this can be replaced by the
    # warnings module context manager.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        result = func(*args, **kw)
        if len(w) > 0:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, w))
    return result
def ignore_warnings(fn):
    """Decorator to catch and hide warnings without visual nesting"""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            return fn(*args, **kwargs)
            w[:] = []
    return wrapper
try:
    from nose.tools import assert_less
except ImportError:
    assert_less = _assert_less
try:
    from nose.tools import assert_greater
except ImportError:
    assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
                     err_msg='', verbose=True):
    actual, desired = np.asanyarray(actual), np.asanyarray(desired)
    if np.allclose(actual, desired, rtol=rtol, atol=atol):
        return
    msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
           'actual %s, desired %s') % (rtol, atol, actual, desired)
    raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
    assert_allclose = np.testing.assert_allclose
else:
    assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
    """Helper function to test error messages in exceptions"""
    try:
        function(*args, **kwargs)
        raise AssertionError("Should have raised %r" % exception(message))
    except exception as e:
        error_message = str(e)
        assert_in(message, error_message)
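# A minimal usage sketch of the helper above (not part of the original module):
#
#     >>> assert_raise_message(ValueError, "invalid literal", int, "not a number")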
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.
    Parameters
    ----------
    columns_dict: contains data as
                  columns_dict[column_name] = array of data
    dataname: name of data set
    matfile: file-like object or file name
    ordering: list of column_names, determines the ordering in the data set
    Note: this function transposes all arrays, while fetch_mldata only
    transposes 'data'; take that into account in the tests.
    """
    datasets = dict(columns_dict)
    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T
    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays
    datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name
    savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
    def __init__(self, mock_datasets):
        """Object that mocks the urlopen function to fake requests to mldata.
        `mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
        `data_dict` itself is a dictionary of {column_name: data_array},
        and `ordering` is a list of column_names to determine the ordering
        in the data set (see `fake_mldata` for details).
        When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a StringIO object and
        returns it. Otherwise, it raises an HTTPError.
        """
        self.mock_datasets = mock_datasets
    def __call__(self, urlname):
        dataset_name = urlname.split('/')[-1]
        if dataset_name in self.mock_datasets:
            resource_name = '_' + dataset_name
            from io import BytesIO
            matfile = BytesIO()
            dataset = self.mock_datasets[dataset_name]
            ordering = None
            if isinstance(dataset, tuple):
                dataset, ordering = dataset
            fake_mldata(dataset, resource_name, matfile, ordering)
            matfile.seek(0)
            return matfile
        else:
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)
def install_mldata_mock(mock_datasets):
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = urlopen
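# A hypothetical usage sketch of the mldata mocking helpers above; the dataset
# name and column names are made up:
#
#     >>> import numpy as np
#     >>> install_mldata_mock({'some-dataset': {'data': np.zeros((5, 2)),
#     ...                                       'label': np.arange(5)}})
#     >>> # sklearn.datasets.fetch_mldata('some-dataset') now hits the mock
#     >>> uninstall_mldata_mock()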
# Meta estimators need another estimator to be instantiated.
meta_estimators = ["OneVsOneClassifier",
                   "OutputCodeClassifier", "OneVsRestClassifier", "RFE",
                   "RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
other = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
def all_estimators(include_meta_estimators=False, include_other=False,
                   type_filter=None):
    """Get a list of all estimators from sklearn.
    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.
    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.
    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV.
    type_filter : string or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned.  Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types.
    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        if not(hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True
    all_classes = []
    # get parent folder
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        module = __import__(modname, fromlist="dummy")
        if ".tests." in modname:
            continue
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)
    all_classes = set(all_classes)
    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator)
                      and c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]
    if not include_other:
        estimators = [c for c in estimators if not c[0] in other]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in meta_estimators]
    if type_filter == 'classifier':
        estimators = [est for est in estimators
                      if issubclass(est[1], ClassifierMixin)]
    elif type_filter == 'regressor':
        estimators = [est for est in estimators
                      if issubclass(est[1], RegressorMixin)]
    elif type_filter == 'transformer':
        estimators = [est for est in estimators
                      if issubclass(est[1], TransformerMixin)]
    elif type_filter == 'cluster':
        estimators = [est for est in estimators
                      if issubclass(est[1], ClusterMixin)]
    elif type_filter is not None:
        raise ValueError("Parameter type_filter must be 'classifier', "
                         "'regressor', 'transformer', 'cluster' or None, got"
                         " %s." % repr(type_filter))
    # We sort in order to have reproducible test failures
    return sorted(estimators)
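# A minimal usage sketch (not part of the original module; the exact list
# depends on the installed sklearn version):
#
#     >>> classifiers = all_estimators(type_filter='classifier')
#     >>> all(isinstance(name, str) and issubclass(cls, ClassifierMixin)
#     ...     for name, cls in classifiers)
#     True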
def set_random_state(estimator, random_state=0):
    if "random_state" in estimator.get_params().keys():
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips test if matplotlib not installed. """
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import matplotlib
            matplotlib.use('Agg', warn=False)
            # this fails if no $DISPLAY specified
            matplotlib.pylab.figure()
        except:
            raise SkipTest('Matplotlib not available.')
        else:
            return func(*args, **kwargs)
    return run_test
 | 
	bsd-3-clause | 
| 
	kylerbrown/scikit-learn | 
	sklearn/covariance/tests/test_robust_covariance.py | 
	213 | 
	3359 | 
	# Author: Alexandre Gramfort <[email protected]>
#         Gael Varoquaux <[email protected]>
#         Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
    EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
    # Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
    # Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
    # 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Check that the code does not break with X.shape = (3, 1)
    # (i.e. n_support = n_samples)
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(3, 1))
    mcd = MinCovDet()
    mcd.fit(X)
def test_outlier_detection():
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
 | 
	bsd-3-clause | 
| 
	cl4rke/scikit-learn | 
	sklearn/metrics/tests/test_regression.py | 
	272 | 
	6066 | 
	from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
    y_true = np.arange(n_samples)
    y_pred = y_true + 1
    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(r2_score(y_true, y_pred),  0.995, 2)
    assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
    # All of length 3
    EXAMPLES = [
        ("continuous", [1, 2, 3], 1),
        ("continuous", [[1], [2], [3]], 1),
        ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
        ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
        ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
    ]
    for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
                                                            repeat=2):
        if type1 == type2 and n_out1 == n_out2:
            y_type, y_check1, y_check2, multioutput = _check_reg_targets(
                y1, y2, None)
            assert_equal(type1, y_type)
            if type1 == 'continuous':
                assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
                assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
            else:
                assert_array_equal(y_check1, y1)
                assert_array_equal(y_check2, y2)
        else:
            assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    y_true = [[0, 0]]*4
    y_pred = [[1, 1]]*4
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [1., 1.], decimal=2)
    assert_array_almost_equal(mae, [1., 1.], decimal=2)
    assert_array_almost_equal(r, [0., 0.], decimal=2)
    r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
    assert_array_almost_equal(r, [0, -3.5], decimal=2)
    assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                 multioutput='uniform_average'))
    evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                   multioutput='raw_values')
    assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator is
    # zero.
    y_true = [[1, 3], [-1, 2]]
    y_pred = [[1, 4], [-1, 1]]
    r2 = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(r2, [1., -3.], decimal=2)
    assert_equal(np.mean(r2), r2_score(y_true, y_pred,
                 multioutput='uniform_average'))
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(evs, [1., -3.], decimal=2)
    assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
    evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(maew, 0.475, decimal=3)
    assert_almost_equal(rw, 0.94, decimal=2)
    assert_almost_equal(evsw, 0.94, decimal=2)
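    # Worked check (derived from the values above, added for clarity): the
    # per-output MSE and MAE are [0.125, 0.5625] and [0.25, 0.625], so the
    # weights [0.4, 0.6] give 0.4 * 0.125 + 0.6 * 0.5625 = 0.3875 ~= 0.39 and
    # 0.4 * 0.25 + 0.6 * 0.625 = 0.475, matching the assertions.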
 | 
	bsd-3-clause | 
| 
	ahye/FYS2140-Resources | 
	examples/animation/func_animate_sin.py | 
	1 | 
	1284 | 
	#!/usr/bin/env python
"""
Created on Mon 2 Dec 2013
Example script showing how a sine wave can be animated using
function animation (matplotlib.animation.FuncAnimation).
@author Benedicte Emilie Braekken
"""
from numpy import *
from matplotlib.pyplot import *
from matplotlib import animation
def wave( x, t ):
    '''
    Describes a sine wave at time t and position x.
    '''
    omega = 1   # Angular frequency
    k = 1       # Wave number
    return sin( k * x - omega * t )
T = 10
dt = 0.01
nx = int( 1e3 )    # Number of spatial grid points
nt = int( T / dt ) # Number of time steps
t = 0
all_waves = [] # Empty list for storing the wave states
x = linspace( -pi, pi, nx )
while t < T:
    # Append a new wave state for each pass through the loop
    all_waves.append( wave( x, t ) )
    t += dt
# Draw the initial state
fig = figure() # Make sure to keep a reference to the figure
line, = plot( x, all_waves[0] )
draw()
# Constants for the animation
FPS = 60 # Frames per second
inter = 1000. / FPS # Time between frames (FuncAnimation expects milliseconds)
def init():
    '''
    Initializes the animation by clearing the line data.
    '''
    line.set_data( [], [] )
    return line,
def get_frame( frame ):
    '''
    Updates the line with the wave state for the given frame number.
    '''
    line.set_data( x, all_waves[ frame ] )
    return line,
anim = animation.FuncAnimation( fig, get_frame, init_func=init,
                                frames=nt, interval=inter, blit=True )
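# Optional extension (not part of the original example, assumes a movie writer
# such as ffmpeg is installed): the same animation object can be written to a
# video file instead of only being displayed.
SAVE_TO_FILE = False
if SAVE_TO_FILE:
    anim.save( 'sine_wave.mp4', fps=FPS, writer='ffmpeg' )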
show()
 | 
	mit | 
| 
	briandalessandro/courses | 
	deeplearning1/nbs/utils/utils.py | 
	8 | 
	7644 | 
	from __future__ import division,print_function
import math, os, json, sys, re
import cPickle as pickle
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
from collections import OrderedDict
import itertools
from itertools import chain
import pandas as pd
import PIL
from PIL import Image
from numpy.random import random, permutation, randn, normal, uniform, choice
from numpy import newaxis
import scipy
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from scipy.ndimage import imread
from sklearn.metrics import confusion_matrix
import bcolz
from sklearn.preprocessing import OneHotEncoder
from sklearn.manifold import TSNE
from IPython.lib.display import FileLink
import theano
from theano import shared, tensor as T
from theano.tensor.nnet import conv2d, nnet
from theano.tensor.signal import pool
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional
from keras.layers import TimeDistributed, Activation, SimpleRNN, GRU
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.regularizers import l2, activity_l2, l1, activity_l1
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils.layer_utils import layer_from_config
from keras.metrics import categorical_crossentropy, categorical_accuracy
from keras.layers.convolutional import *
from keras.preprocessing import image, sequence
from keras.preprocessing.text import Tokenizer
from vgg16 import *
from vgg16bn import *
np.set_printoptions(precision=4, linewidth=100)
to_bw = np.array([0.299, 0.587, 0.114])
def gray(img):
    return np.rollaxis(img,0,3).dot(to_bw)
def to_plot(img):
    return np.rollaxis(img, 0, 3).astype(np.uint8)
def plot(img):
    plt.imshow(to_plot(img))
def floor(x):
    return int(math.floor(x))
def ceil(x):
    return int(math.ceil(x))
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
    if type(ims[0]) is np.ndarray:
        ims = np.array(ims).astype(np.uint8)
        if (ims.shape[-1] != 3):
            ims = ims.transpose((0,2,3,1))
    f = plt.figure(figsize=figsize)
    for i in range(len(ims)):
        sp = f.add_subplot(rows, len(ims)//rows, i+1)
        if titles is not None:
            sp.set_title(titles[i], fontsize=18)
        plt.imshow(ims[i], interpolation=None if interp else 'none')
def do_clip(arr, mx):
    clipped = np.clip(arr, (1-mx)/1, mx)
    return clipped/clipped.sum(axis=1)[:, np.newaxis]
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',
                target_size=(224,224)):
    return gen.flow_from_directory(dirname, target_size=target_size,
            class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
def onehot(x):
    return to_categorical(x)
def wrap_config(layer):
    return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
def copy_layer(layer): return layer_from_config(wrap_config(layer))
def copy_layers(layers): return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
    for from_layer,to_layer in zip(from_layers, to_layers):
        to_layer.set_weights(from_layer.get_weights())
def copy_model(m):
    res = Sequential(copy_layers(m.layers))
    copy_weights(m.layers, res.layers)
    return res
def insert_layer(model, new_layer, index):
    res = Sequential()
    for i,layer in enumerate(model.layers):
        if i==index: res.add(new_layer)
        copied = layer_from_config(wrap_config(layer))
        res.add(copied)
        copied.set_weights(layer.get_weights())
    return res
def adjust_dropout(weights, prev_p, new_p):
    scal = (1-prev_p)/(1-new_p)
    return [o*scal for o in weights]
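# Worked example (hypothetical numbers, not from the original notebooks):
# lowering dropout from prev_p=0.5 to new_p=0.0 rescales every weight by
# (1 - 0.5) / (1 - 0.0) = 0.5, keeping the layer's expected output unchanged:
#   adjust_dropout([np.ones(3)], 0.5, 0.0)  ->  [array([ 0.5,  0.5,  0.5])]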
def get_data(path, target_size=(224,224)):
    batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)
    return np.concatenate([batches.next() for i in range(batches.nb_sample)])
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    (This function is copied from the scikit docs.)
    """
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print(cm)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
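# Minimal usage sketch (labels and predictions are assumed, not from the
# original notebooks):
#   cm = confusion_matrix(y_true, y_pred)
#   plot_confusion_matrix(cm, classes=['cat', 'dog'], normalize=True)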
def save_array(fname, arr):
    c=bcolz.carray(arr, rootdir=fname, mode='w')
    c.flush()
def load_array(fname):
    return bcolz.open(fname)[:]
def mk_size(img, r2c):
    r,c,_ = img.shape
    curr_r2c = r/c
    new_r, new_c = r,c
    if r2c>curr_r2c:
        new_r = floor(c*r2c)
    else:
        new_c = floor(r/r2c)
    arr = np.zeros((new_r, new_c, 3), dtype=np.float32)
    r2=(new_r-r)//2
    c2=(new_c-c)//2
    arr[floor(r2):floor(r2)+r,floor(c2):floor(c2)+c] = img
    return arr
def mk_square(img):
    x,y,_ = img.shape
    maxs = max(img.shape[:2])
    y2=(maxs-y)//2
    x2=(maxs-x)//2
    arr = np.zeros((maxs,maxs,3), dtype=np.float32)
    arr[floor(x2):floor(x2)+x,floor(y2):floor(y2)+y] = img
    return arr
def vgg_ft(out_dim):
    vgg = Vgg16()
    vgg.ft(out_dim)
    model = vgg.model
    return model
def vgg_ft_bn(out_dim):
    vgg = Vgg16BN()
    vgg.ft(out_dim)
    model = vgg.model
    return model
def get_classes(path):
    batches = get_batches(path+'train', shuffle=False, batch_size=1)
    val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)
    test_batches = get_batches(path+'test', shuffle=False, batch_size=1)
    return (val_batches.classes, batches.classes, onehot(val_batches.classes), onehot(batches.classes),
        val_batches.filenames, batches.filenames, test_batches.filenames)
def split_at(model, layer_type):
    layers = model.layers
    layer_idx = [index for index,layer in enumerate(layers)
                 if type(layer) is layer_type][-1]
    return layers[:layer_idx+1], layers[layer_idx+1:]
class MixIterator(object):
    def __init__(self, iters):
        self.iters = iters
        self.multi = type(iters) is list
        if self.multi:
            self.N = sum([it[0].N for it in self.iters])
        else:
            self.N = sum([it.N for it in self.iters])
    def reset(self):
        for it in self.iters: it.reset()
    def __iter__(self):
        return self
    def next(self, *args, **kwargs):
        if self.multi:
            nexts = [[next(it) for it in o] for o in self.iters]
            n0 = np.concatenate([n[0] for o in nexts for n in o])
            n1 = np.concatenate([n[1] for o in nexts for n in o])
            return (n0, n1)
        else:
            nexts = [next(it) for it in self.iters]
            n0 = np.concatenate([n[0] for n in nexts])
            n1 = np.concatenate([n[1] for n in nexts])
            return (n0, n1)
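# Usage sketch (hypothetical Keras-1-style batch generators, not from the
# original notebooks): MixIterator lets several generators be consumed as one:
#   mixed = MixIterator([train_batches, pseudo_batches])
#   model.fit_generator(mixed, samples_per_epoch=mixed.N, nb_epoch=1)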
 | 
	apache-2.0 | 
| 
	DinoCow/airflow | 
	tests/providers/apache/pinot/hooks/test_pinot.py | 
	3 | 
	9346 | 
	#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import os
import subprocess
import unittest
from unittest import mock
from airflow.exceptions import AirflowException
from airflow.providers.apache.pinot.hooks.pinot import PinotAdminHook, PinotDbApiHook
class TestPinotAdminHook(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.conn = conn = mock.MagicMock()
        self.conn.host = 'host'
        self.conn.port = '1000'
        self.conn.extra_dejson = {'cmd_path': './pinot-admin.sh'}
        class PinotAdminHookTest(PinotAdminHook):
            def get_connection(self, conn_id):
                return conn
        self.db_hook = PinotAdminHookTest()
    @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
    def test_add_schema(self, mock_run_cli):
        params = ["schema_file", False]
        self.db_hook.add_schema(*params)
        mock_run_cli.assert_called_once_with(
            [
                'AddSchema',
                '-controllerHost',
                self.conn.host,
                '-controllerPort',
                self.conn.port,
                '-schemaFile',
                params[0],
            ]
        )
    @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
    def test_add_table(self, mock_run_cli):
        params = ["config_file", False]
        self.db_hook.add_table(*params)
        mock_run_cli.assert_called_once_with(
            [
                'AddTable',
                '-controllerHost',
                self.conn.host,
                '-controllerPort',
                self.conn.port,
                '-filePath',
                params[0],
            ]
        )
    @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
    def test_create_segment(self, mock_run_cli):
        params = {
            "generator_config_file": "a",
            "data_dir": "b",
            "segment_format": "c",
            "out_dir": "d",
            "overwrite": True,
            "table_name": "e",
            "segment_name": "f",
            "time_column_name": "g",
            "schema_file": "h",
            "reader_config_file": "i",
            "enable_star_tree_index": False,
            "star_tree_index_spec_file": "j",
            "hll_size": 9,
            "hll_columns": "k",
            "hll_suffix": "l",
            "num_threads": 8,
            "post_creation_verification": True,
            "retry": 7,
        }
        self.db_hook.create_segment(**params)
        mock_run_cli.assert_called_once_with(
            [
                'CreateSegment',
                '-generatorConfigFile',
                params["generator_config_file"],
                '-dataDir',
                params["data_dir"],
                '-format',
                params["segment_format"],
                '-outDir',
                params["out_dir"],
                '-overwrite',
                params["overwrite"],
                '-tableName',
                params["table_name"],
                '-segmentName',
                params["segment_name"],
                '-timeColumnName',
                params["time_column_name"],
                '-schemaFile',
                params["schema_file"],
                '-readerConfigFile',
                params["reader_config_file"],
                '-starTreeIndexSpecFile',
                params["star_tree_index_spec_file"],
                '-hllSize',
                params["hll_size"],
                '-hllColumns',
                params["hll_columns"],
                '-hllSuffix',
                params["hll_suffix"],
                '-numThreads',
                params["num_threads"],
                '-postCreationVerification',
                params["post_creation_verification"],
                '-retry',
                params["retry"],
            ]
        )
    @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
    def test_upload_segment(self, mock_run_cli):
        params = ["segment_dir", False]
        self.db_hook.upload_segment(*params)
        mock_run_cli.assert_called_once_with(
            [
                'UploadSegment',
                '-controllerHost',
                self.conn.host,
                '-controllerPort',
                self.conn.port,
                '-segmentDir',
                params[0],
            ]
        )
    @mock.patch('subprocess.Popen')
    def test_run_cli_success(self, mock_popen):
        mock_proc = mock.MagicMock()
        mock_proc.returncode = 0
        mock_proc.stdout = io.BytesIO(b'')
        mock_popen.return_value = mock_proc
        params = ["foo", "bar", "baz"]
        self.db_hook.run_cli(params)
        params.insert(0, self.conn.extra_dejson.get('cmd_path'))
        mock_popen.assert_called_once_with(
            params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
        )
    @mock.patch('subprocess.Popen')
    def test_run_cli_failure_error_message(self, mock_popen):
        msg = b"Exception caught"
        mock_proc = mock.MagicMock()
        mock_proc.returncode = 0
        mock_proc.stdout = io.BytesIO(msg)
        mock_popen.return_value = mock_proc
        params = ["foo", "bar", "baz"]
        with self.assertRaises(AirflowException, msg=msg):
            self.db_hook.run_cli(params)
        params.insert(0, self.conn.extra_dejson.get('cmd_path'))
        mock_popen.assert_called_once_with(
            params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
        )
    @mock.patch('subprocess.Popen')
    def test_run_cli_failure_status_code(self, mock_popen):
        mock_proc = mock.MagicMock()
        mock_proc.returncode = 1
        mock_proc.stdout = io.BytesIO(b'')
        mock_popen.return_value = mock_proc
        self.db_hook.pinot_admin_system_exit = True
        params = ["foo", "bar", "baz"]
        with self.assertRaises(AirflowException):
            self.db_hook.run_cli(params)
        params.insert(0, self.conn.extra_dejson.get('cmd_path'))
        env = os.environ.copy()
        env.update({"JAVA_OPTS": "-Dpinot.admin.system.exit=true "})
        mock_popen.assert_called_once_with(
            params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=env
        )
class TestPinotDbApiHook(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.conn = conn = mock.MagicMock()
        self.conn.host = 'host'
        self.conn.port = '1000'
        self.conn.conn_type = 'http'
        self.conn.extra_dejson = {'endpoint': 'query/sql'}
        self.cur = mock.MagicMock()
        self.conn.cursor.return_value = self.cur
        self.conn.__enter__.return_value = self.cur
        self.conn.__exit__.return_value = None
        class TestPinotDBApiHook(PinotDbApiHook):
            def get_conn(self):
                return conn
            def get_connection(self, conn_id):
                return conn
        self.db_hook = TestPinotDBApiHook
    def test_get_uri(self):
        """
        Test on getting a pinot connection uri
        """
        db_hook = self.db_hook()
        self.assertEqual(db_hook.get_uri(), 'http://host:1000/query/sql')
    def test_get_conn(self):
        """
        Test on getting a pinot connection
        """
        conn = self.db_hook().get_conn()
        self.assertEqual(conn.host, 'host')
        self.assertEqual(conn.port, '1000')
        self.assertEqual(conn.conn_type, 'http')
        self.assertEqual(conn.extra_dejson.get('endpoint'), 'query/sql')
    def test_get_records(self):
        statement = 'SQL'
        result_sets = [('row1',), ('row2',)]
        self.cur.fetchall.return_value = result_sets
        self.assertEqual(result_sets, self.db_hook().get_records(statement))
    def test_get_first(self):
        statement = 'SQL'
        result_sets = [('row1',), ('row2',)]
        self.cur.fetchone.return_value = result_sets[0]
        self.assertEqual(result_sets[0], self.db_hook().get_first(statement))
    def test_get_pandas_df(self):
        statement = 'SQL'
        column = 'col'
        result_sets = [('row1',), ('row2',)]
        self.cur.description = [(column,)]
        self.cur.fetchall.return_value = result_sets
        df = self.db_hook().get_pandas_df(statement)
        self.assertEqual(column, df.columns[0])
        for i in range(len(result_sets)):  # pylint: disable=consider-using-enumerate
            self.assertEqual(result_sets[i][0], df.values.tolist()[i][0])
 | 
	apache-2.0 | 
| 
	rnowling/pop-gen-models | 
	single-pop/single_pop.py | 
	1 | 
	3379 | 
	import sys
import numpy as np
import numpy.random as npr
from sklearn.neighbors.kde import KernelDensity
from scipy.special import gammaln
import matplotlib.pyplot as plt
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
def log_factorial(n):
	return gammaln(n+1)
def log_multinomial(xs, ps):
	n = np.sum(xs)
	log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
	return log_prob
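# Worked example (hypothetical counts, not from the original data): for
# xs = [2, 1] and ps = [0.5, 0.5] the multinomial probability is
# 3! / (2! * 1!) * 0.5**3 = 0.375, so
# log_multinomial(np.array([2, 1]), np.array([0.5, 0.5])) is approximately
# log(0.375), up to the tiny constant added to ps to avoid log(0).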
class KDE_MCMC_Sampler(object):
	def __init__(self, observed_counts):
		"""
		Observed counts is a 3D matrix indexed by (locus, pop, haplotype),
		matching the unpacking of observed_counts.shape below.
		"""
		self.observed_counts = observed_counts
		self.individual_counts = observed_counts.sum(axis=2)
		self.observed_frequencies = normalize_haplotypes(observed_counts)
		self.n_loci, self.n_pop, self.n_haplotypes = self.observed_counts.shape
		
		# from bamova
		self.DWEIGHT = 1.0
		self.DADD = 0.00001
		self.SMALL_NUM = 0.0000000000001
		print "initializing frequencies"
		self.freq = np.zeros((self.n_loci, self.n_haplotypes))
		for l in xrange(self.n_loci):
			self.freq[l, :] = self.sample_locus_freq(self.observed_frequencies[l, 0, :])
	def sample_locus_freq(self, freq):
		alphas = self.DWEIGHT * freq + self.DADD + self.SMALL_NUM
		return npr.dirichlet(alphas)
	def locus_prob(self, locus_obs_counts, locus_freq):
		log_prob_sum = 0.0
		for p in xrange(self.n_pop):
			log_prob_sum += log_multinomial(locus_obs_counts[p], locus_freq)
		return log_prob_sum
	def step(self):
		total_log_prob = 0.0
		for l in xrange(self.n_loci):
			locus_indiv_counts = self.individual_counts[l, :]
			locus_obs_counts = self.observed_counts[l, :, :]
			log_prob = self.locus_prob(locus_obs_counts, self.freq[l, :])
			proposed_locus_freq = self.sample_locus_freq(self.freq[l, :])
			proposed_log_prob = self.locus_prob(locus_obs_counts, proposed_locus_freq)
				
			log_prob_ratio = proposed_log_prob - log_prob
			log_r = np.log(npr.random())
			if proposed_log_prob >= log_prob or log_r <= log_prob_ratio:
				self.freq[l, :] = proposed_locus_freq
				log_prob = proposed_log_prob
			total_log_prob += log_prob
		locus_prob = []
		for l in xrange(self.n_loci):
			log_prob = self.locus_prob(self.observed_counts[l, :, :], self.freq[l, :])
			locus_prob.append(log_prob)
		return self.freq, total_log_prob, locus_prob
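# Note on the accept/reject rule in KDE_MCMC_Sampler.step above: it is a
# Metropolis step written in log space.  A proposed frequency vector is
# accepted with probability min(1, exp(proposed_log_prob - log_prob)), i.e.
# always when it improves the log probability, and otherwise when a uniform
# draw u satisfies log(u) <= proposed_log_prob - log_prob.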
def plot_log_prob(flname, log_probs):
	plt.clf()
	plt.hold(True)
	plt.hist(log_probs, bins=30)
	plt.xlabel("Log Probability", fontsize=16)
	plt.xlim([min(log_probs), 0.0])
	plt.ylabel("Occurrences (Loci)", fontsize=16)
	plt.savefig(flname, dpi=200)
def simulate(occur_fl, n_steps, plot_flname, prob_flname):
	print "reading occurrences"
	observed_counts = read_counts(occur_fl)
	individual_counts = observed_counts.sum(axis=2)
	observed_frequencies = normalize_haplotypes(observed_counts)
	sampler = KDE_MCMC_Sampler(observed_counts)
	fl = open(prob_flname, "w")
	locus_log_prob = []
	for i in xrange(n_steps):
		freq, log_prob, locus_log_prob = sampler.step()
		print "step", i, "log prob", log_prob
		if i % 100 == 0:
			for j, prob in enumerate(locus_log_prob):
				fl.write("%s %s %s\n" % (i, j, prob))
	fl.close()
	plot_log_prob(plot_flname, locus_log_prob)
if __name__ == "__main__":
	occur_fl = sys.argv[1]
	n_steps = int(sys.argv[2])
	plot_flname = sys.argv[3]
	prob_flname = sys.argv[4]
	simulate(occur_fl, n_steps, plot_flname, prob_flname)
	 | 
	apache-2.0 | 
| 
	mljar/mljar-api-python | 
	tests/result_client_test.py | 
	1 | 
	4641 | 
	'''
ResultClient tests.
'''
import os
import unittest
import pandas as pd
import time
from mljar.client.project import ProjectClient
from mljar.client.dataset import DatasetClient
from mljar.client.experiment import ExperimentClient
from mljar.client.result import ResultClient
from mljar.exceptions import BadRequestException
from .project_based_test import ProjectBasedTest, get_postfix
class ResultClientTest(ProjectBasedTest):
    def setUp(self):
        proj_title = 'Test project-01'+get_postfix()
        proj_task = 'bin_class'
        self.expt_title = 'Test experiment-01'
        self.validation_kfolds = 5
        self.validation_shuffle = True
        self.validation_stratify = True
        self.validation_train_split = None
        self.algorithms = ['xgb']
        self.metric = 'logloss'
        self.tuning_mode = 'Normal'
        self.time_constraint = 1
        self.create_enseble = False
        # setup project
        self.project_client = ProjectClient()
        self.project = self.project_client.create_project(title = proj_title, task = proj_task)
        # load data
        df = pd.read_csv('tests/data/test_1.csv')
        cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
        target = 'class'
        # add dataset
        self.dataset = DatasetClient(self.project.hid).add_dataset_if_not_exists(df[cols], df[target])
    def tearDown(self):
        # clean
        self.project_client.delete_project(self.project.hid)
    def test_get_results_for_wrong_project(self):
        with self.assertRaises(BadRequestException) as context:
            # init result client
            rc = ResultClient('wrong-hid')
            self.assertTrue(rc is not None)
            # get results - should raise exception
            rc.get_results()
    def test_get_results_for_project(self):
        # init result client
        rc = ResultClient(self.project.hid)
        self.assertNotEqual(rc, None)
        # get results - should be empty
        results = rc.get_results()
        self.assertEqual(results, [])
        # add experiment
        ec = ExperimentClient(self.project.hid)
        # create new experiment
        self.experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
                                            self.validation_kfolds, self.validation_shuffle,
                                            self.validation_stratify, self.validation_train_split,
                                            self.algorithms, self.metric,
                                            self.tuning_mode, self.time_constraint, self.create_enseble)
        # wait some time till models are initialized
        time.sleep(60)
        # get results - should be some models there
        results = rc.get_results()
        self.assertNotEqual(len(results), 0)
    def test_get_results_for_experiment(self):
        # init result client
        rc = ResultClient(self.project.hid)
        self.assertNotEqual(rc, None)
        # get results - should be empty
        results = rc.get_results()
        self.assertEqual(results, [])
        # get results for wrong experiment hid
        results = rc.get_results('wrong-hid')
        self.assertEqual(results, [])
        # add experiment
        ec = ExperimentClient(self.project.hid)
        # create new experiment
        self.experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
                                            self.validation_kfolds, self.validation_shuffle,
                                            self.validation_stratify, self.validation_train_split,
                                            self.algorithms, self.metric,
                                            self.tuning_mode, self.time_constraint, self.create_enseble)
        # wait some time till models are initialized
        time.sleep(60)
        # get results for experiment - should be some models there
        results = rc.get_results(self.experiment.hid)
        self.assertNotEqual(len(results), 0)
        # get results for project
        project_results = rc.get_results()
        self.assertNotEqual(project_results, [])
        # get results for wrong experiment hid
        # all results from project should be returned
        results_2 = rc.get_results('wrong-hid')
        self.assertEqual(len(project_results), len(results_2))
        for r in project_results:
            # test __str__ method
            self.assertTrue('id' in str(r))
            self.assertTrue('model' in str(r))
            self.assertTrue('status' in str(r))
 | 
	apache-2.0 | 
| 
	ephes/scikit-learn | 
	sklearn/feature_extraction/tests/test_text.py | 
	110 | 
	34127 | 
	from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
                                   assert_warns_message, assert_raise_message,
                                   clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    return strip_accents_unicode(s).upper()
def strip_eacute(s):
    return s.replace('\xe9', 'e')
def split_tokenize(s):
    return s.split()
def lazy_analyze(s):
    return ['the_ultimate_feature']
def test_strip_accents():
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_unicode(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_unicode(a), expected)
    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = '\u0627'  # simple halef
    assert_equal(strip_accents_unicode(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_ascii(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_ascii(a), expected)
    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = ''  # halef has no direct ascii match
    assert_equal(strip_accents_ascii(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
    for Vectorizer in (CountVectorizer, HashingVectorizer):
        wa = Vectorizer(strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou  ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                    'etait', 'pas', 'tres', 'bon']
        assert_equal(wa(text), expected)
        text = "This is a test, really.\n\n I met Harry yesterday."
        expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                    'yesterday']
        assert_equal(wa(text), expected)
        wa = Vectorizer(input='file').build_analyzer()
        text = StringIO("This is a test with a file-like object!")
        expected = ['this', 'is', 'test', 'with', 'file', 'like',
                    'object']
        assert_equal(wa(text), expected)
        # with custom preprocessor
        wa = Vectorizer(preprocessor=uppercase).build_analyzer()
        text = ("J'ai mang\xe9 du kangourou  ce midi, "
                " c'\xe9tait pas tr\xeas bon.")
        expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                    'ETAIT', 'PAS', 'TRES', 'BON']
        assert_equal(wa(text), expected)
        # with custom tokenizer
        wa = Vectorizer(tokenizer=split_tokenize,
                        strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou  ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                    "c'etait", 'pas', 'tres', 'bon.']
        assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
    wa = CountVectorizer(analyzer="word", strip_accents='unicode',
                         ngram_range=(1, 2)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou  ce midi, c'\xe9tait pas tr\xeas bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(wa(text), expected)
def test_unicode_decode_error():
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou  ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)
    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
    cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou  ce midi, c'\xe9tait pas tr\xeas bon"
    expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
    assert_equal(cnga(text)[:5], expected)
    expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
    assert_equal(cnga(text)[-5:], expected)
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
    expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
    cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [' th', 'thi', 'his', 'is ', ' thi']
    assert_equal(cnga(text)[:5], expected)
    expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char_wb',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
    assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())
    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            assert_equal(vect.vocabulary_, vocab)
        else:
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=what_we_like)),
        ('tfidf', TfidfTransformer())])
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(what_we_like))
    assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
    vocab = {"pizza": 0, "beer": 0}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
    vocab = {"pizza": 1, "beer": 2}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
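# Reference for the smoothing behaviour exercised above (stated as the usual
# TfidfTransformer formulation, not re-derived from this exact version):
# smooth_idf=True computes idf = ln((1 + n_samples) / (1 + df)) + 1, so a
# feature column that is all zeros (df == 0) still gets a finite, positive
# weight, which is why the second case stays well behaved.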
def test_tfidf_no_smoothing():
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1
    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)
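# Worked values for the assertions above: sublinear_tf replaces tf with
# 1 + ln(tf), so the raw counts [1, 2, 3] become 1.0, 1 + ln(2) ~= 1.69 and
# 1 + ln(3) ~= 2.10, which is why the transformed values stay below 2 and 3.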
def test_vectorizer():
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizers give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)
        # stop word from the fixed list
        assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
        assert_false("copyright" in vocabulary)
        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)
    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)
    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)
    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)
    # error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
    tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
                         sublinear_tf=False)
    tv.norm = 'l1'
    assert_equal(tv._tfidf.norm, 'l1')
    tv.use_idf = True
    assert_true(tv._tfidf.use_idf)
    tv.smooth_idf = True
    assert_true(tv._tfidf.smooth_idf)
    tv.sublinear_tf = True
    assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert_true(np.min(X.data) > -1)
    assert_true(np.min(X.data) < 0)
    assert_true(np.max(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert_true(ngrams_nnz > token_nnz)
    assert_true(ngrams_nnz < 2 * token_nnz)
    # makes the feature values bounded
    assert_true(np.min(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
    cv = CountVectorizer(max_df=0.5)
    # test for Value error on unfitted/empty vocabulary
    assert_raises(ValueError, cv.get_feature_names)
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert_equal(len(cv.vocabulary_), n_features)
    feature_names = cv.get_feature_names()
    assert_equal(len(feature_names), n_features)
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
                        'salad', 'sparkling', 'tomato', 'water'],
                       feature_names)
    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
    vec_factories = (
        CountVectorizer,
        TfidfVectorizer,
    )
    expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
    expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
                               u'sparkling', u'water', u'the'])
    for vec_factory in vec_factories:
        # test bounded number of extracted features
        vectorizer = vec_factory(max_df=0.6, max_features=4)
        vectorizer.fit(ALL_FOOD_DOCS)
        assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
        assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
    # Regression test: max_features didn't work correctly in 0.14.
    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)
    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()
    # The most common feature is "the", with frequency 7.
    assert_equal(7, counts_1.max())
    assert_equal(7, counts_3.max())
    assert_equal(7, counts_None.max())
    # The most common feature should be the same
    assert_equal("the", features_1[np.argmax(counts_1)])
    assert_equal("the", features_3[np.argmax(counts_3)])
    assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)    # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
    vect.max_df = 1
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)    # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', min_df=1)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.min_df = 2
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdt} ignored
    assert_equal(len(vect.vocabulary_.keys()), 2)    # {ae} remain
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 4)
    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdet} ignored
    assert_equal(len(vect.vocabulary_.keys()), 1)    # {a} remains
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
    assert_array_equal([[3, 1, 1, 0, 0],
                        [1, 2, 0, 1, 1]], X)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0],
                        [1, 1, 0, 1, 1]], X)
    # check the ability to change the dtype
    vect = CountVectorizer(analyzer='char', max_df=1.0,
                           binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = HashingVectorizer(analyzer='char', non_negative=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X[0:1].data), 3)
    assert_equal(np.max(X[1:2].data), 2)
    assert_equal(X.dtype, np.float64)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X.data), 1)
    assert_equal(X.dtype, np.float64)
    # check the ability to change the dtype
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None, dtype=np.float64)
    X = vect.transform(test_data)
    assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
    # raw documents
    data = ALL_FOOD_DOCS
    for vectorizer in (TfidfVectorizer(), CountVectorizer()):
        transformed_data = vectorizer.fit_transform(data)
        inversed_data = vectorizer.inverse_transform(transformed_data)
        analyze = vectorizer.build_analyzer()
        for doc, inversed_terms in zip(data, inversed_data):
            terms = np.sort(np.unique(analyze(doc)))
            inversed_terms = np.sort(np.unique(inversed_terms))
            assert_array_equal(terms, inversed_terms)
        # Test that inverse_transform also works with numpy arrays
        transformed_data = transformed_data.toarray()
        inversed_data2 = vectorizer.inverse_transform(transformed_data)
        for terms, terms2 in zip(inversed_data, inversed_data2):
            assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.2, random_state=0)
    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'svc__loss': ('hinge', 'squared_hinge')
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset the bigram representation, which is used in the last
    # of the grid_search candidates, is considered the best estimator since
    # they all converge to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.1, random_state=0)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'vect__norm': ('l1', 'l2'),
        'svc__loss': ('hinge', 'squared_hinge'),
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset the bigram representation, which is used in the last
    # of the grid_search candidates, is considered the best estimator since
    # they all converge to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
    assert_equal(best_vectorizer.norm, 'l2')
    assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # tests that the count vectorizer works with cyrillic.
    document = (
        "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
        "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
        "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
        "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
        "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
        "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
        "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
        "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
        "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
        "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
        "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
        "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
        "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
        "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
        "\x8f.")
    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    assert_equal(X_counted.shape, (1, 15))
    vect = HashingVectorizer(norm=None, non_negative=True)
    X_hashed = vect.transform([document])
    assert_equal(X_hashed.shape, (1, 2 ** 20))
    # No collisions on such a small dataset
    assert_equal(X_counted.nnz, X_hashed.nnz)
    # When norm is None and non_negative, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    # non regression smoke test for inheritance issues
    vocabulary = ['pizza', 'celeri']
    vect = TfidfVectorizer(vocabulary=vocabulary)
    X_1 = vect.fit_transform(ALL_FOOD_DOCS)
    X_2 = vect.transform(ALL_FOOD_DOCS)
    assert_array_almost_equal(X_1.toarray(), X_2.toarray())
    assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
    instances = [
        HashingVectorizer(),
        HashingVectorizer(norm='l1'),
        HashingVectorizer(binary=True),
        HashingVectorizer(ngram_range=(1, 2)),
        CountVectorizer(),
        CountVectorizer(preprocessor=strip_tags),
        CountVectorizer(analyzer=lazy_analyze),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
        TfidfVectorizer(),
        TfidfVectorizer(analyzer=lazy_analyze),
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
    ]
    for orig in instances:
        s = pickle.dumps(orig)
        copy = pickle.loads(s)
        assert_equal(type(copy), orig.__class__)
        assert_equal(copy.get_params(), orig.get_params())
        assert_array_equal(
            copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
            orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
    # Ensure that deleting the stop_words_ attribute doesn't affect transform
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
    )
    for vect in fitted_vectorizers:
        vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        vect.stop_words_ = None
        stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        delattr(vect, 'stop_words_')
        stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        assert_array_equal(stop_None_transform, vect_transform)
        assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
    X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    orig = TfidfTransformer().fit(X)
    s = pickle.dumps(orig)
    copy = pickle.loads(s)
    assert_equal(type(copy), orig.__class__)
    assert_array_equal(
        copy.fit_transform(X).toarray(),
        orig.fit_transform(X).toarray())
def test_non_unique_vocab():
    vocab = ['a', 'b', 'c', 'a', 'a']
    vect = CountVectorizer(vocabulary=vocab)
    assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
    # np.nan can appear when using pandas to load text fields from a csv file
    # with missing values.
    message = "np.nan is an invalid document, expected byte or unicode string."
    exception = ValueError
    def func():
        hv = HashingVectorizer()
        hv.fit_transform(['hello world', np.nan, 'hello hello'])
    assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
    # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
    v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert_true(v.binary)
    X = v.fit_transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X.ravel(), [1, 1, 1, 0])
    X2 = v.transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
    vect = TfidfVectorizer(use_idf=True)
    vect.fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
    vect_vocab = TfidfVectorizer(vocabulary=["the"])
    vect_vocab_clone = clone(vect_vocab)
    vect_vocab.fit(ALL_FOOD_DOCS)
    vect_vocab_clone.fit(ALL_FOOD_DOCS)
    assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
 | 
	bsd-3-clause | 
| 
	IssamLaradji/scikit-learn | 
	sklearn/qda.py | 
	15 | 
	7139 | 
	"""
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis (QDA)
    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.
    The model fits a Gaussian density to each class.
    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes
    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.
    means_ : array-like, shape = [n_classes, n_features]
        Class means.
    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).
    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        n_k = min(n_features, number of elements in class k). It is the
        rotation of the Gaussian distribution, i.e. its principal axes.
    scalings_ : array-like, shape = [n_classes, n_features]
        Contains the scaling of the Gaussian
        distributions along the principal axes for each
        class, i.e. the variance in the rotated coordinate system.
    Examples
    --------
    >>> from sklearn.qda import QDA
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QDA()
    >>> clf.fit(X, y)
    QDA(priors=None, reg_param=0.0)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    sklearn.lda.LDA: Linear discriminant analysis
    """
    def __init__(self, priors=None, reg_param=0.):
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param
    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.
        tol : float, optional, default 1.0e-4
            Threshold on singular values used to estimate the rank of each
            class covariance.
        """
        X, y = check_X_y(X, y)
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            S2 = (S ** 2) / (len(Xg) - 1)
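            # Shrinking the eigenvalues toward reg_param below is the
            # regularized covariance (1 - reg_param) * Sigma_k + reg_param * I
            # expressed in the eigenbasis of Sigma_k.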
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = np.asarray(scalings)
        self.rotations_ = rotations
        return self
    def _decision_function(self, X):
        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T   # shape = [len(X), n_classes]
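        # norm2 holds the squared Mahalanobis distance of each sample to each
        # class mean (computed in the whitened, rotated basis), so the value
        # returned below is the class-conditional Gaussian log-density, up to
        # an additive constant, plus the log prior.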
        return (-0.5 * (norm2 + np.sum(np.log(self.scalings_), 1))
                + np.log(self.priors_))
    def decision_function(self, X):
        """Apply decision function to an array of samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).
        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func
    def predict(self, X):
        """Perform classification on an array of test vectors X.
        The predicted class C for each sample in X is returned.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred
    def predict_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
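        # Subtracting the row-wise maximum before exponentiating is the usual
        # softmax stabilization trick; the shift cancels in the normalization
        # below.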
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
 | 
	bsd-3-clause | 
| 
	elvandy/nltools | 
	nltools/data/adjacency.py | 
	1 | 
	34227 | 
	from __future__ import division
'''
This data class is for working with similarity/dissimilarity matrices
'''
__author__ = ["Luke Chang"]
__license__ = "MIT"
import os
import pandas as pd
import numpy as np
import six
from copy import deepcopy
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import MDS
from sklearn.utils import check_random_state
from scipy.spatial.distance import squareform
from scipy.stats import ttest_1samp
import seaborn as sns
import matplotlib.pyplot as plt
from nltools.stats import (correlation_permutation,
                           one_sample_permutation,
                           two_sample_permutation,
                           summarize_bootstrap,
                           matrix_permutation)
from nltools.stats import regress as regression
from nltools.plotting import (plot_stacked_adjacency,
                              plot_silhouette)
from nltools.utils import (all_same,
                           attempt_to_import,
                           concatenate,
                           _bootstrap_apply_func)
from .design_matrix import Design_Matrix
from joblib import Parallel, delayed
# Optional dependencies
nx = attempt_to_import('networkx', 'nx')
MAX_INT = np.iinfo(np.int32).max
class Adjacency(object):
    '''
    Adjacency is a class to represent Adjacency matrices as a vector rather
    than a 2-dimensional matrix. This makes it easier to perform data
    manipulation and analyses.
    Args:
        data: pandas data instance or list of files
        matrix_type: (str) type of matrix.  Possible values include:
                    ['distance','similarity','directed','distance_flat',
                    'similarity_flat','directed_flat']
        Y: Pandas DataFrame of training labels
        **kwargs: Additional keyword arguments
    '''
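    # Illustrative usage (a minimal sketch, not part of the original source):
    # a symmetric square matrix is stored internally as its flattened upper
    # triangle, e.g.
    #   >>> import numpy as np
    #   >>> m = np.array([[0., 1., 2.], [1., 0., 3.], [2., 3., 0.]])
    #   >>> adj = Adjacency(m, matrix_type='distance')
    #   >>> adj.shape()
    #   (3,)
    #   >>> adj.squareform().shape
    #   (3, 3)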
    def __init__(self, data=None, Y=None, matrix_type=None, labels=None,
                 **kwargs):
        if matrix_type is not None:
            if matrix_type.lower() not in ['distance','similarity','directed',
                                            'distance_flat','similarity_flat',
                                            'directed_flat']:
                raise ValueError("matrix_type must be [None,'distance', "
                                "'similarity','directed','distance_flat', "
                                "'similarity_flat','directed_flat']")
        if data is None:
            self.data = np.array([])
            self.matrix_type = 'empty'
            self.is_single_matrix = np.nan
            self.issymmetric = np.nan
        elif isinstance(data, list):
            if isinstance(data[0], Adjacency):
                tmp = concatenate(data)
                for item in ['data', 'matrix_type', 'Y','issymmetric']:
                    setattr(self, item, getattr(tmp,item))
            else:
                d_all = []; symmetric_all = []; matrix_type_all = []
                for d in data:
                    data_tmp, issymmetric_tmp, matrix_type_tmp, _ = self._import_single_data(d, matrix_type=matrix_type)
                    d_all.append(data_tmp)
                    symmetric_all.append(issymmetric_tmp)
                    matrix_type_all.append(matrix_type_tmp)
                if not all_same(symmetric_all):
                    raise ValueError('Not all matrices are of the same '
                                    'symmetric type.')
                if not all_same(matrix_type_all):
                    raise ValueError('Not all matrices are of the same matrix '
                                    'type.')
                self.data = np.array(d_all)
                self.issymmetric = symmetric_all[0]
                self.matrix_type = matrix_type_all[0]
            self.is_single_matrix = False
        else:
            self.data, self.issymmetric, self.matrix_type, self.is_single_matrix = self._import_single_data(data, matrix_type=matrix_type)
        if Y is not None:
            if isinstance(Y, six.string_types):
                if os.path.isfile(Y):
                    Y = pd.read_csv(Y, header=None, index_col=None)
            if isinstance(Y, pd.DataFrame):
                if self.data.shape[0] != len(Y):
                    raise ValueError("Y does not match the correct size of "
                                     "data")
                self.Y = Y
            else:
                raise ValueError("Make sure Y is a pandas data frame.")
        else:
            self.Y = pd.DataFrame()
        if labels is not None:
            if not isinstance(labels, (list, np.ndarray)):
                raise ValueError( "Make sure labels is a list or numpy array.")
            if self.is_single_matrix:
                if len(labels) != self.square_shape()[0]:
                    raise ValueError('Make sure the length of labels matches the shape of data.')
                self.labels = deepcopy(labels)
            else:
                if len(labels) != len(self):
                    if len(labels) != self.square_shape()[0]:
                        raise ValueError('Make sure length of labels either '
                                         'matches the number of Adjacency '
                                         'matrices or the size of a single '
                                         'matrix.')
                    else:
                        self.labels = list(labels) * len(self)
                else:
                    if np.any(np.array([len(x) for x in labels]) != self.square_shape()[0]):
                        raise ValueError("All lists of labels must be same length as shape of data.")
                    self.labels = deepcopy(labels)
        else:
            self.labels = None
    def __repr__(self):
        return ("%s.%s(shape=%s, square_shape=%s, Y=%s, is_symmetric=%s,"
                "matrix_type=%s)") % (
                    self.__class__.__module__,
                    self.__class__.__name__,
                    self.shape(),
                    self.square_shape(),
                    len(self.Y),
                    self.issymmetric,
                    self.matrix_type)
    def __getitem__(self,index):
        new = self.copy()
        if isinstance(index, int):
            new.data = np.array(self.data[index, :]).flatten()
            new.is_single_matrix = True
        else:
            new.data = np.array(self.data[index, :])
        if not self.Y.empty:
            new.Y = self.Y.iloc[index]
        return new
    def __len__(self):
        if self.is_single_matrix:
            return 1
        else:
            return self.data.shape[0]
    def __iter__(self):
        for x in range(len(self)):
            yield self[x]
    def __add__(self, y):
        new = deepcopy(self)
        if isinstance(y, (int, float)):
            new.data = new.data + y
        if isinstance(y, Adjacency):
            if self.shape() != y.shape():
                raise ValueError('Both Adjacency() instances need to be the '
                                 'same shape.')
            new.data = new.data + y.data
        return new
    def __sub__(self, y):
        new = deepcopy(self)
        if isinstance(y, (int, float)):
            new.data = new.data - y
        if isinstance(y, Adjacency):
            if self.shape() != y.shape():
                raise ValueError('Both Adjacency() instances need to be the '
                                 'same shape.')
            new.data = new.data - y.data
        return new
    def __mul__(self, y):
        new = deepcopy(self)
        if isinstance(y, (int, float)):
            new.data = new.data * y
        if isinstance(y, Adjacency):
            if self.shape() != y.shape():
                raise ValueError('Both Adjacency() instances need to be the '
                                 'same shape.')
            new.data = np.multiply(new.data, y.data)
        return new
    def _import_single_data(self, data, matrix_type=None):
        ''' Helper function to import single data matrix.'''
        if isinstance(data, six.string_types):
            if os.path.isfile(data):
                data = pd.read_csv(data)
            else:
                raise ValueError('Make sure you have specified a valid file '
                                 'path.')
        def test_is_single_matrix(data):
            if len(data.shape) == 1:
                return True
            else:
                return False
        if matrix_type is not None:
            if matrix_type.lower() == 'distance_flat':
                matrix_type = 'distance'
                data = np.array(data)
                issymmetric = True
                is_single_matrix = test_is_single_matrix(data)
            elif matrix_type.lower() == 'similarity_flat':
                matrix_type = 'similarity'
                data = np.array(data)
                issymmetric = True
                is_single_matrix = test_is_single_matrix(data)
            elif matrix_type.lower() == 'directed_flat':
                matrix_type = 'directed'
                data = np.array(data).flatten()
                issymmetric = False
                is_single_matrix = test_is_single_matrix(data)
            elif matrix_type.lower() in ['distance', 'similarity', 'directed']:
                if data.shape[0] != data.shape[1]:
                    raise ValueError('Data matrix must be square')
                data = np.array(data)
                matrix_type = matrix_type.lower()
                if matrix_type in ['distance', 'similarity']:
                    issymmetric = True
                    data = data[np.triu_indices(data.shape[0], k=1)]
                else:
                    issymmetric = False
                    if isinstance(data, pd.DataFrame):
                        data = data.values.flatten()
                    elif isinstance(data, np.ndarray):
                        data = data.flatten()
                is_single_matrix = True
        else:
            if len(data.shape) == 1:  # Single Vector
                try:
                    data = squareform(data)
                except ValueError:
                    print('Data is not flattened upper triangle from '
                          'similarity/distance matrix or flattened directed '
                          'matrix.')
                is_single_matrix = True
            elif data.shape[0] == data.shape[1]:  # Square Matrix
                is_single_matrix = True
            else:  # Rectangular Matrix
                data_all = deepcopy(data)
                try:
                    data = squareform(data_all[0, :])
                except ValueError:
                    print('Data is not flattened upper triangle from multiple '
                          'similarity/distance matrices or flattened directed '
                          'matrices.')
                is_single_matrix = False
            # Test if matrix is symmetrical
            if np.all(data[np.triu_indices(data.shape[0], k=1)] == data.T[np.triu_indices(data.shape[0], k=1)]):
                issymmetric = True
            else:
                issymmetric = False
            # Determine matrix type
            if issymmetric:
                if np.sum(np.diag(data)) == 0:
                    matrix_type = 'distance'
                elif np.sum(np.diag(data)) == data.shape[0]:
                    matrix_type = 'similarity'
                data = data[np.triu_indices(data.shape[0], k=1)]
            else:
                matrix_type = 'directed'
                data = data.flatten()
            if not is_single_matrix:
                data = data_all
        return (data, issymmetric, matrix_type, is_single_matrix)
    def isempty(self):
        '''Check if Adjacency object is empty'''
        return bool(self.matrix_type == 'empty')
    def squareform(self):
        '''Convert adjacency back to squareform'''
        if self.issymmetric:
            if self.is_single_matrix:
                return squareform(self.data)
            else:
                return [squareform(x.data) for x in self]
        else:
            if self.is_single_matrix:
                return self.data.reshape(int(np.sqrt(self.data.shape[0])),
                                         int(np.sqrt(self.data.shape[0])))
            else:
                return [x.data.reshape(int(np.sqrt(x.data.shape[0])),
                            int(np.sqrt(x.data.shape[0]))) for x in self]
    def plot(self, limit=3, *args, **kwargs):
        ''' Create Heatmap of Adjacency Matrix'''
        if self.is_single_matrix:
            f, a = plt.subplots(nrows=1, figsize=(7, 5))
            if self.labels is None:
                sns.heatmap(self.squareform(), square=True, ax=a,
                                   *args, **kwargs)
            else:
                sns.heatmap(self.squareform(), square=True, ax=a,
                                   xticklabels=self.labels,
                                   yticklabels=self.labels,
                                   *args, **kwargs)
        else:
            n_subs = np.minimum(len(self), limit)
            f, a = plt.subplots(nrows=n_subs, figsize=(7, len(self)*5))
            if self.labels is None:
                for i in range(n_subs):
                    sns.heatmap(self[i].squareform(), square=True, ax=a[i],
                                *args, **kwargs)
            else:
                for i in range(n_subs):
                    sns.heatmap(self[i].squareform(), square=True,
                                xticklabels=self.labels[i],
                                yticklabels=self.labels[i],
                                ax=a[i], *args, **kwargs)
        return f
    def mean(self, axis=0):
        ''' Calculate mean of Adjacency
        Args:
            axis:  calculate mean over features (0) or data (1).
                    For data it will be on upper triangle.
        Returns:
            mean:  float if single, adjacency if axis=0, np.array if axis=1
                    and multiple
        '''
        if self.is_single_matrix:
            return np.mean(self.data)
        else:
            if axis == 0:
                return Adjacency(data=np.mean(self.data, axis=axis),
                                 matrix_type=self.matrix_type + '_flat')
            elif axis == 1:
                return np.mean(self.data, axis=axis)
    def std(self, axis=0):
        ''' Calculate standard deviation of Adjacency
        Args:
            axis:  calculate std over features (0) or data (1).
                    For data it will be on upper triangle.
        Returns:
            std:  float if single, adjacency if axis=0, np.array if axis=1 and
                    multiple
        '''
        if self.is_single_matrix:
            return np.std(self.data)
        else:
            if axis == 0:
                return Adjacency(data=np.std(self.data, axis=axis),
                                 matrix_type=self.matrix_type + '_flat')
            elif axis == 1:
                return np.std(self.data, axis=axis)
    def shape(self):
        ''' Calculate shape of data. '''
        return self.data.shape
    def square_shape(self):
        ''' Calculate shape of squareform data. '''
        if self.matrix_type == 'empty':
            return np.array([])
        else:
            if self.is_single_matrix:
                return self.squareform().shape
            else:
                return self[0].squareform().shape
    def copy(self):
        ''' Create a copy of Adjacency object.'''
        return deepcopy(self)
    def append(self, data):
        ''' Append data to Adjacency instance
        Args:
            data:  Adjacency instance to append
        Returns:
            out: new appended Adjacency instance
        '''
        if not isinstance(data, Adjacency):
            raise ValueError('Make sure data is a Adjacency instance.')
        if self.isempty():
            out = data.copy()
        else:
            out = self.copy()
            if self.square_shape() != data.square_shape():
                raise ValueError('Data is not the same shape as Adjacency '
                                 'instance.')
            out.data = np.vstack([self.data, data.data])
            out.is_single_matrix = False
            if out.Y.size:
                out.Y = self.Y.append(data.Y)
        return out
    def write(self, file_name, method='long'):
        ''' Write out Adjacency object to csv file.
            Args:
                file_name (str):  name of file name to write
                method (str):     method to write out data ['long','square']
        '''
        if method not in ['long', 'square']:
            raise ValueError('Make sure method is ["long","square"].')
        if self.is_single_matrix:
            if method == 'long':
                out = pd.DataFrame(self.data).to_csv(file_name, index=None)
            elif method == 'square':
                out = pd.DataFrame(self.squareform()).to_csv(file_name,
                                                             index=None)
        else:
            if method == 'long':
                out = pd.DataFrame(self.data).to_csv(file_name, index=None)
            elif method == 'square':
                raise NotImplementedError('Need to decide how we should write '
                                          'out multiple matrices.  As separate '
                                          'files?')
    def similarity(self, data, plot=False, perm_type='2d', n_permute=5000, metric='spearman', **kwargs):
        ''' Calculate similarity between two Adjacency matrices.
        Default is to use spearman correlation and permutation test.
        Args:
            data: Adjacency data, or 1-d array same size as self.data
            perm_type: '1d','2d', or None
            metric: 'spearman','pearson','kendall'
        '''
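        # Roughly: perm_type='1d' permutes the flattened values
        # (correlation_permutation), while '2d' jointly permutes the rows and
        # columns of the square matrix (matrix_permutation, a Mantel-style
        # test). perm_type=None forces n_permute to 0, skipping the test.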
        if not isinstance(data, Adjacency):
            data2 = Adjacency(data)
        else:
            data2 = data.copy()
        if perm_type is None:
            n_permute=0
            similarity_func = correlation_permutation
        elif perm_type == '1d':
            similarity_func = correlation_permutation
        elif perm_type == '2d':
            similarity_func = matrix_permutation
        if self.is_single_matrix:
            if plot:
                plot_stacked_adjacency(self, data)
            return similarity_func(self.data, data2.data, metric=metric, n_permute=n_permute, **kwargs)
        else:
            if plot:
                _, a = plt.subplots(len(self))
                for i in a:
                    plot_stacked_adjacency(self, data, ax=i)
            return [similarity_func(x.data, data2.data, metric=metric, n_permute=n_permute, **kwargs) for x in self]
    def distance(self, method='correlation', **kwargs):
        ''' Calculate distance between images within an Adjacency() instance.
        Args:
            method: type of distance metric (can use any scikit learn or
                    sciypy metric)
        Returns:
            dist: Outputs a 2D distance matrix.
        '''
        return Adjacency(pairwise_distances(self.data, metric=method, **kwargs),
                         matrix_type='distance')
    def threshold(self, upper=None, lower=None, binarize=False):
        '''Threshold Adjacency instance. Provide upper and lower values or
           percentages to perform two-sided thresholding. Binarize will return
           a binarized Adjacency respecting thresholds if provided, otherwise respecting
           every non-zero value.
        Args:
            upper: (float or str) Upper cutoff for thresholding. If string
                    will interpret as percentile; can be None for one-sided
                    thresholding.
            lower: (float or str) Lower cutoff for thresholding. If string
                    will interpret as percentile; can be None for one-sided
                    thresholding.
            binarize (bool): return a binarized Adjacency respecting thresholds if
                    provided, otherwise binarize on every non-zero value;
                    default False
        Returns:
            Adjacency: thresholded Adjacency instance
        '''
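        # For example (illustrative only, assuming `adj` is an Adjacency
        # instance): adj.threshold(upper='95%', binarize=True) zeroes every
        # value below the 95th percentile and binarizes what remains.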
        b = self.copy()
        if isinstance(upper, six.string_types):
            if upper[-1] == '%':
                upper = np.percentile(b.data, float(upper[:-1]))
        if isinstance(lower, six.string_types):
            if lower[-1] == '%':
                lower = np.percentile(b.data, float(lower[:-1]))
        if upper and lower:
            b.data[(b.data < upper) & (b.data > lower)] = 0
        elif upper and not lower:
            b.data[b.data < upper] = 0
        elif lower and not upper:
            b.data[b.data > lower] = 0
        if binarize:
            b.data[b.data != 0] = 1
        return b
    def to_graph(self):
        ''' Convert Adjacency into networkx graph.  only works on
            single_matrix for now.'''
        if self.is_single_matrix:
            if self.matrix_type == 'directed':
                G = nx.DiGraph(self.squareform())
            else:
                G = nx.Graph(self.squareform())
            if self.labels is not None:
                labels = {x:y for x,y in zip(G.nodes,self.labels)}
                nx.relabel_nodes(G, labels, copy=False)
            return G
        else:
            raise NotImplementedError('This function currently only works on '
                                      'single matrices.')
    def ttest(self, permutation=False, **kwargs):
        ''' Calculate ttest across samples.
        Args:
            permutation: (bool) Run ttest as permutation. Note this can be very slow.
        Returns:
            out: (dict) contains Adjacency instances of t values (or mean if
                 running permutation) and Adjacency instance of p values.
        '''
        if self.is_single_matrix:
            raise ValueError('t-test cannot be run on single matrices.')
        if permutation:
            t = []; p = []
            for i in range(self.data.shape[1]):
                stats = one_sample_permutation(self.data[:, i], **kwargs)
                t.append(stats['mean'])
                p.append(stats['p'])
            t = Adjacency(np.array(t))
            p = Adjacency(np.array(p))
        else:
            t = self.mean().copy()
            p = deepcopy(t)
            t.data, p.data = ttest_1samp(self.data, 0, 0)
        return {'t': t, 'p':p}
    def plot_label_distance(self, labels=None, ax=None):
        ''' Create a violin plot indicating within and between label distance
            Args:
                labels (np.array):  numpy array of labels to plot
            Returns:
                violin plot handles
        '''
        if not self.is_single_matrix:
            raise ValueError('This function only works on single adjacency '
                             'matrices.')
        distance = pd.DataFrame(self.squareform())
        if labels is None:
            labels = np.array(deepcopy(self.labels))
        else:
            if len(labels) != distance.shape[0]:
                raise ValueError('Labels must be same length as distance matrix')
        out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)
        for i in np.unique(labels):
            tmp_w = pd.DataFrame(columns=out.columns, index=None)
            tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]
            tmp_w['Type'] = 'Within'
            tmp_w['Group'] = i
            tmp_b = pd.DataFrame(columns=out.columns, index=None)
            tmp_b['Distance'] = distance.loc[labels == i, labels != i].values.flatten()
            tmp_b['Type'] = 'Between'
            tmp_b['Group'] = i
            out = out.append(tmp_w).append(tmp_b)
        f = sns.violinplot(x="Group", y="Distance", hue="Type", data=out, split=True, inner='quartile',
              palette={"Within": "lightskyblue", "Between": "red"}, ax=ax)
        f.set_ylabel('Average Distance')
        f.set_title('Average Group Distance')
        return f
    def stats_label_distance(self, labels=None, n_permute=5000, n_jobs=-1):
        ''' Calculate permutation tests on within and between label distance.
            Args:
                labels (np.array):  numpy array of labels to plot
                n_permute (int): number of permutations to run (default=5000)
            Returns:
                dict:  dictionary of within and between group differences
                        and p-values
        '''
        if not self.is_single_matrix:
            raise ValueError('This function only works on single adjacency '
                             'matrices.')
        distance = pd.DataFrame(self.squareform())
        if labels is None:
            labels = np.array(deepcopy(self.labels))
        else:
            if len(labels) != distance.shape[0]:
                raise ValueError('Labels must be same length as distance matrix')
        out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)
        for i in np.unique(labels):
            tmp_w = pd.DataFrame(columns=out.columns, index=None)
            tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]
            tmp_w['Type'] = 'Within'
            tmp_w['Group'] = i
            tmp_b = pd.DataFrame(columns=out.columns, index=None)
            tmp_b['Distance'] = distance.loc[labels == i, labels != i].values.flatten()
            tmp_b['Type'] = 'Between'
            tmp_b['Group'] = i
            out = out.append(tmp_w).append(tmp_b)
        stats = dict()
        for i in np.unique(labels):
            # Within group test
            tmp1 = out.loc[(out['Group'] == i) & (out['Type'] == 'Within'), 'Distance']
            tmp2 = out.loc[(out['Group'] == i) & (out['Type'] == 'Between'), 'Distance']
            stats[str(i)] = two_sample_permutation(tmp1, tmp2,
                                        n_permute=n_permute, n_jobs=n_jobs)
        return stats
    def plot_silhouette(self, labels=None, ax=None, permutation_test=True,
                        n_permute=5000, **kwargs):
        '''Create a silhouette plot'''
        distance = pd.DataFrame(self.squareform())
        if labels is None:
            labels = np.array(deepcopy(self.labels))
        else:
            if len(labels) != distance.shape[0]:
                raise ValueError('Labels must be same length as distance matrix')
        (f, outAll) = plot_silhouette(distance, labels, ax=ax,
                                      permutation_test=permutation_test,
                                      n_permute=n_permute, **kwargs)
        return (f,outAll)
    def bootstrap(self, function, n_samples=5000, save_weights=False,
                    n_jobs=-1, random_state=None, *args, **kwargs):
        '''Bootstrap an Adjacency method.
            Example Usage:
            b = dat.bootstrap('mean', n_samples=5000)
            b = dat.bootstrap('predict', n_samples=5000, algorithm='ridge')
            b = dat.bootstrap('predict', n_samples=5000, save_weights=True)
        Args:
            function: (str) method to apply to data for each bootstrap
            n_samples: (int) number of samples to bootstrap with replacement
            save_weights: (bool) Save each bootstrap iteration
                        (useful for aggregating many bootstraps on a cluster)
            n_jobs: (int) The number of CPUs to use to do the computation.
                        -1 means all CPUs.
        Returns:
            output: summarized studentized bootstrap output
        '''
        random_state = check_random_state(random_state)
        seeds = random_state.randint(MAX_INT, size=n_samples)
        bootstrapped = Parallel(n_jobs=n_jobs)(
                        delayed(_bootstrap_apply_func)(self,
                        function, random_state=seeds[i], *args, **kwargs)
                        for i in range(n_samples))
        bootstrapped = Adjacency(bootstrapped)
        return summarize_bootstrap(bootstrapped, save_weights=save_weights)
    def plot_mds(self, n_components=2, metric=True, labels_color=None,
                 cmap=plt.cm.hot_r, n_jobs=-1, view=(30, 20),
                 figsize = [12,8], ax = None, *args, **kwargs):
        ''' Plot Multidimensional Scaling
            Args:
                n_components: (int) Number of dimensions to project (can be 2 or 3)
                metric: (bool) Perform metric or non-metric dimensional scaling; default True
                labels_color: (list) list of colors for the labels; must be the same length as self.labels
                n_jobs: (int) Number of parallel jobs
                view: (tuple) view for 3-Dimensional plot; default (30,20)
            Returns:
                fig: returns matplotlib figure
        '''
        if self.matrix_type != 'distance':
            raise ValueError("MDS only works on distance matrices.")
        if not self.is_single_matrix:
            raise ValueError("MDS only works on single matrices.")
        if n_components not in [2,3]:
            raise ValueError('Cannot plot {0}-d image'.format(n_components))
        if labels_color is not None:
            if self.labels is None:
                raise ValueError("Make sure that Adjacency object has labels specified.")
            if len(self.labels) != len(labels_color):
                raise ValueError("Length of labels_color must match self.labels.")
        # Run MDS
        mds = MDS(n_components=n_components, metric=metric, n_jobs=n_jobs,
                           dissimilarity="precomputed", *args, **kwargs)
        proj = mds.fit_transform(self.squareform())
        # Create Plot
        returnFig = False
        if ax is None:  # Create axis
            returnFig = True
            fig = plt.figure(figsize=figsize)
            if n_components == 3:
                ax = fig.add_subplot(111, projection='3d')
                ax.view_init(*view)
            elif n_components == 2:
                ax = fig.add_subplot(111)
        # Plot dots
        if n_components == 3:
            ax.scatter(proj[:, 0], proj[:, 1], proj[:, 2], s=1, c='k')
        elif n_components == 2:
            ax.scatter(proj[:, 0], proj[:, 1], s=1, c='k')
        # Plot labels
        if labels_color is None:
            labels_color = ['black'] * len(self.labels)
        if n_components == 3:
            for ((x, y, z), label, color) in zip(proj, self.labels, labels_color):
                 ax.text(x, y, z, label, color='white', #color,
                         bbox=dict(facecolor=color, alpha=1, boxstyle="round,pad=0.3"))
        else:
            for ((x, y), label, color) in zip(proj, self.labels, labels_color):
                ax.text(x, y, label, color='white', #color,
                        bbox=dict(facecolor=color, alpha=1, boxstyle="round,pad=0.3"))
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        if returnFig:
            return fig
    def distance_to_similarity(self, beta=1):
        '''Convert distance matrix to similarity matrix
        Args:
            beta: parameter to scale exponential function (default: 1)
        Returns:
            Adjacency object
        '''
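        # Distances are passed through an exponential kernel,
        # similarity = exp(-beta * d / std(d)), so a distance of zero maps to
        # a similarity of 1 and larger distances decay toward 0.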
        if self.matrix_type == 'distance':
            return Adjacency(np.exp(-beta*self.squareform()/self.squareform().std()),
                             labels=self.labels, matrix_type='similarity')
        else:
            raise ValueError('Matrix is not a distance matrix.')
    def similarity_to_distance(self):
        '''Convert similarity matrix to distance matrix'''
        if self.matrix_type == 'similarity':
            return Adjacency(1-self.squareform(),
                             labels=self.labels, matrix_type='distance')
        else:
            raise ValueError('Matrix is not a similarity matrix.')
    def within_cluster_mean(self, clusters = None):
        ''' This function calculates mean within cluster labels
        Args:
            clusters: list of cluster labels
        Returns:
            dict: within cluster means
        '''
        distance=pd.DataFrame(self.squareform())
        clusters = np.array(clusters)
        if len(clusters) != distance.shape[0]:
            raise ValueError('Cluster labels must be same length as distance matrix')
        out = {}
        for i in list(set(clusters)):
            out[i] = np.mean(distance.loc[clusters==i,clusters==i].values[np.triu_indices(sum(clusters==i),k=1)])
        return out
    def regress(self, X, mode='ols', **kwargs):
        ''' Run a regression on an adjacency instance.
            You can decompose an adjacency instance with another adjacency instance.
            You can also decompose each cell of the adjacency matrix by passing a Design_Matrix instance.
            Args:
                X: Design matrix; can be an Adjacency or Design_Matrix instance
                mode: type of regression (default: 'ols')
            Returns:
                stats: (dict) dictionary of regression statistics ('beta', 't', 'p', 'residual')
        '''
        stats = {}
        if isinstance(X, Adjacency):
            if X.square_shape()[0] != self.square_shape()[0]:
                raise ValueError('Adjacency instances must be the same size.')
            b,t,p,_,res = regression(X.data.T, self.data, mode=mode, **kwargs)
            stats['beta'],stats['t'],stats['p'],stats['residual'] = (b,t,p,res)
        elif isinstance(X, Design_Matrix):
            if X.shape[0] != len(self):
                raise ValueError('Design matrix must have same number of observations as Adjacency')
            b,t,p,df,res = regression(X, self.data, mode=mode, **kwargs)
            mode = 'ols'
            stats['beta'], stats['t'], stats['p'] = [x for x in self[:3]]
            stats['beta'].data, stats['t'].data, stats['p'].data = b.squeeze(), t.squeeze(), p.squeeze()
            stats['residual'] = self.copy()
            stats['residual'].data = res
        else:
            raise ValueError('X must be a Design_Matrix or Adjacency Instance.')
        return stats
 | 
	mit | 
| 
	tmhm/scikit-learn | 
	examples/svm/plot_weighted_samples.py | 
	188 | 
	1943 | 
	"""
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    # plot the decision function
    xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # plot the line, the points, and the nearest vectors to the plane
    axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model with sample weights and, for reference, without them
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
 | 
	bsd-3-clause | 
| 
	siutanwong/scikit-learn | 
	examples/cluster/plot_mini_batch_kmeans.py | 
	265 | 
	4081 | 
	"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
                      n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
                                  mbk_means_cluster_centers)
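# order[k] is the index of the MiniBatchKMeans center closest to KMeans center k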
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
    my_members = k_means_labels == k
    cluster_center = k_means_cluster_centers[k]
    ax.plot(X[my_members, 0], X[my_members, 1], 'w',
            markerfacecolor=col, marker='.')
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8,  'train time: %.2fs\ninertia: %f' % (
    t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
    my_members = mbk_means_labels == order[k]
    cluster_center = mbk_means_cluster_centers[order[k]]
    ax.plot(X[my_members, 0], X[my_members, 1], 'w',
            markerfacecolor=col, marker='.')
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
         (t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
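# (cluster labels are 0, 1 and 2, so comparing to 4 gives an all-False boolean array)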
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
        markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
        markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
 | 
	bsd-3-clause | 
| 
	BiaDarkia/scikit-learn | 
	examples/tree/plot_iris.py | 
	30 | 
	2062 | 
	"""
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "ryb"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                [1, 2], [1, 3], [2, 3]]):
    # We only take the two corresponding features
    X = iris.data[:, pair]
    y = iris.target
    # Train
    clf = DecisionTreeClassifier().fit(X, y)
    # Plot the decision boundary
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])
    # Plot the training points
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    cmap=plt.cm.RdYlBu, edgecolor='black', s=15)
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend(loc='lower right', borderpad=0, handletextpad=0)
plt.axis("tight")
plt.show()
 | 
	bsd-3-clause | 
| 
	erh3cq/hyperspy | 
	hyperspy/_signals/signal1d.py | 
	2 | 
	61717 | 
	# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of  HyperSpy.
#
#  HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
#  HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with  HyperSpy.  If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
import scipy.interpolate
import scipy as sp
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval, SpikesRemovalInteractive
from hyperspy.models.model1d import Model1D
from hyperspy.misc.lowess_smooth import lowess
from hyperspy.defaults_parser import preferences
from hyperspy.signal_tools import (
    Signal1DCalibration,
    SmoothingSavitzkyGolay,
    SmoothingLowess,
    SmoothingTV,
    ButterworthFilter)
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea, _get_background_estimator
from hyperspy._signals.lazy import LazySignal
from hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC, SPIKES_REMOVAL_TOOL_DOCSTRING
from hyperspy.docstrings.signal import (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
                                        SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
from hyperspy.docstrings.plot import (
    BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
                      medfilt_radius=5, maxpeakn=30000, peakgroup=10,
                      subchannel=True,):
    """Find peaks along a 1D line.
    Function to locate the positive peaks in a noisy x-y data set.
    Detects peaks by looking for downward zero-crossings in the first
    derivative that exceed 'slope_thresh'.
    Returns an array containing position, height, and width of each peak.
    Sorted by position.
    'slope_thresh' and 'amp_thresh', control sensitivity: higher values
    will neglect wider peaks (slope) and smaller features (amp),
    respectively.
    Parameters
    ----------
    y : array
        1D input array, e.g. a spectrum
    x : array (optional)
        1D array describing the calibration of y (must have same shape as y)
    slope_thresh : float (optional)
                   1st derivative threshold to count the peak;
                   higher values will neglect broader features;
                   default is set to 0.
    amp_thresh : float (optional)
                 intensity threshold below which peaks are ignored;
                 higher values will neglect smaller features;
                 default is set to 10% of max(y).
    medfilt_radius : int (optional)
                     median filter window to apply to smooth the data
                     (see scipy.signal.medfilt);
                     if 0, no filter will be applied;
                     default is set to 5.
    peakgroup : int (optional)
                number of points around the "top part" of the peak that
                are taken to estimate the peak height; for spikes or
                very narrow peaks, keep peakgroup=1 or 2; for broad or
                noisy peaks, make PeakGroup larger to reduce the effect
                of noise;
                default is set to 10.
    maxpeakn : int (optional)
              number of maximum detectable peaks;
              default is set to 30000.
    subchannel : bool (optional)
             default is set to True.
    Returns
    -------
    P : structured array of shape (npeaks)
        contains fields: 'position', 'width', and 'height' for each peak.
    Examples
    --------
    >>> x = np.arange(0,50,0.01)
    >>> y = np.cos(x)
    >>> peaks = find_peaks_ohaver(y, x, 0, 0)
    Notes
    -----
    Original code from T. C. O'Haver, 1995.
    Version 2  Last revised Oct 27, 2006 Converted to Python by
    Michael Sarahan, Feb 2011.
    Revised to handle edges better.  MCS, Mar 2011
    """
    if x is None:
        x = np.arange(len(y), dtype=np.int64)
    if not amp_thresh:
        amp_thresh = 0.1 * y.max()
    peakgroup = np.round(peakgroup)
    if medfilt_radius:
        d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))
    else:
        d = np.gradient(y)
    n = np.round(peakgroup / 2 + 1)
    peak_dt = np.dtype([('position', float),
                        ('height', float),
                        ('width', float)])
    P = np.array([], dtype=peak_dt)
    peak = 0
    for j in range(len(y) - 4):
        if np.sign(d[j]) > np.sign(d[j + 1]):  # Detects zero-crossing
            if np.sign(d[j + 1]) == 0:
                continue
            # if slope of derivative is larger than slope_thresh
            if d[j] - d[j + 1] > slope_thresh:
                # if height of peak is larger than amp_thresh
                if y[j] > amp_thresh:
                    # the next section is very slow, and actually messes
                    # things up for images (discrete pixels),
                    # so by default, don't do subchannel precision in the
                    # 1D peakfind step.
                    if subchannel:
                        xx = np.zeros(peakgroup)
                        yy = np.zeros(peakgroup)
                        s = 0
                        for k in range(peakgroup):
                            groupindex = int(j + k - n + 1)
                            if groupindex < 1:
                                xx = xx[1:]
                                yy = yy[1:]
                                s += 1
                                continue
                            elif groupindex > y.shape[0] - 1:
                                xx = xx[:groupindex - 1]
                                yy = yy[:groupindex - 1]
                                break
                            xx[k - s] = x[groupindex]
                            yy[k - s] = y[groupindex]
                        avg = np.average(xx)
                        stdev = np.std(xx)
                        xxf = (xx - avg) / stdev
                        # Fit parabola to log10 of sub-group with
                        # centering and scaling
                        yynz = yy != 0
                        coef = np.polyfit(
                            xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
                        c1 = coef[2]
                        c2 = coef[1]
                        c3 = coef[0]
                        with np.errstate(invalid='ignore'):
                            width = np.linalg.norm(stdev * 2.35703 /
                                                   (np.sqrt(2) * np.sqrt(-1 *
                                                                         c3)))
                        # if the peak is too narrow for least-squares
                        # technique to work  well, just use the max value
                        # of y in the sub-group of points near peak.
                        if peakgroup < 7:
                            height = np.max(yy)
                            position = xx[np.argmin(np.abs(yy - height))]
                        else:
                            position = - ((stdev * c2 / (2 * c3)) - avg)
                            height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
                    # Fill results array P. One row for each peak
                    # detected, containing the
                    # peak position (x-value) and peak height (y-value).
                    else:
                        position = x[j]
                        height = y[j]
                        # no way to know peak width without
                        # the above measurements.
                        width = 0
                    if (not np.isnan(position) and 0 < position < x[-1]):
                        P = np.hstack((P,
                                       np.array([(position, height, width)],
                                                dtype=peak_dt)))
                        peak += 1
    # return only the part of the array that contains peaks
    # (not the whole maxpeakn x 3 array)
    if len(P) > maxpeakn:
        minh = np.sort(P['height'])[-maxpeakn]
        P = P[P['height'] >= minh]
    # Sorts the values as a function of position
    P.sort(0)
    return P
def interpolate1D(number_of_interpolation_points, data):
    ip = number_of_interpolation_points
    ch = len(data)
    old_ax = np.linspace(0, 100, ch)
    new_ax = np.linspace(0, 100, ch * ip - (ip - 1))
    interpolator = scipy.interpolate.interp1d(old_ax, data)
    return interpolator(new_ax)
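# Worked example (illustrative): with ``ip = 5`` and a 100-channel input the
# interpolated axis has ``100 * 5 - (5 - 1) = 496`` points, i.e.
#     >>> interpolate1D(5, np.arange(100.)).shape
#     (496,)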
def _estimate_shift1D(data, **kwargs):
    mask = kwargs.get('mask', None)
    ref = kwargs.get('ref', None)
    interpolate = kwargs.get('interpolate', True)
    ip = kwargs.get('ip', 5)
    data_slice = kwargs.get('data_slice', slice(None))
    if bool(mask):
        # asarray is required for consistency because argmax
        # returns a numpy scalar array
        return np.asarray(np.nan)
    data = data[data_slice]
    if interpolate is True:
        data = interpolate1D(ip, data)
    return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
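# Example sketch (illustrative values): a feature sitting one channel lower in
# ``data`` than in ``ref`` gives a shift of +1 index unit, e.g.
#     >>> _estimate_shift1D(np.array([1., 0., 0., 0.]),
#     ...                   ref=np.array([0., 1., 0., 0.]), interpolate=False)
#     1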
def _shift1D(data, **kwargs):
    shift = kwargs.get('shift', 0.)
    original_axis = kwargs.get('original_axis', None)
    fill_value = kwargs.get('fill_value', np.nan)
    kind = kwargs.get('kind', 'linear')
    offset = kwargs.get('offset', 0.)
    scale = kwargs.get('scale', 1.)
    size = kwargs.get('size', 2)
    if np.isnan(shift) or shift == 0:
        return data
    axis = np.linspace(offset, offset + scale * (size - 1), size)
    si = sp.interpolate.interp1d(original_axis,
                                 data,
                                 bounds_error=False,
                                 fill_value=fill_value,
                                 kind=kind)
    offset = float(offset - shift)
    axis = np.linspace(offset, offset + scale * (size - 1), size)
    return si(axis)
class Signal1D(BaseSignal, CommonSignal1D):
    """
    """
    _signal_dimension = 1
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.axes_manager.signal_dimension != 1:
            self.axes_manager.set_signal_dimension(1)
    def _get_spikes_diagnosis_histogram_data(self, signal_mask=None,
                                             navigation_mask=None,
                                             **kwargs):
        self._check_signal_dimension_equals_one()
        dc = self.data
        if signal_mask is not None:
            dc = dc[..., ~signal_mask]
        if navigation_mask is not None:
            dc = dc[~navigation_mask, :]
        der = np.abs(np.diff(dc, 1, -1))
        n = ((~navigation_mask).sum() if navigation_mask is not None
             else self.axes_manager.navigation_size)
        # arbitrary cutoff for number of spectra necessary before histogram
        # data is compressed by finding maxima of each spectrum
        tmp = BaseSignal(der) if n < 2000 else BaseSignal(
            np.ravel(der.max(-1)))
        # get histogram signal using smart binning and plot
        return tmp.get_histogram(**kwargs)
    def spikes_diagnosis(self, signal_mask=None,
                         navigation_mask=None,
                         **kwargs):
        """Plots a histogram to help in choosing the threshold for
        spikes removal.
        Parameters
        ----------
        %s
        %s
        **kwargs : dict
            Keyword arguments pass to
            :py:meth:`~hyperspy.signal.signal.BaseSignal.get_histogram`
        See also
        --------
        spikes_removal_tool
        """
        tmph = self._get_spikes_diagnosis_histogram_data(signal_mask,
                                                         navigation_mask,
                                                         **kwargs)
        tmph.plot()
        # Customize plot appearance
        plt.gca().set_title('')
        plt.gca().fill_between(tmph.axes_manager[0].axis,
                               tmph.data,
                               facecolor='#fddbc7',
                               interpolate=True,
                               color='none')
        ax = tmph._plot.signal_plot.ax
        axl = tmph._plot.signal_plot.ax_lines[0]
        axl.set_line_properties(color='#b2182b')
        plt.xlabel('Derivative magnitude')
        plt.ylabel('Log(Counts)')
        ax.set_yscale('log')
        ax.set_ylim(10 ** -1, plt.ylim()[1])
        ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
        plt.draw()
    spikes_diagnosis.__doc__ %= (SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
    def spikes_removal_tool(self, signal_mask=None, navigation_mask=None,
                            threshold='auto', interactive=True,
                            display=True, toolkit=None):
        self._check_signal_dimension_equals_one()
        if interactive:
            sr = SpikesRemovalInteractive(self,
                                          signal_mask=signal_mask,
                                          navigation_mask=navigation_mask,
                                          threshold=threshold)
            return sr.gui(display=display, toolkit=toolkit)
        else:
            SpikesRemoval(self,
                          signal_mask=signal_mask,
                          navigation_mask=navigation_mask,
                          threshold=threshold)
    spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (
        SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG, "", DISPLAY_DT, TOOLKIT_DT)
    def create_model(self, dictionary=None):
        """Create a model for the current data.
        Returns
        -------
        model : `Model1D` instance.
        """
        model = Model1D(self, dictionary=dictionary)
        return model
    def shift1D(
        self,
        shift_array,
        interpolation_method='linear',
        crop=True,
        expand=False,
        fill_value=np.nan,
        parallel=None,
        show_progressbar=None,
        max_workers=None,
    ):
        """Shift the data in place over the signal axis by the amount specified
        by an array.
        Parameters
        ----------
        shift_array : numpy array
            An array containing the shifting amount. It must have
            `axes_manager._navigation_shape_in_array` shape.
        interpolation_method : str or int
            Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
            integer specifying the order of the spline interpolator to
            use.
        %s
        expand : bool
            If True, the data will be expanded to fit all data after alignment.
            Overrides `crop`.
        fill_value : float
            If crop is False fill the data outside of the original
            interval with the given value where needed.
        %s
        %s
        %s
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        if not np.any(shift_array):
            # Nothing to do, the shift array is filled with zeros
            return
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]
        # Figure out min/max shifts, and translate to shifts in index as well
        minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
        if minimum < 0:
            ihigh = 1 + axis.value2index(
                axis.high_value + minimum,
                rounding=math.floor)
        else:
            ihigh = axis.high_index + 1
        if maximum > 0:
            ilow = axis.value2index(axis.offset + maximum,
                                    rounding=math.ceil)
        else:
            ilow = axis.low_index
        if expand:
            if self._lazy:
                ind = axis.index_in_array
                pre_shape = list(self.data.shape)
                post_shape = list(self.data.shape)
                pre_chunks = list(self.data.chunks)
                post_chunks = list(self.data.chunks)
                pre_shape[ind] = axis.high_index - ihigh + 1
                post_shape[ind] = ilow - axis.low_index
                for chunks, shape in zip((pre_chunks, post_chunks),
                                         (pre_shape, post_shape)):
                    maxsize = min(np.max(chunks[ind]), shape[ind])
                    num = np.ceil(shape[ind] / maxsize)
                    chunks[ind] = tuple(len(ar) for ar in
                                        np.array_split(np.arange(shape[ind]),
                                                       num))
                pre_array = da.full(tuple(pre_shape),
                                    fill_value,
                                    chunks=tuple(pre_chunks))
                post_array = da.full(tuple(post_shape),
                                     fill_value,
                                     chunks=tuple(post_chunks))
                self.data = da.concatenate((pre_array, self.data, post_array),
                                           axis=ind)
            else:
                padding = []
                for i in range(self.data.ndim):
                    if i == axis.index_in_array:
                        padding.append((axis.high_index - ihigh + 1,
                                        ilow - axis.low_index))
                    else:
                        padding.append((0, 0))
                self.data = np.pad(self.data, padding, mode='constant',
                                   constant_values=(fill_value,))
            axis.offset += minimum
            axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
        self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),
                          original_axis=axis.axis,
                          fill_value=fill_value,
                          kind=interpolation_method,
                          offset=axis.offset,
                          scale=axis.scale,
                          size=axis.size,
                          show_progressbar=show_progressbar,
                          parallel=parallel,
                          max_workers=max_workers,
                          ragged=False)
        if crop and not expand:
            _logger.debug("Cropping %s from index %i to %i"
                          % (self, ilow, ihigh))
            self.crop(axis.index_in_axes_manager,
                      ilow,
                      ihigh)
        self.events.data_changed.trigger(obj=self)
    shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
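    # Usage sketch (values and names are illustrative; assumes a
    # navigation-resolved ``Signal1D`` named ``s``):
    #     >>> s = hs.signals.Signal1D(np.random.random((10, 1000)))
    #     >>> shifts = np.linspace(0., 5., 10) * s.axes_manager[-1].scale
    #     >>> s.shift1D(shifts, crop=True)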
    def interpolate_in_between(
        self,
        start,
        end,
        delta=3,
        show_progressbar=None,
        parallel=None,
        max_workers=None,
        **kwargs,
    ):
        """Replace the data in a given range by interpolation.
        The operation is performed in place.
        Parameters
        ----------
        start, end : int or float
            The limits of the interval. If int they are taken as the
            axis index. If float they are taken as the axis value.
        delta : int or float
            The windows around the (start, end) to use for interpolation
        %s
        %s
        %s
        **kwargs :
            All extra keyword arguments are passed to
            :py:func:`scipy.interpolate.interp1d`. See the function documentation
            for details.
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]
        i1 = axis._get_index(start)
        i2 = axis._get_index(end)
        if isinstance(delta, float):
            delta = int(delta / axis.scale)
        i0 = int(np.clip(i1 - delta, 0, np.inf))
        i3 = int(np.clip(i2 + delta, 0, axis.size))
        def interpolating_function(dat):
            dat_int = sp.interpolate.interp1d(
                list(range(i0, i1)) + list(range(i2, i3)),
                dat[i0:i1].tolist() + dat[i2:i3].tolist(),
                **kwargs)
            dat[i1:i2] = dat_int(list(range(i1, i2)))
            return dat
        self._map_iterate(interpolating_function,
                          ragged=False,
                          parallel=parallel,
                          show_progressbar=show_progressbar,
                          max_workers=max_workers)
        self.events.data_changed.trigger(obj=self)
    interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
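    # Usage sketch (illustrative values): replace an artefact between channels
    # 400 and 450 by interpolating from a 5-channel window on each side:
    #     >>> s.interpolate_in_between(400, 450, delta=5)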
    def estimate_shift1D(
        self,
        start=None,
        end=None,
        reference_indices=None,
        max_shift=None,
        interpolate=True,
        number_of_interpolation_points=5,
        mask=None,
        show_progressbar=None,
        parallel=None,
        max_workers=None,
    ):
        """Estimate the shifts in the current signal axis using
        cross-correlation.
        This method can only estimate the shift by comparing
        unidimensional features that should not change the position in
        the signal axis. To decrease the memory usage and the time of
        computation, and to improve the accuracy of the results, it is
        convenient to select the feature of interest by providing sensible values for
        `start` and `end`. By default interpolation is used to obtain
        subpixel precision.
        Parameters
        ----------
        start, end : int, float or None
            The limits of the interval. If int they are taken as the
            axis index. If float they are taken as the axis value.
        reference_indices : tuple of ints or None
            Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
            coordinates is used for this purpose.
        max_shift : int
            "Saturation limit" for the shift.
        interpolate : bool
            If True, interpolation is used to provide sub-pixel
            accuracy.
        number_of_interpolation_points : int
            Number of interpolation points. Warning: making this number
            too big can saturate the memory
        mask : `BaseSignal` of bool.
            It must have signal_dimension = 0 and navigation_shape equal to the
            current signal. Where mask is True the shift is not computed
            and set to nan.
        %s
        %s
        %s
        Returns
        -------
        An array with the result of the estimation in the axis units.
        Although the computation is performed in batches if the signal is
        lazy, the result is computed in memory because it depends on the
        current state of the axes that could change later on in the workflow.
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        ip = number_of_interpolation_points + 1
        axis = self.axes_manager.signal_axes[0]
        self._check_navigation_mask(mask)
        # we compute for now
        if isinstance(start, da.Array):
            start = start.compute()
        if isinstance(end, da.Array):
            end = end.compute()
        i1, i2 = axis._get_index(start), axis._get_index(end)
        if reference_indices is None:
            reference_indices = self.axes_manager.indices
        ref = self.inav[reference_indices].data[i1:i2]
        if interpolate is True:
            ref = interpolate1D(ip, ref)
        iterating_kwargs = ()
        if mask is not None:
            iterating_kwargs += (('mask', mask),)
        shift_signal = self._map_iterate(
            _estimate_shift1D,
            iterating_kwargs=iterating_kwargs,
            data_slice=slice(i1, i2),
            ref=ref,
            ip=ip,
            interpolate=interpolate,
            ragged=False,
            parallel=parallel,
            inplace=False,
            show_progressbar=show_progressbar,
            max_workers=max_workers,
        )
        shift_array = shift_signal.data
        if max_shift is not None:
            if interpolate is True:
                max_shift *= ip
            shift_array = shift_array.clip(-max_shift, max_shift)
        if interpolate is True:
            shift_array = shift_array / ip
        shift_array *= axis.scale
        if self._lazy:
            # We must compute right now because otherwise any changes to the
            # axes_manager of the signal later in the workflow may result in
            # a wrong shift_array
            shift_array = shift_array.compute()
        return shift_array
    estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
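    # Usage sketch (illustrative values): estimate the shifts over a feature
    # lying between 400. and 600. axis units, then apply them:
    #     >>> shifts = s.estimate_shift1D(start=400., end=600.)
    #     >>> s.shift1D(shifts)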
    def align1D(self,
                start=None,
                end=None,
                reference_indices=None,
                max_shift=None,
                interpolate=True,
                number_of_interpolation_points=5,
                interpolation_method='linear',
                crop=True,
                expand=False,
                fill_value=np.nan,
                also_align=None,
                mask=None,
                show_progressbar=None):
        """Estimate the shifts in the signal axis using
        cross-correlation and use the estimation to align the data in place.
        This method can only estimate the shift by comparing
        unidimensional features that should not change their position.
        To decrease memory usage and time of computation, and to improve
        accuracy, it is convenient to select the feature of interest by
        setting the `start` and `end` keywords. By default interpolation is
        used to obtain subpixel precision.
        Parameters
        ----------
        start, end : int, float or None
            The limits of the interval. If int they are taken as the
            axis index. If float they are taken as the axis value.
        reference_indices : tuple of ints or None
            Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
            coordinates is used for this purpose.
        max_shift : int
            "Saturation limit" for the shift.
        interpolate : bool
            If True, interpolation is used to provide sub-pixel
            accuracy.
        number_of_interpolation_points : int
            Number of interpolation points. Warning: making this number
            too big can saturate the memory
        interpolation_method : str or int
            Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
            integer specifying the order of the spline interpolator to
            use.
        %s
        expand : bool
            If True, the data will be expanded to fit all data after alignment.
            Overrides `crop`.
        fill_value : float
            If crop is False fill the data outside of the original
            interval with the given value where needed.
        also_align : list of signals, None
            A list of BaseSignal instances that have exactly the same
            dimensions as this one and that will be aligned using the shift
            map estimated using this signal.
        mask : `BaseSignal` of bool data type.
            It must have signal_dimension = 0 and navigation_shape equal to the
            current signal. Where mask is True the shift is not computed
            and set to nan.
        %s
        Returns
        -------
        An array with the result of the estimation.
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        See also
        --------
        estimate_shift1D
        """
        if also_align is None:
            also_align = []
        self._check_signal_dimension_equals_one()
        if self._lazy:
            _logger.warning('In order to properly expand, the lazy '
                            'reference signal will be read twice (once to '
                            'estimate shifts, and second time to shift '
                            'appropriately), which might take a long time. '
                            'Use expand=False to only pass through the data '
                            'once.')
        shift_array = self.estimate_shift1D(
            start=start,
            end=end,
            reference_indices=reference_indices,
            max_shift=max_shift,
            interpolate=interpolate,
            number_of_interpolation_points=number_of_interpolation_points,
            mask=mask,
            show_progressbar=show_progressbar)
        signals_to_shift = [self] + also_align
        for signal in signals_to_shift:
            signal.shift1D(shift_array=shift_array,
                           interpolation_method=interpolation_method,
                           crop=crop,
                           fill_value=fill_value,
                           expand=expand,
                           show_progressbar=show_progressbar)
    align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)
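    # Usage sketch (illustrative values; ``s2`` is an assumed signal with the
    # same dimensions as ``s`` that is aligned with the same shift map):
    #     >>> s.align1D(start=400., end=600., also_align=[s2])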
    def integrate_in_range(self, signal_range='interactive',
                           display=True, toolkit=None):
        """Sums the spectrum over an energy range, giving the integrated
        area.
        The energy range can either be selected through a GUI or the command
        line.
        Parameters
        ----------
        signal_range : a tuple of this form (l, r) or "interactive"
            l and r are the left and right limits of the range. They can be
            numbers or None, where None indicates the extremes of the interval.
            If l and r are floats the `signal_range` will be in axis units (for
            example eV). If l and r are integers the `signal_range` will be in
            index units. When `signal_range` is "interactive" (default) the
            range is selected using a GUI. Note that ROIs can be used
            in place of a tuple.
        Returns
        -------
        integrated_spectrum : `BaseSignal` subclass
        See Also
        --------
        integrate_simpson
        Examples
        --------
        Using the GUI
        >>> s = hs.signals.Signal1D(range(1000))
        >>> s.integrate_in_range() #doctest: +SKIP
        Using the CLI
        >>> s_int = s.integrate_in_range(signal_range=(560,None))
        Selecting a range in the axis units, by specifying the
        signal range with floats.
        >>> s_int = s.integrate_in_range(signal_range=(560.,590.))
        Selecting a range using the index, by specifying the
        signal range with integers.
        >>> s_int = s.integrate_in_range(signal_range=(100,120))
        """
        from hyperspy.misc.utils import deprecation_warning
        msg = (
            "The `Signal1D.integrate_in_range` method is deprecated and will "
            "be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
            "instead.")
        deprecation_warning(msg)
        if signal_range == 'interactive':
            self_copy = self.deepcopy()
            ia = IntegrateArea(self_copy, signal_range)
            ia.gui(display=display, toolkit=toolkit)
            integrated_signal1D = self_copy
        else:
            integrated_signal1D = self._integrate_in_range_commandline(
                signal_range)
        return integrated_signal1D
    def _integrate_in_range_commandline(self, signal_range):
        e1 = signal_range[0]
        e2 = signal_range[1]
        integrated_signal1D = self.isig[e1:e2].integrate1D(-1)
        return integrated_signal1D
    def calibrate(self, display=True, toolkit=None):
        """
        Calibrate the spectral dimension using a gui.
        It displays a window where the new calibration can be set by:
        * setting the values of offset, units and scale directly
        * or selecting a range by dragging the mouse on the spectrum figure
          and setting the new values for the given range limits
        Parameters
        ----------
        %s
        %s
        Notes
        -----
        For this method to work the output_dimension must be 1.
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        self._check_signal_dimension_equals_one()
        calibration = Signal1DCalibration(self)
        return calibration.gui(display=display, toolkit=toolkit)
    calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
    def smooth_savitzky_golay(
        self,
        polynomial_order=None,
        window_length=None,
        differential_order=0,
        parallel=None,
        max_workers=None,
        display=True,
        toolkit=None,
    ):
        """
        Apply a Savitzky-Golay filter to the data in place.
        If `polynomial_order` or `window_length` or `differential_order` are
        None the method is run in interactive mode.
        Parameters
        ----------
        polynomial_order : int, optional
            The order of the polynomial used to fit the samples.
            `polyorder` must be less than `window_length`.
        window_length : int, optional
            The length of the filter window (i.e. the number of coefficients).
            `window_length` must be a positive odd integer.
        differential_order: int, optional
            The order of the derivative to compute.  This must be a
            nonnegative integer.  The default is 0, which means to filter
            the data without differentiating.
        %s
        %s
        %s
        %s
        Notes
        -----
        More information about the filter in `scipy.signal.savgol_filter`.
        """
        self._check_signal_dimension_equals_one()
        if (polynomial_order is not None and
                window_length is not None):
            axis = self.axes_manager.signal_axes[0]
            self.map(savgol_filter, window_length=window_length,
                     polyorder=polynomial_order, deriv=differential_order,
                     delta=axis.scale, ragged=False, parallel=parallel, max_workers=max_workers)
        else:
            # Interactive mode
            smoother = SmoothingSavitzkyGolay(self)
            smoother.differential_order = differential_order
            if polynomial_order is not None:
                smoother.polynomial_order = polynomial_order
            if window_length is not None:
                smoother.window_length = window_length
            return smoother.gui(display=display, toolkit=toolkit)
    smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
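    # Usage sketch (illustrative parameters): a third-order polynomial fitted
    # over an 11-channel window, applied in place without opening the GUI:
    #     >>> s.smooth_savitzky_golay(polynomial_order=3, window_length=11)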
    def smooth_lowess(
        self,
        smoothing_parameter=None,
        number_of_iterations=None,
        show_progressbar=None,
        parallel=None,
        max_workers=None,
        display=True,
        toolkit=None,
    ):
        """
        Lowess data smoothing in place.
        If `smoothing_parameter` or `number_of_iterations` are None the method
        is run in interactive mode.
        Parameters
        ----------
        smoothing_parameter: float or None
            Between 0 and 1. The fraction of the data used
            when estimating each y-value.
        number_of_iterations: int or None
            The number of residual-based reweightings
            to perform.
        %s
        %s
        %s
        %s
        %s
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        self._check_signal_dimension_equals_one()
        if smoothing_parameter is None or number_of_iterations is None:
            smoother = SmoothingLowess(self)
            if smoothing_parameter is not None:
                smoother.smoothing_parameter = smoothing_parameter
            if number_of_iterations is not None:
                smoother.number_of_iterations = number_of_iterations
            return smoother.gui(display=display, toolkit=toolkit)
        else:
            self.map(lowess,
                     x=self.axes_manager[-1].axis,
                     f=smoothing_parameter,
                     n_iter=number_of_iterations,
                     show_progressbar=show_progressbar,
                     ragged=False,
                     parallel=parallel,
                     max_workers=max_workers)
    smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
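    # Usage sketch (illustrative parameters): use 10% of the data for each
    # local fit and two reweighting iterations:
    #     >>> s.smooth_lowess(smoothing_parameter=0.1, number_of_iterations=2)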
    def smooth_tv(
        self,
        smoothing_parameter=None,
        show_progressbar=None,
        parallel=None,
        max_workers=None,
        display=True,
        toolkit=None,
    ):
        """
        Total variation data smoothing in place.
        Parameters
        ----------
        smoothing_parameter: float or None
           Denoising weight relative to L2 minimization. If None the method
           is run in interactive mode.
        %s
        %s
        %s
        %s
        %s
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        self._check_signal_dimension_equals_one()
        if smoothing_parameter is None:
            smoother = SmoothingTV(self)
            return smoother.gui(display=display, toolkit=toolkit)
        else:
            self.map(_tv_denoise_1d, weight=smoothing_parameter,
                     ragged=False,
                     show_progressbar=show_progressbar,
                     parallel=parallel,
                     max_workers=max_workers)
    smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
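    # Usage sketch (illustrative weight; larger values smooth more strongly):
    #     >>> s.smooth_tv(smoothing_parameter=10)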
    def filter_butterworth(self,
                           cutoff_frequency_ratio=None,
                           type='low',
                           order=2, display=True, toolkit=None):
        """
        Butterworth filter in place.
        Parameters
        ----------
        %s
        %s
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        self._check_signal_dimension_equals_one()
        smoother = ButterworthFilter(self)
        if cutoff_frequency_ratio is not None:
            smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
            smoother.type = type
            smoother.order = order
            smoother.apply()
        else:
            return smoother.gui(display=display, toolkit=toolkit)
    filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
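    # Usage sketch (illustrative parameters): a second-order low-pass filter
    # applied in place:
    #     >>> s.filter_butterworth(cutoff_frequency_ratio=0.1, type='low', order=2)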
    def _remove_background_cli(
            self, signal_range, background_estimator, fast=True,
            zero_fill=False, show_progressbar=None, model=None,
            return_model=False):
        """ See :py:meth:`~hyperspy._signal1d.signal1D.remove_background`. """
        if model is None:
            from hyperspy.models.model1d import Model1D
            model = Model1D(self)
        if background_estimator not in model:
            model.append(background_estimator)
        background_estimator.estimate_parameters(
            self,
            signal_range[0],
            signal_range[1],
            only_current=False)
        if not fast:
            model.set_signal_range(signal_range[0], signal_range[1])
            model.multifit(show_progressbar=show_progressbar,
                           iterpath='serpentine')
            model.reset_signal_range()
        if self._lazy:
            result = self - model.as_signal(show_progressbar=show_progressbar)
        else:
            try:
                axis = self.axes_manager.signal_axes[0]
                scale_factor = axis.scale if self.metadata.Signal.binned else 1
                bkg = background_estimator.function_nd(axis.axis) * scale_factor
                result = self - bkg
            except MemoryError:
                result = self - model.as_signal(
                    show_progressbar=show_progressbar)
        if zero_fill:
            if self._lazy:
                low_idx = result.axes_manager[-1].value2index(signal_range[0])
                z = da.zeros(low_idx, chunks=(low_idx,))
                cropped_da = result.data[low_idx:]
                result.data = da.concatenate([z, cropped_da])
            else:
                result.isig[:signal_range[0]] = 0
        if return_model:
            if fast:
                # Calculate the variance for each navigation position only when
                # using fast, otherwise the chisq is already calculated when
                # doing the multifit
                d = result.data[..., np.where(model.channel_switches)[0]]
                variance = model._get_variance(only_current=False)
                d *= d / (1. * variance)  # d = difference^2 / variance.
                model.chisq.data = d.sum(-1)
            result = (result, model)
        return result
    def remove_background(
            self,
            signal_range='interactive',
            background_type='Power law',
            polynomial_order=2,
            fast=True,
            zero_fill=False,
            plot_remainder=True,
            show_progressbar=None,
            return_model=False,
            display=True,
            toolkit=None):
        """
        Remove the background, either in place using a GUI or returned as a new
        spectrum using the command line. The fast option is not accurate for
        most background types - except Gaussian, Offset and
        Power law - but it is useful to estimate the initial fitting parameters
        before performing a full fit.
        Parameters
        ----------
        signal_range : "interactive", tuple of ints or floats, optional
            If this argument is not specified, the signal range has to be
            selected using a GUI and the original spectrum is replaced in
            place. If a tuple is given, a new background-subtracted spectrum
            is returned.
        background_type : str
            The type of component which should be used to fit the background.
            Possible components: Doniach, Gaussian, Lorentzian, Offset,
            Polynomial, PowerLaw, Exponential, SkewNormal, SplitVoigt, Voigt.
            If Polynomial is used, the polynomial order can be specified
        polynomial_order : int, default 2
            Specify the polynomial order if a Polynomial background is used.
        fast : bool
            If True, perform an approximate estimation of the parameters.
            If False, the signal is fitted using non-linear least squares
            afterwards. This is slower compared to the estimation but
            often more accurate.
        zero_fill : bool
            If True, all spectral channels lower than the lower bound of the
            fitting range will be set to zero (this is the default behavior
            of Gatan's DigitalMicrograph). Setting this value to False
            allows for inspection of the quality of background fit throughout
            the pre-fitting region.
        plot_remainder : bool
            If True, add a (green) line previewing the remainder signal after
            background removal. This preview is obtained from a fast calculation,
            so the result may differ if a non-linear least-squares (NLLS) fit is finally
            performed.
        return_model : bool
            If True, the background model is returned. The chi² can be obtained
            from this model using
            :py:attr:`~hyperspy.models.model1d.Model1D.chisq`.
        %s
        %s
        %s
        Returns
        -------
        {None, signal, background_model or (signal, background_model)}
            If signal_range is not 'interactive', the signal with background
            subtracted is returned. If return_model is True, returns the
            background model, otherwise, the GUI widget dictionary is returned
            if `display=False` - see the display parameter documentation.
        Examples
        --------
        Using GUI, replaces spectrum s
        >>> s = hs.signals.Signal1D(range(1000))
        >>> s.remove_background() #doctest: +SKIP
        Using command line, returns a Signal1D:
        >>> s.remove_background(signal_range=(400,450),
                                background_type='PowerLaw')
        <Signal1D, title: , dimensions: (|1000)>
        Using a full model to fit the background:
        >>> s.remove_background(signal_range=(400,450), fast=False)
        <Signal1D, title: , dimensions: (|1000)>
        Returns the background-subtracted signal and the model:
        >>> s.remove_background(signal_range=(400,450),
                                fast=False,
                                return_model=True)
        (<Signal1D, title: , dimensions: (|1000)>, <Model1D>)
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        self._check_signal_dimension_equals_one()
        # Create model here, so that we can return it
        from hyperspy.models.model1d import Model1D
        model = Model1D(self)
        if signal_range == 'interactive':
            br = BackgroundRemoval(self, background_type=background_type,
                                   polynomial_order=polynomial_order,
                                   fast=fast,
                                   plot_remainder=plot_remainder,
                                   show_progressbar=show_progressbar,
                                   zero_fill=zero_fill,
                                   model=model)
            gui_dict = br.gui(display=display, toolkit=toolkit)
            if return_model:
                return model
            else:
                # for testing purposes
                return gui_dict
        else:
            background_estimator = _get_background_estimator(
                background_type, polynomial_order)[0]
            result = self._remove_background_cli(
                signal_range=signal_range,
                background_estimator=background_estimator,
                fast=fast,
                zero_fill=zero_fill,
                show_progressbar=show_progressbar,
                model=model,
                return_model=return_model)
            return result
    remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)
    @interactive_range_selector
    def crop_signal1D(self, left_value=None, right_value=None,):
        """Crop in place the spectral dimension.
        Parameters
        ----------
        left_value, right_value : int, float or None
            If int the values are taken as indices. If float they are
            converted to indices using the spectral axis calibration.
            If left_value is None crops from the beginning of the axis.
            If right_value is None crops up to the end of the axis. If
            both are None the interactive cropping interface is activated,
            enabling cropping the spectrum using a span selector in the
            signal plot.
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        self._check_signal_dimension_equals_one()
        try:
            left_value, right_value = left_value
        except TypeError:
            # It was not a ROI, we carry on
            pass
        self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
                  start=left_value, end=right_value)
    def gaussian_filter(self, FWHM):
        """Applies a Gaussian filter in the spectral dimension in place.
        Parameters
        ----------
        FWHM : float
            The Full Width at Half Maximum of the gaussian in the
            spectral axis units
        Raises
        ------
        ValueError
            If FWHM is equal or less than zero.
        SignalDimensionError
            If the signal dimension is not 1.
        """
        self._check_signal_dimension_equals_one()
        if FWHM <= 0:
            raise ValueError(
                "FWHM must be greater than zero")
        axis = self.axes_manager.signal_axes[0]
        FWHM *= 1 / axis.scale
        self.map(gaussian_filter1d, sigma=FWHM / 2.35482, ragged=False)
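    # Note on the conversion above: ``sigma = FWHM / (2 * sqrt(2 * ln 2))``,
    # i.e. roughly ``FWHM / 2.35482``, after the FWHM has been converted from
    # axis units to channels by dividing by ``axis.scale``.
    # Usage sketch (illustrative value, in axis units):
    #     >>> s.gaussian_filter(FWHM=2.5)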
    def hanning_taper(self, side='both', channels=None, offset=0):
        """Apply a hanning taper to the data in place.
        Parameters
        ----------
        side : 'left', 'right' or 'both'
            Specify which side to use.
        channels : None or int
            The number of channels to taper. If None 2% of the total
            number of channels (but at least 20) are tapered.
        offset : int
            Number of channels from the signal edge that are zeroed before
            the taper is applied.
        Returns
        -------
        channels
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        if not np.issubdtype(self.data.dtype, np.floating):
            raise TypeError("The data dtype should be `float`. It can be "
                            "changed by using the `change_dtype('float')` "
                            "method of the signal.")
        # TODO: generalize it
        self._check_signal_dimension_equals_one()
        if channels is None:
            channels = int(round(len(self()) * 0.02))
            if channels < 20:
                channels = 20
        dc = self._data_aligned_with_axes
        if self._lazy and offset != 0:
            shp = dc.shape
            if len(shp) == 1:
                nav_shape = ()
                nav_chunks = ()
            else:
                nav_shape = shp[:-1]
                nav_chunks = dc.chunks[:-1]
            zeros = da.zeros(nav_shape + (offset,),
                             chunks=nav_chunks + ((offset,),))
        if side == 'left' or side == 'both':
            if self._lazy:
                tapered = dc[..., offset:channels + offset]
                tapered *= np.hanning(2 * channels)[:channels]
                therest = dc[..., channels + offset:]
                thelist = [] if offset == 0 else [zeros]
                thelist.extend([tapered, therest])
                dc = da.concatenate(thelist, axis=-1)
            else:
                dc[..., offset:channels + offset] *= (
                    np.hanning(2 * channels)[:channels])
                dc[..., :offset] *= 0.
        if side == 'right' or side == 'both':
            rl = None if offset == 0 else -offset
            if self._lazy:
                therest = dc[..., :-channels - offset]
                tapered = dc[..., -channels - offset:rl]
                tapered *= np.hanning(2 * channels)[-channels:]
                thelist = [therest, tapered]
                if offset != 0:
                    thelist.append(zeros)
                dc = da.concatenate(thelist, axis=-1)
            else:
                dc[..., -channels - offset:rl] *= (
                    np.hanning(2 * channels)[-channels:])
                if offset != 0:
                    dc[..., -offset:] *= 0.
        if self._lazy:
            self.data = dc
        self.events.data_changed.trigger(obj=self)
        return channels
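    # Usage sketch (illustrative values; the data must be of float dtype):
    #     >>> s.change_dtype('float')
    #     >>> s.hanning_taper(side='both', channels=50)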
    def find_peaks1D_ohaver(self, xdim=None,
                            slope_thresh=0,
                            amp_thresh=None,
                            subchannel=True,
                            medfilt_radius=5,
                            maxpeakn=30000,
                            peakgroup=10,
                            parallel=None,
                            max_workers=None):
        """Find positive peaks along a 1D Signal. It detects peaks by looking
        for downward zero-crossings in the first derivative that exceed
        'slope_thresh'.
        'slope_thresh' and 'amp_thresh', control sensitivity: higher
        values will neglect broad peaks (slope) and smaller features (amp),
        respectively.
        `peakgroup` is the number of points around the top of the peak
        that are taken to estimate the peak height. For spikes or very
        narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,
        make `peakgroup` larger to reduce the effect of noise.
        Parameters
        ----------
        slope_thresh : float, optional
            1st derivative threshold to count the peak;
            higher values will neglect broader features;
            default is set to 0.
        amp_thresh : float, optional
            intensity threshold below which peaks are ignored;
            higher values will neglect smaller features;
            default is set to 10%% of max(y).
        medfilt_radius : int, optional
            median filter window to apply to smooth the data
            (see :py:func:`scipy.signal.medfilt`);
            if 0, no filter will be applied;
            default is set to 5.
        peakgroup : int, optional
            number of points around the "top part" of the peak
            that are taken to estimate the peak height;
            default is set to 10
        maxpeakn : int, optional
            number of maximum detectable peaks;
            default is set to 30000.
        subchannel : bool, default True
            whether to use subchannel (subpixel) precision when estimating
            the peak position.
        %s
        %s
        Returns
        -------
        structured array of shape (npeaks) containing fields: 'position',
        'width', and 'height' for each peak.
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        # TODO: add scipy.signal.find_peaks_cwt
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0].axis
        peaks = self.map(find_peaks_ohaver,
                         x=axis,
                         slope_thresh=slope_thresh,
                         amp_thresh=amp_thresh,
                         medfilt_radius=medfilt_radius,
                         maxpeakn=maxpeakn,
                         peakgroup=peakgroup,
                         subchannel=subchannel,
                         ragged=True,
                         parallel=parallel,
                         max_workers=max_workers,
                         inplace=False)
        return peaks.data
    find_peaks1D_ohaver.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG)
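    # Usage sketch (illustrative thresholds): returns one structured array of
    # ('position', 'height', 'width') records per navigation position:
    #     >>> peaks = s.find_peaks1D_ohaver(amp_thresh=0.2, maxpeakn=10)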
    def estimate_peak_width(
        self,
        factor=0.5,
        window=None,
        return_interval=False,
        parallel=None,
        show_progressbar=None,
        max_workers=None,
    ):
        """Estimate the width of the highest intensity of peak
        of the spectra at a given fraction of its maximum.
        It can be used with asymmetric peaks. For accurate results any
        background must be previously substracted.
        The estimation is performed by interpolation using cubic splines.
        Parameters
        ----------
        factor : 0 < float < 1
            The default, 0.5, estimates the FWHM.
        window : None or float
            The size of the window centred at the peak maximum
            used to perform the estimation.
            The window size must be chosen with care: if it is narrower
            than the width of the peak at some positions or if it is
            so wide that it includes other more intense peaks this
            method cannot compute the width and a NaN is stored instead.
        return_interval: bool
            If True, returns 2 extra signals with the positions of the
            desired height fraction at the left and right of the
            peak.
        %s
        %s
        %s
        Returns
        -------
        width or [width, left, right], depending on the value of
        `return_interval`.
        Notes
        -----
        Parallel operation of this function is not supported
        on Windows platforms.
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        if not 0 < factor < 1:
            raise ValueError("factor must be between 0 and 1.")
        if parallel != False and os.name in ["nt", "dos"]:  # pragma: no cover
            # Due to a scipy bug where scipy.interpolate.UnivariateSpline
            # appears to not be thread-safe on Windows, we raise a warning
            # here. See https://github.com/hyperspy/hyperspy/issues/2320
            # Until/if the scipy bug is fixed, we should do this.
            _logger.warning(
                "Parallel operation is not supported on Windows. "
                "Setting `parallel=False`"
            )
            parallel = False
        axis = self.axes_manager.signal_axes[0]
        # x = axis.axis
        maxval = self.axes_manager.navigation_size
        show_progressbar = show_progressbar and maxval > 0
        def estimating_function(spectrum,
                                window=None,
                                factor=0.5,
                                axis=None):
            x = axis.axis
            if window is not None:
                vmax = axis.index2value(spectrum.argmax())
                slices = axis._get_array_slices(
                    slice(vmax - window * 0.5, vmax + window * 0.5))
                spectrum = spectrum[slices]
                x = x[slices]
            spline = scipy.interpolate.UnivariateSpline(
                x,
                spectrum - factor * spectrum.max(),
                s=0)
            roots = spline.roots()
            if len(roots) == 2:
                return np.array(roots)
            else:
                return np.full((2,), np.nan)
        both = self._map_iterate(estimating_function,
                                 window=window,
                                 factor=factor,
                                 axis=axis,
                                 ragged=False,
                                 inplace=False,
                                 parallel=parallel,
                                 show_progressbar=show_progressbar,
                                 max_workers=max_workers)
        left, right = both.T.split()
        width = right - left
        if factor == 0.5:
            width.metadata.General.title = (
                self.metadata.General.title + " FWHM")
            left.metadata.General.title = (
                self.metadata.General.title + " FWHM left position")
            right.metadata.General.title = (
                self.metadata.General.title + " FWHM right position")
        else:
            width.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum" % factor)
            left.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum left position" % factor)
            right.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum right position" % factor)
        for signal in (left, width, right):
            signal.axes_manager.set_signal_dimension(0)
            signal.set_signal_type("")
        if return_interval is True:
            return [width, left, right]
        else:
            return width
    estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
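    # Editor-added usage sketch (not part of the original source): for a
    # hypothetical Signal1D instance `s` holding spectra, the width at half
    # maximum and the interval edges could be obtained as:
    #
    #     width, left, right = s.estimate_peak_width(
    #         factor=0.5, window=None, return_interval=True)
    #     width.data  # one FWHM per navigation position (NaN where it fails)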
    def plot(self,
             navigator="auto",
             plot_markers=True,
             autoscale='v',
             norm="auto",
             axes_manager=None,
             navigator_kwds={},
             **kwargs):
        """%s
        %s
        %s
        """
        for c in autoscale:
            if c not in ['x', 'v']:
                raise ValueError("`autoscale` only accepts 'x', 'v' as "
                                 "valid characters.")
        super().plot(navigator=navigator,
                     plot_markers=plot_markers,
                     autoscale=autoscale,
                     norm=norm,
                     axes_manager=axes_manager,
                     navigator_kwds=navigator_kwds,
                     **kwargs)
    plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,
                     PLOT1D_DOCSTRING)
class LazySignal1D(LazySignal, Signal1D):
    """
    """
    _lazy = True
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.axes_manager.set_signal_dimension(1)
 | 
	gpl-3.0 | 
| 
	kobejean/tensorflow | 
	tensorflow/contrib/metrics/python/ops/metric_ops.py | 
	5 | 
	178391 | 
	# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.
  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.
  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
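# Editor-added sketch (not in the original source): in graph mode, `_safe_div`
# guards a ratio against a non-positive denominator. The tensor names below
# are illustrative only.
#
#     ratio = _safe_div(
#         math_ops.reduce_sum(values), math_ops.reduce_sum(weights), 'ratio')
#     # evaluates to 0 whenever reduce_sum(weights) <= 0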
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
                             labels,
                             weights=None,
                             metrics_collections=None,
                             updates_collections=None,
                             name=None):
  """Sum the weights of true_positives.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.true_positives(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
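# Editor-added usage sketch (assumes TF 1.x graph execution and hypothetical
# `sess`, `preds`, `lbls`, `num_batches`): the deprecated confusion-count
# wrappers below all follow the same value/update pattern.
#
#     tp, tp_update = streaming_true_positives(predictions=preds, labels=lbls)
#     sess.run(tf.local_variables_initializer())
#     for _ in range(num_batches):
#         sess.run(tp_update)        # accumulates counts batch by batch
#     print(sess.run(tp))            # summed weight of true positives so far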
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
                             labels,
                             weights=None,
                             metrics_collections=None,
                             updates_collections=None,
                             name=None):
  """Sum the weights of true_negatives.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.true_negatives(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
                              labels,
                              weights=None,
                              metrics_collections=None,
                              updates_collections=None,
                              name=None):
  """Sum the weights of false positives.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.false_positives(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
                              labels,
                              weights=None,
                              metrics_collections=None,
                              updates_collections=None,
                              name=None):
  """Computes the total number of false negatives.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.false_negatives(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Computes the (weighted) mean of the given values.
  The `streaming_mean` function creates two local variables, `total` and `count`
  that are used to compute the average of `values`. This average is ultimately
  returned as `mean` which is an idempotent operation that simply divides
  `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of `values`
  and `weights`, and it increments `count` with the reduced sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
      must be broadcastable to `values` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.
  Returns:
    mean: A `Tensor` representing the current mean, the value of `total` divided
      by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  return metrics.mean(
      values=values,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
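# Editor-added sketch (not from the original file): a typical streaming-mean
# evaluation loop under TF 1.x, with hypothetical `values` placeholder and
# `batches` iterable.
#
#     mean, update_op = streaming_mean(values)
#     sess.run(tf.local_variables_initializer())
#     for batch in batches:
#         sess.run(update_op, feed_dict={values: batch})
#     final_mean = sess.run(mean)    # total / count over all batches seen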
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Computes the element-wise (weighted) mean of the given tensors.
  In contrast to the `streaming_mean` function which returns a scalar with the
  mean, this function returns an average tensor with the same shape as the
  input tensors.
  The `streaming_mean_tensor` function creates two local variables,
  `total_tensor` and `count_tensor` that are used to compute the average of
  `values`. This average is ultimately returned as `mean` which is an idempotent
  operation that simply divides `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of `values`
  and `weights`, and it increments `count` with the reduced sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
      must be broadcastable to `values` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.
  Returns:
    mean: A float `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  return metrics.mean_tensor(
      values=values,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
            'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
                       labels,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Calculates how often `predictions` matches `labels`.
  The `streaming_accuracy` function creates two local variables, `total` and
  `count` that are used to compute the frequency with which `predictions`
  matches `labels`. This frequency is ultimately returned as `accuracy`: an
  idempotent operation that simply divides `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `accuracy`.
  Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
  where the corresponding elements of `predictions` and `labels` match and 0.0
  otherwise. Then `update_op` increments `total` with the reduced sum of the
  product of `weights` and `is_correct`, and it increments `count` with the
  reduced sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `Tensor` of any shape.
    labels: The ground truth values, a `Tensor` whose shape matches
      `predictions`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `accuracy` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    accuracy: A `Tensor` representing the accuracy, the value of `total` divided
      by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `accuracy`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.accuracy(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
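# Editor-added sketch (assumes a TF 1.x session and hypothetical placeholders
# `pred_ph`, `label_ph` and iterable `data`): accuracy accumulates matches
# across batches rather than averaging per-batch accuracies.
#
#     acc, acc_update = streaming_accuracy(predictions=pred_ph, labels=label_ph)
#     sess.run(tf.local_variables_initializer())
#     for pred_batch, label_batch in data:
#         sess.run(acc_update,
#                  feed_dict={pred_ph: pred_batch, label_ph: label_batch})
#     print(sess.run(acc))           # fraction of matches accumulated so far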
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
            'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
                        labels,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the precision of the predictions with respect to the labels.
  The `streaming_precision` function creates two local variables,
  `true_positives` and `false_positives`, that are used to compute the
  precision. This value is ultimately returned as `precision`, an idempotent
  operation that simply divides `true_positives` by the sum of `true_positives`
  and `false_positives`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`. `update_op` weights each prediction by the corresponding value in
  `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
    labels: The ground truth values, a `bool` `Tensor` whose dimensions must
      match `predictions`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `precision` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    precision: Scalar float `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately and whose value matches
      `precision`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.precision(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
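# Editor-added sketch: precision and recall share the same evaluation pattern;
# only the underlying counters differ (TP/FP vs. TP/FN). The tensor names are
# illustrative, not from this module.
#
#     prec, prec_update = streaming_precision(predictions=pred_bool, labels=lbl_bool)
#     rec, rec_update = streaming_recall(predictions=pred_bool, labels=lbl_bool)
#     sess.run(tf.local_variables_initializer())
#     sess.run([prec_update, rec_update])     # one batch shown for brevity
#     precision_value, recall_value = sess.run([prec, rec])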
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
            'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
                     labels,
                     weights=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Computes the recall of the predictions with respect to the labels.
  The `streaming_recall` function creates two local variables, `true_positives`
  and `false_negatives`, that are used to compute the recall. This value is
  ultimately returned as `recall`, an idempotent operation that simply divides
  `true_positives` by the sum of `true_positives` and `false_negatives`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` that updates these variables and returns the `recall`. `update_op`
  weights each prediction by the corresponding value in `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
    labels: The ground truth values, a `bool` `Tensor` whose dimensions must
      match `predictions`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    recall: Scalar float `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately and whose value matches
      `recall`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.recall(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_false_positive_rate(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the false positive rate of predictions with respect to labels.
  The `false_positive_rate` function creates two local variables,
  `false_positives` and `true_negatives`, that are used to compute the
  false positive rate. This value is ultimately returned as
  `false_positive_rate`, an idempotent operation that simply divides
  `false_positives` by the sum of `false_positives` and `true_negatives`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `false_positive_rate`. `update_op` weights each prediction by the
  corresponding value in `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `false_positive_rate` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    false_positive_rate: Scalar float `Tensor` with the value of
      `false_positives` divided by the sum of `false_positives` and
      `true_negatives`.
    update_op: `Operation` that increments `false_positives` and
      `true_negatives` variables appropriately and whose value matches
      `false_positive_rate`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'false_positive_rate',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    false_p, false_positives_update_op = metrics.false_positives(
        labels=labels,
        predictions=predictions,
        weights=weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    true_n, true_negatives_update_op = metrics.true_negatives(
        labels=labels,
        predictions=predictions,
        weights=weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    def compute_fpr(fp, tn, name):
      return array_ops.where(
          math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
    fpr = compute_fpr(false_p, true_n, 'value')
    update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
                            'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, fpr)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return fpr, update_op
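# Editor-added sketch: the FPR metric mirrors the wrappers above but is built
# from the FP and TN counters directly, fpr = fp / (fp + tn). The names below
# are hypothetical.
#
#     fpr, fpr_update = streaming_false_positive_rate(
#         predictions=pred_scores > 0.5, labels=lbl_bool)
#     sess.run(tf.local_variables_initializer())
#     sess.run(fpr_update)                    # per-batch accumulation
#     print(sess.run(fpr))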
def streaming_false_negative_rate(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the false negative rate of predictions with respect to labels.
  The `false_negative_rate` function creates two local variables,
  `false_negatives` and `true_positives`, that are used to compute the
  false negative rate. This value is ultimately returned as
  `false_negative_rate`, an idempotent operation that simply divides
  `false_negatives` by the sum of `false_negatives` and `true_positives`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `false_negative_rate`. `update_op` weights each prediction by the
  corresponding value in `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
      be cast to `bool`.
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `false_negative_rate` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    false_negative_rate: Scalar float `Tensor` with the value of
      `false_negatives` divided by the sum of `false_negatives` and
      `true_positives`.
    update_op: `Operation` that increments `false_negatives` and
      `true_positives` variables appropriately and whose value matches
      `false_negative_rate`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'false_negative_rate',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    false_n, false_negatives_update_op = metrics.false_negatives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    true_p, true_positives_update_op = metrics.true_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    def compute_fnr(fn, tp, name):
      return array_ops.where(
          math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
    fnr = compute_fnr(false_n, true_p, 'value')
    update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
                            'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, fnr)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
                                              labels,
                                              thresholds,
                                              weights=None,
                                              includes=None):
  """Computes true_positives, false_negatives, true_negatives, false_positives.
  This function creates up to four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives`.
  `true_positive[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `false_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `true_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
  `false_positives[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `False`.
  For estimation of these metrics over a stream of data, for each metric the
  function respectively creates an `update_op` operation that updates the
  variable and returns its value.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
      to `bool`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
      default to all four.
  Returns:
    values: Dict of variables of shape `[len(thresholds)]`. Keys are from
        `includes`.
    update_ops: Dict of operations that increments the `values`. Keys are from
        `includes`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `includes` contains invalid keys.
  """
  all_includes = ('tp', 'fn', 'tn', 'fp')
  if includes is None:
    includes = all_includes
  else:
    for include in includes:
      if include not in all_includes:
        raise ValueError('Invalid key: %s.' % include)
  predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
      predictions, labels, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  num_thresholds = len(thresholds)
  # Reshape predictions and labels.
  predictions_2d = array_ops.reshape(predictions, [-1, 1])
  labels_2d = array_ops.reshape(
      math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
  # Use static shape if known.
  num_predictions = predictions_2d.get_shape().as_list()[0]
  # Otherwise use dynamic shape.
  if num_predictions is None:
    num_predictions = array_ops.shape(predictions_2d)[0]
  thresh_tiled = array_ops.tile(
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.stack([1, num_predictions]))
  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  if ('fn' in includes) or ('tn' in includes):
    pred_is_neg = math_ops.logical_not(pred_is_pos)
  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  if ('fp' in includes) or ('tn' in includes):
    label_is_neg = math_ops.logical_not(label_is_pos)
  if weights is not None:
    broadcast_weights = weights_broadcast_ops.broadcast_weights(
        math_ops.to_float(weights), predictions)
    weights_tiled = array_ops.tile(
        array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
    thresh_tiled.get_shape().assert_is_compatible_with(
        weights_tiled.get_shape())
  else:
    weights_tiled = None
  values = {}
  update_ops = {}
  if 'tp' in includes:
    true_positives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='true_positives')
    is_true_positive = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_pos))
    if weights_tiled is not None:
      is_true_positive *= weights_tiled
    update_ops['tp'] = state_ops.assign_add(true_positives,
                                            math_ops.reduce_sum(
                                                is_true_positive, 1))
    values['tp'] = true_positives
  if 'fn' in includes:
    false_negatives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='false_negatives')
    is_false_negative = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_neg))
    if weights_tiled is not None:
      is_false_negative *= weights_tiled
    update_ops['fn'] = state_ops.assign_add(false_negatives,
                                            math_ops.reduce_sum(
                                                is_false_negative, 1))
    values['fn'] = false_negatives
  if 'tn' in includes:
    true_negatives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='true_negatives')
    is_true_negative = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_neg))
    if weights_tiled is not None:
      is_true_negative *= weights_tiled
    update_ops['tn'] = state_ops.assign_add(true_negatives,
                                            math_ops.reduce_sum(
                                                is_true_negative, 1))
    values['tn'] = true_negatives
  if 'fp' in includes:
    false_positives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='false_positives')
    is_false_positive = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_pos))
    if weights_tiled is not None:
      is_false_positive *= weights_tiled
    update_ops['fp'] = state_ops.assign_add(false_positives,
                                            math_ops.reduce_sum(
                                                is_false_positive, 1))
    values['fp'] = false_positives
  return values, update_ops
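# Editor-added sketch: requesting only a subset of the confusion counters at a
# few thresholds. All names are illustrative.
#
#     values, updates = _streaming_confusion_matrix_at_thresholds(
#         predictions=scores, labels=lbls, thresholds=[0.25, 0.5, 0.75],
#         includes=('tp', 'fp'))
#     sess.run(tf.local_variables_initializer())
#     sess.run([updates['tp'], updates['fp']])
#     tp_per_threshold = sess.run(values['tp'])   # shape [3], one count per threshold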
def streaming_true_positives_at_thresholds(predictions,
                                           labels,
                                           thresholds,
                                           weights=None):
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=('tp',))
  return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
                                            labels,
                                            thresholds,
                                            weights=None):
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=('fn',))
  return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
                                            labels,
                                            thresholds,
                                            weights=None):
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=('fp',))
  return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
                                           labels,
                                           thresholds,
                                           weights=None):
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=('tn',))
  return values['tn'], update_ops['tn']
def streaming_curve_points(labels=None,
                           predictions=None,
                           weights=None,
                           num_thresholds=200,
                           metrics_collections=None,
                           updates_collections=None,
                           curve='ROC',
                           name=None):
  """Computes curve (ROC or PR) values for a prespecified number of points.
  The `streaming_curve_points` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  that are used to compute the curve values. To discretize the curve, a linearly
  spaced set of thresholds is used to compute pairs of recall and precision
  values.
  For best results, `predictions` should be distributed approximately uniformly
  in the range [0, 1] and not peaked around 0 or 1.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall-curve.
    name: An optional variable_scope name.
  Returns:
    points: A `Tensor` with shape [num_thresholds, 2] that contains points of
      the curve.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  TODO(chizeng): Consider rewriting this method to make use of logic within the
  precision_recall_at_equal_thresholds method (to improve run time).
  """
  with variable_scope.variable_scope(name, 'curve_points',
                                     (labels, predictions, weights)):
    if curve != 'ROC' and curve != 'PR':
      raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
    kepsilon = _EPSILON  # to account for floating point imprecisions
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        labels=labels,
        predictions=predictions,
        thresholds=thresholds,
        weights=weights)
    # Add epsilons to avoid dividing by 0.
    epsilon = 1.0e-6
    def compute_points(tp, fn, tn, fp):
      """Computes the roc-auc or pr-auc based on confusion counts."""
      rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
      if curve == 'ROC':
        fp_rate = math_ops.div(fp, fp + tn + epsilon)
        return fp_rate, rec
      else:  # curve == 'PR'.
        prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
        return rec, prec
    xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
                            values['fp'])
    points = array_ops.stack([xs, ys], axis=1)
    update_op = control_flow_ops.group(*update_ops.values())
    if metrics_collections:
      ops.add_to_collections(metrics_collections, points)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return points, update_op
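# Editor-added sketch: discretized ROC points over a stream; `num_thresholds`
# trades curve resolution for memory. Tensor names are illustrative.
#
#     points, update_op = streaming_curve_points(
#         labels=lbls, predictions=scores, num_thresholds=50, curve='ROC')
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     roc_points = sess.run(points)           # shape [50, 2]: (FPR, TPR) pairs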
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
            'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
                  labels,
                  weights=None,
                  num_thresholds=200,
                  metrics_collections=None,
                  updates_collections=None,
                  curve='ROC',
                  name=None):
  """Computes the approximate AUC via a Riemann sum.
  The `streaming_auc` function creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the AUC. To discretize the AUC curve, a linearly spaced set of
  thresholds is used to compute pairs of recall and precision values. The area
  under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
  This value is ultimately returned as `auc`, an idempotent operation that
  computes the area under a discretized curve of precision versus recall values
  (computed using the aforementioned variables). The `num_thresholds` variable
  controls the degree of discretization with larger numbers of thresholds more
  closely approximating the true AUC. The quality of the approximation may vary
  dramatically depending on `num_thresholds`.
  For best results, `predictions` should be distributed approximately uniformly
  in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
  approximation may be poor if this is not the case.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `auc`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall-curve.
    name: An optional variable_scope name.
  Returns:
    auc: A scalar `Tensor` representing the current area-under-curve.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `auc`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.auc(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      num_thresholds=num_thresholds,
      curve=curve,
      updates_collections=updates_collections,
      name=name)
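# Editor-added sketch: streaming AUC accumulates the four confusion counters
# and re-derives the Riemann-sum AUC whenever the value tensor is evaluated.
# Names are illustrative.
#
#     auc, auc_update = streaming_auc(predictions=scores, labels=lbl_bool,
#                                     num_thresholds=200, curve='PR')
#     sess.run(tf.local_variables_initializer())
#     sess.run(auc_update)
#     print(sess.run(auc))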
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
  """Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
  Computes the area under the ROC or PR curve using each prediction as a
  threshold. This could be slow for large batches, but has the advantage of not
  having its results degrade depending on the distribution of predictions.
  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` with values of 0 or 1 and type `int64`.
    predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiver
      Operating Characteristic or 'PR' for the Precision-Recall curve.
    weights: A 1-D `Tensor` of weights whose values are `float64`.
  Returns:
    A scalar `Tensor` containing the area-under-curve value for the input.
  """
  # Compute the total weight and the total positive weight.
  size = array_ops.size(predictions)
  if weights is None:
    weights = array_ops.ones_like(labels, dtype=dtypes.float64)
  labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(
      labels, predictions, weights)
  total_weight = math_ops.reduce_sum(weights)
  total_positive = math_ops.reduce_sum(
      array_ops.where(
          math_ops.greater(labels, 0), weights,
          array_ops.zeros_like(labels, dtype=dtypes.float64)))
  def continue_computing_dynamic_auc():
    """Continues dynamic auc computation, entered if labels are not all equal.
    Returns:
      A scalar `Tensor` containing the area-under-curve value.
    """
    # Sort the predictions descending, keeping the same order for the
    # corresponding labels and weights.
    ordered_predictions, indices = nn.top_k(predictions, k=size)
    ordered_labels = array_ops.gather(labels, indices)
    ordered_weights = array_ops.gather(weights, indices)
    # Get the counts of the unique ordered predictions.
    _, _, counts = array_ops.unique_with_counts(ordered_predictions)
    # Compute the indices of the split points between different predictions.
    splits = math_ops.cast(
        array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
    # Count the positives to the left of the split indices.
    true_positives = array_ops.gather(
        array_ops.pad(
            math_ops.cumsum(
                array_ops.where(
                    math_ops.greater(ordered_labels, 0), ordered_weights,
                    array_ops.zeros_like(ordered_labels,
                                         dtype=dtypes.float64))),
            paddings=[[1, 0]]), splits)
    if curve == 'ROC':
      # Compute the weight of the negatives to the left of every split point
      # and the total weight of the negatives, which is needed for the FPR.
      false_positives = array_ops.gather(
          array_ops.pad(
              math_ops.cumsum(
                  array_ops.where(
                      math_ops.less(ordered_labels, 1), ordered_weights,
                      array_ops.zeros_like(
                          ordered_labels, dtype=dtypes.float64))),
              paddings=[[1, 0]]), splits)
      total_negative = total_weight - total_positive
      x_axis_values = math_ops.truediv(false_positives, total_negative)
      y_axis_values = math_ops.truediv(true_positives, total_positive)
    elif curve == 'PR':
      x_axis_values = math_ops.truediv(true_positives, total_positive)
      # For conformance, set precision to 1 when the number of positive
      # classifications is 0.
      positives = array_ops.gather(
          array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
          splits)
      y_axis_values = array_ops.where(
          math_ops.greater(splits, 0),
          math_ops.truediv(true_positives, positives),
          array_ops.ones_like(true_positives, dtype=dtypes.float64))
    # Calculate trapezoid areas.
    heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
    widths = math_ops.abs(
        math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
    return math_ops.reduce_sum(math_ops.multiply(heights, widths))
  # If all the labels are the same, AUC isn't well-defined (but raising an
  # exception seems excessive) so we return 0, otherwise we finish computing.
  return control_flow_ops.cond(
      math_ops.logical_or(
          math_ops.equal(total_positive, 0), math_ops.equal(
              total_positive, total_weight)),
      true_fn=lambda: array_ops.constant(0, dtypes.float64),
      false_fn=continue_computing_dynamic_auc)
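# Editor-added note: the helper above is, in effect, a trapezoidal integration
# of the curve traced by (x_axis_values, y_axis_values). A NumPy equivalent of
# the final reduction (for intuition only, assuming `import numpy as np` and
# 1-D arrays `x`, `y`; not used by this module) would be:
#
#     heights = (y[1:] + y[:-1]) / 2.0
#     widths = np.abs(x[1:] - x[:-1])
#     auc = np.sum(heights * widths)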
def streaming_dynamic_auc(labels,
                          predictions,
                          curve='ROC',
                          metrics_collections=(),
                          updates_collections=(),
                          name=None,
                          weights=None):
  """Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
  USAGE NOTE: this approach requires storing all of the predictions and labels
  for a single evaluation in memory, so it may not be usable when the evaluation
  batch size and/or the number of evaluation steps is very large.
  Computes the area under the ROC or PR curve using each prediction as a
  threshold. This has the advantage of being resilient to the distribution of
  predictions by aggregating across batches, accumulating labels and predictions
  and performing the final calculation using all of the concatenated values.
  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 that are castable to `int64`.
    predictions: A `Tensor` of predictions whose values are castable to
      `float64`. Will be flattened into a 1-D `Tensor`.
    curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
    metrics_collections: An optional iterable of collections that `auc` should
      be added to.
    updates_collections: An optional iterable of collections that `update_op`
      should be added to.
    name: An optional name for the variable_scope that contains the metric
      variables.
    weights: A 'Tensor' of non-negative weights whose values are castable to
      `float64`. Will be flattened into a 1-D `Tensor`.
  Returns:
    auc: A scalar `Tensor` containing the current area-under-curve value.
    update_op: An operation that concatenates the input labels and predictions
      to the accumulated values.
  Raises:
    ValueError: If `labels` and `predictions` have mismatched shapes or if
      `curve` isn't a recognized curve type.
  """
  if curve not in ['PR', 'ROC']:
    raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
  with variable_scope.variable_scope(name, default_name='dynamic_auc'):
    labels.get_shape().assert_is_compatible_with(predictions.get_shape())
    predictions = array_ops.reshape(
        math_ops.cast(predictions, dtypes.float64), [-1])
    labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            labels,
            array_ops.zeros_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is <0'),
        check_ops.assert_less_equal(
            labels,
            array_ops.ones_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is >1'),
    ]):
      preds_accum, update_preds = streaming_concat(
          predictions, name='concat_preds')
      labels_accum, update_labels = streaming_concat(
          labels, name='concat_labels')
      if weights is not None:
        weights = array_ops.reshape(
            math_ops.cast(weights, dtypes.float64), [-1])
        weights_accum, update_weights = streaming_concat(
            weights, name='concat_weights')
        update_op = control_flow_ops.group(update_labels, update_preds,
                                           update_weights)
      else:
        weights_accum = None
        update_op = control_flow_ops.group(update_labels, update_preds)
      auc = _compute_dynamic_auc(
          labels_accum, preds_accum, curve=curve, weights=weights_accum)
      if updates_collections:
        ops.add_to_collections(updates_collections, update_op)
      if metrics_collections:
        ops.add_to_collections(metrics_collections, auc)
      return auc, update_op
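# Editor-added sketch: dynamic AUC concatenates every prediction and label seen
# so far, so memory grows with the evaluation size. Names such as `eval_steps`
# are illustrative.
#
#     auc, update_op = streaming_dynamic_auc(labels=lbls, predictions=scores)
#     sess.run(tf.local_variables_initializer())
#     for _ in range(eval_steps):
#         sess.run(update_op)                 # appends the batch to the buffer
#     final_auc = sess.run(auc)               # computed over all stored values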
def _compute_placement_auc(labels, predictions, weights, alpha,
                           logit_transformation, is_valid):
  """Computes the AUC and asymptotic normally distributed confidence interval.
  The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
  concept of placement values for each labeled group, as presented by Delong and
  Delong (1988). The actual algorithm used is a more computationally efficient
  approach presented by Sun and Xu (2014). This could be slow for large batches,
  but has the advantage of not having its results degrade depending on the
  distribution of predictions.
  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` with values of 0 or 1 and type `int64`.
    predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
    alpha: Confidence interval level desired.
    logit_transformation: A boolean value indicating whether the estimate should
      be logit transformed prior to calculating the confidence interval. Doing
      so enforces the restriction that the AUC should never be outside the
      interval [0,1].
    is_valid: A bool tensor describing whether the input is valid.
  Returns:
    An `AucData` namedtuple of scalar `Tensor`s holding the area-under-curve
    estimate and the lower and upper confidence interval bounds.
  """
  # Disable the invalid-name checker so that we can capitalize the name.
  # pylint: disable=invalid-name
  AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
  # pylint: enable=invalid-name
  # If all the labels are the same or the number of observations is too small,
  # the AUC isn't well-defined.
  size = array_ops.size(predictions, out_type=dtypes.int32)
  # Count the total number of positive and negative labels in the input.
  total_0 = math_ops.reduce_sum(
      math_ops.cast(1 - labels, weights.dtype) * weights)
  total_1 = math_ops.reduce_sum(
      math_ops.cast(labels, weights.dtype) * weights)
  # Sort the predictions ascending, as well as
  # (i) the corresponding labels and
  # (ii) the corresponding weights.
  ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
  ordered_predictions = array_ops.reverse(
      ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
  indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
  ordered_labels = array_ops.gather(labels, indices)
  ordered_weights = array_ops.gather(weights, indices)
  # We now compute values required for computing placement values.
  # We generate a list of indices (segmented_indices) in increasing order. An
  # index is assigned for each unique prediction float value. Prediction
  # values that are the same share the same index.
  _, segmented_indices = array_ops.unique(ordered_predictions)
  # We create 2 tensors of weights. weights_for_true is non-zero for true
  # labels. weights_for_false is non-zero for false labels.
  float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
  float_labels_for_false = 1.0 - float_labels_for_true
  weights_for_true = ordered_weights * float_labels_for_true
  weights_for_false = ordered_weights * float_labels_for_false
  # For each set of weights with the same segmented indices, we add up the
  # weight values. Note that for each label, we deliberately rely on weights
  # for the opposite label.
  weight_totals_for_true = math_ops.segment_sum(weights_for_false,
                                                segmented_indices)
  weight_totals_for_false = math_ops.segment_sum(weights_for_true,
                                                 segmented_indices)
  # These cumulative sums of weights importantly exclude the current weight
  # sums.
  cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
                                               exclusive=True)
  cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
                                                exclusive=True)
  # Compute placement values using the formula. Values with the same segmented
  # indices and labels share the same placement values.
  placements_for_true = (
      (cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
      (math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
  placements_for_false = (
      (cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
      (math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
  # We expand the tensors of placement values (for each label) so that their
  # shapes match that of predictions.
  placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
  placements_for_false = array_ops.gather(placements_for_false,
                                          segmented_indices)
  # Select placement values based on the label for each index.
  placement_values = (
      placements_for_true * float_labels_for_true +
      placements_for_false * float_labels_for_false)
  # Split placement values by labeled groups.
  placement_values_0 = placement_values * math_ops.cast(
      1 - ordered_labels, weights.dtype)
  weights_0 = ordered_weights * math_ops.cast(
      1 - ordered_labels, weights.dtype)
  placement_values_1 = placement_values * math_ops.cast(
      ordered_labels, weights.dtype)
  weights_1 = ordered_weights * math_ops.cast(
      ordered_labels, weights.dtype)
  # Calculate AUC using placement values
  auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
           (total_0 + _EPSILON))
  auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
           (total_1 + _EPSILON))
  auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
  # Calculate variance and standard error using the placement values.
  var_0 = (
      math_ops.reduce_sum(
          weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
      (total_0 - 1. + _EPSILON))
  var_1 = (
      math_ops.reduce_sum(
          weights_1 * math_ops.square(placement_values_1 - auc_1)) /
      (total_1 - 1. + _EPSILON))
  auc_std_err = math_ops.sqrt(
      (var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))
  # Calculate asymptotic normal confidence intervals
  std_norm_dist = Normal(loc=0., scale=1.)
  z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
  if logit_transformation:
    estimate = math_ops.log(auc / (1. - auc + _EPSILON))
    std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
    transformed_auc_lower = estimate + (z_value * std_err)
    transformed_auc_upper = estimate - (z_value * std_err)
    def inverse_logit_transformation(x):
      exp_negative = math_ops.exp(math_ops.negative(x))
      return 1. / (1. + exp_negative + _EPSILON)
    auc_lower = inverse_logit_transformation(transformed_auc_lower)
    auc_upper = inverse_logit_transformation(transformed_auc_upper)
  else:
    estimate = auc
    std_err = auc_std_err
    auc_lower = estimate + (z_value * std_err)
    auc_upper = estimate - (z_value * std_err)
  ## If the estimate is 1 or 0, no variance is present, so the CI collapses to
  ## the point estimate.
  ## N.B. This can be misleading, since the number of observations may simply
  ## be too low.
  lower = array_ops.where(
      math_ops.logical_or(
          math_ops.equal(auc, array_ops.ones_like(auc)),
          math_ops.equal(auc, array_ops.zeros_like(auc))),
      auc, auc_lower)
  upper = array_ops.where(
      math_ops.logical_or(
          math_ops.equal(auc, array_ops.ones_like(auc)),
          math_ops.equal(auc, array_ops.zeros_like(auc))),
      auc, auc_upper)
  # If all the labels are the same, AUC isn't well-defined (but raising an
  # exception seems excessive) so we return 0, otherwise we finish computing.
  trivial_value = array_ops.constant(0.0)
  return AucData(*control_flow_ops.cond(
      is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
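# A minimal NumPy sketch (added for illustration; not part of the original
# module) of the identity AUC = P(Y_1 > Y_0) that the placement-value
# computation above estimates, with ties between a positive and a negative
# prediction contributing one half:
#
#   import numpy as np
#
#   def pairwise_auc(labels, preds):
#     pos = preds[labels == 1]
#     neg = preds[labels == 0]
#     wins = (pos[:, None] > neg[None, :]).mean()
#     ties = (pos[:, None] == neg[None, :]).mean()
#     return wins + 0.5 * ties
#
#   pairwise_auc(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8]))
#   # -> 0.75, the same value the unweighted placement-value formulation gives.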
def auc_with_confidence_intervals(labels,
                                  predictions,
                                  weights=None,
                                  alpha=0.95,
                                  logit_transformation=True,
                                  metrics_collections=(),
                                  updates_collections=(),
                                  name=None):
  """Computes the AUC and asymptotic normally distributed confidence interval.
  USAGE NOTE: this approach requires storing all of the predictions and labels
  for a single evaluation in memory, so it may not be usable when the evaluation
  batch size and/or the number of evaluation steps is very large.
  Computes the area under the ROC curve and its confidence interval using
  placement values. This has the advantage of being resilient to the
  distribution of predictions by aggregating across batches, accumulating labels
  and predictions and performing the final calculation using all of the
  concatenated values.
  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 that are castable to `int64`.
    predictions: A `Tensor` of predictions whose values are castable to
      `float64`. Will be flattened into a 1-D `Tensor`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`.
    alpha: Confidence interval level desired.
    logit_transformation: A boolean value indicating whether the estimate should
      be logit transformed prior to calculating the confidence interval. Doing
      so enforces the restriction that the AUC should never be outside the
      interval [0,1].
    metrics_collections: An optional iterable of collections that `auc` should
      be added to.
    updates_collections: An optional iterable of collections that `update_op`
      should be added to.
    name: An optional name for the variable_scope that contains the metric
      variables.
  Returns:
    auc: A namedtuple of scalar `Tensor`s (`auc`, `lower`, `upper`) containing
      the current area-under-curve estimate and its confidence interval bounds.
    update_op: An operation that concatenates the input labels and predictions
      to the accumulated values.
  Raises:
    ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
    or if `alpha` isn't in the range (0,1).
  """
  if not (alpha > 0 and alpha < 1):
    raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
  if weights is None:
    weights = array_ops.ones_like(predictions)
  with variable_scope.variable_scope(
      name,
      default_name='auc_with_confidence_intervals',
      values=[labels, predictions, weights]):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=predictions,
        labels=labels,
        weights=weights)
    total_weight = math_ops.reduce_sum(weights)
    weights = array_ops.reshape(weights, [-1])
    predictions = array_ops.reshape(
        math_ops.cast(predictions, dtypes.float64), [-1])
    labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            labels,
            array_ops.zeros_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is <0'),
        check_ops.assert_less_equal(
            labels,
            array_ops.ones_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is >1'),
    ]):
      preds_accum, update_preds = streaming_concat(
          predictions, name='concat_preds')
      labels_accum, update_labels = streaming_concat(labels,
                                                     name='concat_labels')
      weights_accum, update_weights = streaming_concat(
          weights, name='concat_weights')
      update_op_for_valid_case = control_flow_ops.group(
          update_labels, update_preds, update_weights)
      # Only perform updates if this case is valid.
      all_labels_positive_or_0 = math_ops.logical_and(
          math_ops.equal(math_ops.reduce_min(labels), 0),
          math_ops.equal(math_ops.reduce_max(labels), 1))
      sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
      is_valid = math_ops.logical_and(all_labels_positive_or_0,
                                      sums_of_weights_at_least_1)
      update_op = control_flow_ops.cond(
          sums_of_weights_at_least_1,
          lambda: update_op_for_valid_case, control_flow_ops.no_op)
      auc = _compute_placement_auc(
          labels_accum,
          preds_accum,
          weights_accum,
          alpha=alpha,
          logit_transformation=logit_transformation,
          is_valid=is_valid)
      if updates_collections:
        ops.add_to_collections(updates_collections, update_op)
      if metrics_collections:
        ops.add_to_collections(metrics_collections, auc)
      return auc, update_op
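# A rough usage sketch (added for illustration; placeholders such as
# `labels_ph`, `preds_ph`, and `eval_batches` are hypothetical). The first
# return value is the AucData namedtuple computed above, so the point estimate
# and the confidence bounds can be fetched together:
#
#   auc_data, update_op = auc_with_confidence_intervals(
#       labels_ph, preds_ph, alpha=0.95)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     for batch_labels, batch_preds in eval_batches:
#       sess.run(update_op, {labels_ph: batch_labels, preds_ph: batch_preds})
#     auc, lower, upper = sess.run(
#         [auc_data.auc, auc_data.lower, auc_data.upper])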
def precision_recall_at_equal_thresholds(labels,
                                         predictions,
                                         weights=None,
                                         num_thresholds=None,
                                         use_locking=None,
                                         name=None):
  """A helper method for creating metrics related to precision-recall curves.
  These values are true positives, false negatives, true negatives, false
  positives, precision, and recall. This function returns a named tuple whose
  fields hold the corresponding ops.
  Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
  space and run time), this op exhibits O(T + N) space and run time, where T is
  the number of thresholds and N is the size of the predictions tensor. Hence,
  it may be advantageous to use this function when `predictions` is big.
  For instance, prefer this method for per-pixel classification tasks, for which
  the predictions tensor may be very large.
  Each number in `predictions`, a float in `[0, 1]`, is compared with its
  corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
  each threshold. This is then multiplied with `weights` which can be used to
  reweight certain values, or more commonly used for masking values.
  Args:
    labels: A bool `Tensor` whose shape matches `predictions`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional; If provided, a `Tensor` that has the same dtype as,
      and broadcastable to, `predictions`. This tensor is multiplied by counts.
    num_thresholds: Optional; Number of thresholds, evenly distributed in
      `[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
      is 1 less than `num_thresholds`. The bin width is
      `1 / (num_thresholds - 1)`, so an even `num_thresholds` value yields bin
      edges that do not fall on round values, unlike an odd one.
    use_locking: Optional; If True, the op will be protected by a lock.
      Otherwise, the behavior is undefined, but may exhibit less contention.
      Defaults to True.
    name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_thresholds' is used.
  Returns:
    result: A named tuple (See PrecisionRecallData within the implementation of
      this function) with properties that are variables of shape
      `[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
      precision, recall, thresholds. Types are same as that of predictions.
    update_op: An op that accumulates values.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`.
  """
  # Disable the invalid-name checker so that we can capitalize the name.
  # pylint: disable=invalid-name
  PrecisionRecallData = collections_lib.namedtuple(
      'PrecisionRecallData',
      ['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
  # pylint: enable=invalid-name
  if num_thresholds is None:
    num_thresholds = 201
  if weights is None:
    weights = 1.0
  if use_locking is None:
    use_locking = True
  check_ops.assert_type(labels, dtypes.bool)
  with variable_scope.variable_scope(name,
                                     'precision_recall_at_equal_thresholds',
                                     (labels, predictions, weights)):
    # Make sure that predictions are within [0.0, 1.0].
    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            predictions,
            math_ops.cast(0.0, dtype=predictions.dtype),
            message='predictions must be in [0, 1]'),
        check_ops.assert_less_equal(
            predictions,
            math_ops.cast(1.0, dtype=predictions.dtype),
            message='predictions must be in [0, 1]')
    ]):
      predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
          predictions=predictions,
          labels=labels,
          weights=weights)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # It's important we aggregate using float64 since we're accumulating a lot
    # of 1.0's for the true/false labels, and accumulating to float32 will
    # be quite inaccurate even with just a modest number of values (~20M).
    # We use float64 instead of an integer type primarily because the GPU
    # scatter kernel only supports floating point types.
    agg_dtype = dtypes.float64
    f_labels = math_ops.cast(labels, agg_dtype)
    weights = math_ops.cast(weights, agg_dtype)
    true_labels = f_labels  * weights
    false_labels = (1.0 - f_labels) * weights
    # Flatten predictions and labels.
    predictions = array_ops.reshape(predictions, [-1])
    true_labels = array_ops.reshape(true_labels, [-1])
    false_labels = array_ops.reshape(false_labels, [-1])
    # To compute TP/FP/TN/FN, we are measuring a binary classifier
    #   C(t) = (predictions >= t)
    # at each threshold 't'. So we have
    #   TP(t) = sum( C(t) * true_labels )
    #   FP(t) = sum( C(t) * false_labels )
    #
    # But, computing C(t) requires computation for each t. To make it fast,
    # observe that C(t) is a cumulative integral, and so if we have
    #   thresholds = [t_0, ..., t_{n-1}];  t_0 < ... < t_{n-1}
    # where n = num_thresholds, and if we can compute the bucket function
    #   B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
    # then we get
    #   C(t_i) = sum( B(j), j >= i )
    # which is the reversed cumulative sum computed with
    # tf.cumsum(..., reverse=True).
    #
    # We can compute B(i) efficiently by taking advantage of the fact that
    # our thresholds are evenly distributed, in that
    #   width = 1.0 / (num_thresholds - 1)
    #   thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
    # Given a prediction value p, we can map it to its bucket by
    #   bucket_index(p) = floor( p * (num_thresholds - 1) )
    # so we can use tf.scatter_add() to update the buckets in one pass.
    #
    # This implementation exhibits a run time and space complexity of O(T + N),
    # where T is the number of thresholds and N is the size of predictions.
    # Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
    # exhibit a complexity of O(T * N).
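    # A small NumPy illustration of the bucketing trick described above (added
    # for clarity; not part of the original implementation). With
    # num_thresholds = 5 the thresholds are [0.0, 0.25, 0.5, 0.75, 1.0]:
    #
    #   import numpy as np
    #   preds = np.array([0.1, 0.3, 0.6, 0.6, 0.95])
    #   buckets = np.floor(preds * 4).astype(int)   # -> [0, 1, 2, 2, 3]
    #   counts = np.bincount(buckets, minlength=5)  # B(i) for each bucket
    #   c = np.cumsum(counts[::-1])[::-1]           # C(t_i) = sum_{j >= i} B(j)
    #   # c == [5, 4, 3, 1, 0]: the number of predictions >= each threshold.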
    # Compute the bucket indices for each prediction value.
    bucket_indices = math_ops.cast(
        math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
    with ops.name_scope('variables'):
      tp_buckets_v = metrics_impl.metric_variable(
          [num_thresholds], agg_dtype, name='tp_buckets')
      fp_buckets_v = metrics_impl.metric_variable(
          [num_thresholds], agg_dtype, name='fp_buckets')
    with ops.name_scope('update_op'):
      update_tp = state_ops.scatter_add(
          tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
      update_fp = state_ops.scatter_add(
          fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
    # Set up the cumulative sums to compute the actual metrics.
    tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
    fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
    # fn = sum(true_labels) - tp
    #    = sum(tp_buckets) - tp
    #    = tp[0] - tp
    # Similarly,
    # tn = fp[0] - fp
    tn = fp[0] - fp
    fn = tp[0] - tp
    # We use a minimum to prevent division by 0.
    epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
    precision = tp / math_ops.maximum(epsilon, tp + fp)
    recall = tp / math_ops.maximum(epsilon, tp + fn)
    # Convert all tensors back to predictions' dtype (as per function contract).
    out_dtype = predictions.dtype
    _convert = lambda tensor: math_ops.cast(tensor, out_dtype)
    result = PrecisionRecallData(
        tp=_convert(tp),
        fp=_convert(fp),
        tn=_convert(tn),
        fn=_convert(fn),
        precision=_convert(precision),
        recall=_convert(recall),
        thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
    update_op = control_flow_ops.group(update_tp, update_fp)
    return result, update_op
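# A rough usage sketch (added for illustration; `labels_ph`, `preds_ph`, and
# `eval_batches` are hypothetical). The returned PrecisionRecallData fields are
# tensors of shape [num_thresholds], so a full PR curve can be read out after
# the update op has seen all evaluation batches:
#
#   result, update_op = precision_recall_at_equal_thresholds(
#       labels=labels_ph,        # bool tensor
#       predictions=preds_ph,    # float tensor with values in [0, 1]
#       num_thresholds=201)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     for batch_labels, batch_preds in eval_batches:
#       sess.run(update_op, {labels_ph: batch_labels, preds_ph: batch_preds})
#     precision, recall = sess.run([result.precision, result.recall])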
def streaming_specificity_at_sensitivity(predictions,
                                         labels,
                                         sensitivity,
                                         weights=None,
                                         num_thresholds=200,
                                         metrics_collections=None,
                                         updates_collections=None,
                                         name=None):
  """Computes the specificity at a given sensitivity.
  The `streaming_specificity_at_sensitivity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the specificity at the given
  sensitivity value. The threshold for the given sensitivity value is computed
  and used to evaluate the corresponding specificity.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `specificity`. `update_op` increments the `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` counts with the weight of each case
  found in the `predictions` and `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    sensitivity: A scalar value in range `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      sensitivity.
    metrics_collections: An optional list of collections that `specificity`
      should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `specificity`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `sensitivity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  return metrics.specificity_at_sensitivity(
      sensitivity=sensitivity,
      num_thresholds=num_thresholds,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
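# A brief usage sketch (added for illustration only). The wrapper above simply
# forwards to tf.metrics.specificity_at_sensitivity, so the returned pair
# follows the usual streaming pattern; `preds_ph` and `labels_ph` are
# hypothetical placeholders:
#
#   spec, update_op = streaming_specificity_at_sensitivity(
#       predictions=preds_ph, labels=labels_ph, sensitivity=0.9)
#   # Run `update_op` once per evaluation batch, then fetch `spec` to get the
#   # specificity at the threshold that (approximately) achieves 0.9
#   # sensitivity.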
def streaming_sensitivity_at_specificity(predictions,
                                         labels,
                                         specificity,
                                         weights=None,
                                         num_thresholds=200,
                                         metrics_collections=None,
                                         updates_collections=None,
                                         name=None):
  """Computes the sensitivity at a given specificity.
  The `streaming_sensitivity_at_specificity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the sensitivity at the given
  specificity value. The threshold for the given specificity value is computed
  and used to evaluate the corresponding sensitivity.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` counts with the weight of each case
  found in the `predictions` and `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    specificity: A scalar value in range `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      specificity.
    metrics_collections: An optional list of collections that `sensitivity`
      should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    sensitivity: A scalar `Tensor` representing the sensitivity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `sensitivity`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `specificity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  return metrics.sensitivity_at_specificity(
      specificity=specificity,
      num_thresholds=num_thresholds,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.precision_at_thresholds. Note that '
            'the order of the labels and predictions arguments has been '
            'switched.')
def streaming_precision_at_thresholds(predictions,
                                      labels,
                                      thresholds,
                                      weights=None,
                                      metrics_collections=None,
                                      updates_collections=None,
                                      name=None):
  """Computes precision values for different `thresholds` on `predictions`.
  The `streaming_precision_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds. `precision[i]` is defined as the total
  weight of values in `predictions` above `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of values in
  `predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
  false_positives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `precision` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    precision: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `precision`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.precision_at_thresholds(
      thresholds=thresholds,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.recall_at_thresholds. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
                                   labels,
                                   thresholds,
                                   weights=None,
                                   metrics_collections=None,
                                   updates_collections=None,
                                   name=None):
  """Computes various recall values for different `thresholds` on `predictions`.
  The `streaming_recall_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds. `recall[i]` is defined as the total weight
  of values in `predictions` above `thresholds[i]` whose corresponding entry in
  `labels` is `True`, divided by the total weight of `True` values in `labels`
  (`true_positives[i] / (true_positives[i] + false_negatives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `recall`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    recall: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `recall`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.recall_at_thresholds(
      thresholds=thresholds,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
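# A brief usage sketch (added for illustration only) for the two thresholded
# wrappers above; `thresholds` is a plain Python list, `preds_ph` and
# `labels_ph` are hypothetical placeholders, and the returned tensors have one
# entry per threshold:
#
#   thresholds = [0.25, 0.5, 0.75]
#   prec, prec_op = streaming_precision_at_thresholds(
#       preds_ph, labels_ph, thresholds)
#   rec, rec_op = streaming_recall_at_thresholds(
#       preds_ph, labels_ph, thresholds)
#   # After running both update ops over the evaluation data, `prec` and `rec`
#   # each evaluate to a float tensor of shape [3].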
def streaming_false_positive_rate_at_thresholds(predictions,
                                                labels,
                                                thresholds,
                                                weights=None,
                                                metrics_collections=None,
                                                updates_collections=None,
                                                name=None):
  """Computes various fpr values for different `thresholds` on `predictions`.
  The `streaming_false_positive_rate_at_thresholds` function creates two
  local variables, `false_positives` and `true_negatives`, for various values
  of thresholds. `false_positive_rate[i]` is defined as the total weight
  of values in `predictions` above `thresholds[i]` whose corresponding entry in
  `labels` is `False`, divided by the total weight of `False` values in `labels`
  (`false_positives[i] / (false_positives[i] + true_negatives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `false_positive_rate`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `false_positive_rate` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `false_positives` and
      `true_negatives` variables that are used in the computation of
      `false_positive_rate`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
                                     (predictions, labels, weights)):
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights, includes=('fp', 'tn'))
    # Avoid division by zero.
    epsilon = _EPSILON
    def compute_fpr(fp, tn, name):
      return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
    fpr = compute_fpr(values['fp'], values['tn'], 'value')
    update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, fpr)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return fpr, update_op
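# A tiny NumPy check (added for illustration only) of the rate computed above,
# fpr = fp / (fp + tn), at a single threshold of 0.5:
#
#   import numpy as np
#   labels = np.array([False, False, True, True])
#   preds = np.array([0.6, 0.2, 0.8, 0.3])
#   above = preds > 0.5
#   fp = np.sum(above & ~labels)   # 1 (the 0.6 prediction with a False label)
#   tn = np.sum(~above & ~labels)  # 1 (the 0.2 prediction with a False label)
#   # fp / (fp + tn) == 0.5: half of the negatives score above the threshold.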
def streaming_false_negative_rate_at_thresholds(predictions,
                                                labels,
                                                thresholds,
                                                weights=None,
                                                metrics_collections=None,
                                                updates_collections=None,
                                                name=None):
  """Computes various fnr values for different `thresholds` on `predictions`.
  The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives` and `true_positives`, for various values
  of thresholds. `false_negative_rate[i]` is defined as the total weight of
  values in `predictions` at or below `thresholds[i]` whose corresponding entry
  in `labels` is `True`, divided by the total weight of `True` values in
  `labels` (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `false_negative_rate`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `false_negative_rate` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `false_negatives` and
      `true_positives` variables that are used in the computation of
      `false_negative_rate`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
                                     (predictions, labels, weights)):
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights, includes=('fn', 'tp'))
    # Avoid division by zero.
    epsilon = _EPSILON
    def compute_fnr(fn, tp, name):
      return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
    fnr = compute_fnr(values['fn'], values['tp'], 'value')
    update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, fnr)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
  if k is not None:
    name = '%s_at_%d' % (name, k)
  else:
    name = '%s_at_k' % (name)
  if class_id is not None:
    name = '%s_class%d' % (name, class_id)
  return name
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
            'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
                          labels,
                          k,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Computes the recall@k of the predictions with respect to dense labels.
  The `streaming_recall_at_k` function creates two local variables, `total` and
  `count`, that are used to compute the recall@k frequency. This frequency is
  ultimately returned as `recall_at_<k>`: an idempotent operation that simply
  divides `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
  shape [batch_size] whose elements indicate whether or not the corresponding
  label is in the top `k` `predictions`. Then `update_op` increments `total`
  with the reduced sum of `weights` where `in_top_k` is `True`, and it
  increments `count` with the reduced sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A float `Tensor` of dimension [batch_size, num_classes].
    labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
      `int64`.
    k: The number of top elements to look at for computing recall.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall_at_k`
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.
  Returns:
    recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
      which fall into the top `k` predictions.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `recall_at_k`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
  return streaming_mean(in_top_k, weights, metrics_collections,
                        updates_collections, name or _at_k_name('recall', k))
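# A small worked example (added for clarity) of the `in_top_k` reduction used
# above: for each row, the metric asks whether the true class index is among
# the k largest prediction values, then averages those hits.
#
#   predictions = [[0.1, 0.7, 0.2],   # top-2 classes: {1, 2}
#                  [0.5, 0.3, 0.2]]   # top-2 classes: {0, 1}
#   labels = [2, 2]
#   # in_top_k(predictions, labels, k=2) -> [True, False], so recall@2 == 0.5.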
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
                                 labels,
                                 k,
                                 class_id=None,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes recall@k of the predictions with respect to sparse labels.
  If `class_id` is not specified, we calculate recall as the ratio of true
      positives (i.e., correct predictions, items in the top `k` highest
      `predictions` that are found in the corresponding row in `labels`) to
      actual positives (the full `labels` row).
  If `class_id` is specified, we calculate recall by considering only the rows
      in the batch for which `class_id` is in `labels`, and computing the
      fraction of them for which `class_id` is in the top `k` highest
      `predictions`.
  `streaming_sparse_recall_at_k` creates two local variables,
  `true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
  the recall_at_k frequency. This frequency is ultimately returned as
  `recall_at_<k>`: an idempotent operation that simply divides
  `true_positive_at_<k>` by total (`true_positive_at_<k>` +
  `false_negative_at_<k>`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false negatives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_negative_at_<k>` using these values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
      The final dimension contains the logit values for each class. [D1, ... DN]
      must match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
      Values should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
    `predictions`, or if either `metrics_collections` or `updates_collections`
    are not a list or tuple.
  """
  return metrics.recall_at_k(
      k=k,
      class_id=class_id,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
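# A short worked example (added for clarity) for the sparse recall@k above.
# With multi-label targets, recall@k is the fraction of label entries that
# appear among each row's top-k predicted classes:
#
#   predictions = [[0.1, 0.6, 0.3, 0.0]]  # top-2 classes: {1, 2}
#   labels = [[1, 3]]                     # two target classes for this row
#   # Class 1 is in the top 2 but class 3 is not, so recall@2 == 1 / 2 == 0.5.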
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
                                    labels,
                                    k,
                                    class_id=None,
                                    weights=None,
                                    metrics_collections=None,
                                    updates_collections=None,
                                    name=None):
  """Computes precision@k of the predictions with respect to sparse labels.
  If `class_id` is not specified, we calculate precision as the ratio of true
      positives (i.e., correct predictions, items in the top `k` highest
      `predictions` that are found in the corresponding row in `labels`) to
      positives (all top `k` `predictions`).
  If `class_id` is specified, we calculate precision by considering only the
      rows in the batch for which `class_id` is in the top `k` highest
      `predictions`, and computing the fraction of them for which `class_id` is
      in the corresponding row in `labels`.
  We expect precision to decrease as `k` increases.
  `streaming_sparse_precision_at_k` creates two local variables,
  `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
  the precision@k frequency. This frequency is ultimately returned as
  `precision_at_<k>`: an idempotent operation that simply divides
  `true_positive_at_<k>` by total (`true_positive_at_<k>` +
  `false_positive_at_<k>`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
      The final dimension contains the logit values for each class. [D1, ... DN]
      must match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
      num_classes is the last dimension of `predictions`. Values outside this
      range are ignored.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  return metrics.precision_at_k(
      k=k,
      class_id=class_id,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
                                        labels,
                                        class_id=None,
                                        weights=None,
                                        metrics_collections=None,
                                        updates_collections=None,
                                        name=None):
  """Computes precision@k of top-k predictions with respect to sparse labels.
  If `class_id` is not specified, we calculate precision as the ratio of
      true positives (i.e., correct predictions, items in `top_k_predictions`
      that are found in the corresponding row in `labels`) to positives (all
      `top_k_predictions`).
  If `class_id` is specified, we calculate precision by considering only the
      rows in the batch for which `class_id` is in the top `k` highest
      `predictions`, and computing the fraction of them for which `class_id` is
      in the corresponding row in `labels`.
  We expect precision to decrease as `k` increases.
  `streaming_sparse_precision_at_top_k` creates two local variables,
  `true_positive_at_k` and `false_positive_at_k`, that are used to compute
  the precision@k frequency. This frequency is ultimately returned as
  `precision_at_k`: an idempotent operation that simply divides
  `true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_k`. Internally, set operations applied to `top_k_predictions`
  and `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_k` and
  `false_positive_at_k` using these values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
      The final dimension contains the indices of top-k labels. [D1, ... DN]
      must match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `top_k_predictions`. Values should be in range [0, num_classes), where
      num_classes is the last dimension of `predictions`. Values outside this
      range are ignored.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
    ValueError: If `top_k_predictions` has rank < 2.
  """
  default_name = _at_k_name('precision', class_id=class_id)
  with ops.name_scope(name, default_name,
                      (top_k_predictions, labels, weights)) as name_scope:
    return metrics_impl.precision_at_top_k(
        labels=labels,
        predictions_idx=top_k_predictions,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=name_scope)
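# A short worked example (added for clarity): unlike the @k variant, the
# caller supplies the top-k class indices directly, and precision is the
# fraction of those indices that appear in the corresponding `labels` row:
#
#   top_k_predictions = [[1, 2]]  # indices already selected by the caller
#   labels = [[1, 3]]
#   # Class 1 is a true positive and class 2 a false positive, so
#   # precision@2 == 0.5.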
def sparse_recall_at_top_k(labels,
                           top_k_predictions,
                           class_id=None,
                           weights=None,
                           metrics_collections=None,
                           updates_collections=None,
                           name=None):
  """Computes recall@k of top-k predictions with respect to sparse labels.
  If `class_id` is specified, we calculate recall by considering only the
      entries in the batch for which `class_id` is in the label, and computing
      the fraction of them for which `class_id` is in the top-k `predictions`.
  If `class_id` is not specified, we calculate recall as how often, on
      average, a class among the labels of a batch entry is found in
      `top_k_predictions`.
  `sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
  and `false_negative_at_<k>`, that are used to compute the recall_at_k
  frequency. This frequency is ultimately returned as `recall_at_<k>`: an
  idempotent operation that simply divides `true_positive_at_<k>` by total
  (`true_positive_at_<k>` + `false_negative_at_<k>`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
  true positives and false negatives weighted by `weights`. Then `update_op`
  increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
  values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `top_k_predictions`. Values should be in range [0, num_classes), where
      num_classes is the last dimension of `predictions`. Values outside this
      range always count towards `false_negative_at_<k>`.
    top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
      The final dimension contains the indices of top-k labels. [D1, ... DN]
      must match `labels`.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  default_name = _at_k_name('recall', class_id=class_id)
  with ops.name_scope(name, default_name,
                      (top_k_predictions, labels, weights)) as name_scope:
    return metrics_impl.recall_at_top_k(
        labels=labels,
        predictions_idx=top_k_predictions,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name,
                                 strict_mode=False):
  """Helper function to compute recall at a given `precision`.
  Args:
    tp: The number of true positives.
    fp: The number of false positives.
    fn: The number of false negatives.
    precision: The precision for which the recall will be calculated.
    name: An optional variable_scope name.
    strict_mode: If true and there exists a threshold where the precision is
      no smaller than the target precision, return the corresponding recall at
      the threshold. Otherwise, return 0. If false, find the threshold where the
      precision is closest to the target precision and return the recall at the
      threshold.
  Returns:
    The recall at a given `precision`.
  """
  precisions = math_ops.div(tp, tp + fp + _EPSILON)
  if not strict_mode:
    tf_index = math_ops.argmin(
        math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
    # Now, we have the implicit threshold, so compute the recall:
    return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
                        name)
  else:
    # We aim to find the threshold where the precision is minimum but no smaller
    # than the target precision.
    # The rationale:
    # 1. Compute the difference between precisions (by different thresholds) and
    #   the target precision.
    # 2. Take the reciprocal of the values from the above step. The intention is
    #   to make the positive values rank before negative values and also the
    #   smaller positives rank before larger positives.
    tf_index = math_ops.argmax(
        math_ops.div(1.0, precisions - precision + _EPSILON),
        0,
        output_type=dtypes.int32)
    def _return_good_recall():
      return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
                          name)
    return control_flow_ops.cond(precisions[tf_index] >= precision,
                                 _return_good_recall, lambda: .0)
def recall_at_precision(labels,
                        predictions,
                        precision,
                        weights=None,
                        num_thresholds=200,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None,
                        strict_mode=False):
  """Computes `recall` at `precision`.
  The `recall_at_precision` function creates four local variables,
  `tp` (true positives), `fp` (false positives), `tn` (true negatives) and
  `fn` (false negatives); `tp`, `fp` and `fn` are used to compute the `recall`
  at the given `precision` value. The
  threshold for the given `precision` value is computed and used to evaluate the
  corresponding `recall`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
  weight of each case found in the `predictions` and `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
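  For example, a minimal TF 1.x graph-mode sketch (values are illustrative only):
  ```python
    import tensorflow as tf
    labels = tf.constant([0., 0., 1., 1.])
    predictions = tf.constant([0.2, 0.6, 0.7, 0.9])
    recall, update_op = recall_at_precision(labels, predictions, precision=0.9)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)      # accumulates tp / fp / fn over this batch
      print(sess.run(recall))  # recall at the threshold closest to 0.9 precision
  ```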
  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    precision: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      `precision`.
    metrics_collections: An optional list of collections that `recall`
      should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
    strict_mode: If true and there exists a threshold where the precision is
      no smaller than the target precision, return the corresponding recall at the
      threshold. Otherwise, return 0. If false, find the threshold where the
      precision is closest to the target precision and return the recall at the
      threshold.
  Returns:
    recall: A scalar `Tensor` representing the recall at the given
      `precision` value.
    update_op: An operation that increments the `tp`, `fp` and `fn`
      variables appropriately and whose value matches `recall`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `precision` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  if not 0 <= precision <= 1:
    raise ValueError('`precision` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'recall_at_precision',
                                     (predictions, labels, weights)):
    thresholds = [
        i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
    ]
    thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights)
    recall = _compute_recall_at_precision(values['tp'], values['fp'],
                                          values['fn'], precision, 'value',
                                          strict_mode)
    update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
                                             update_ops['fn'], precision,
                                             'update_op', strict_mode)
    if metrics_collections:
      ops.add_to_collections(metrics_collections, recall)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return recall, update_op
def precision_at_recall(labels,
                        predictions,
                        target_recall,
                        weights=None,
                        num_thresholds=200,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the precision at a given recall.
  This function creates variables to track the true positives, false positives,
  true negatives, and false negatives at a set of thresholds. Among those
  thresholds where recall is at least `target_recall`, precision is computed
  at the threshold where recall is closest to `target_recall`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  precision at `target_recall`. `update_op` increments the counts of true
  positives, false positives, true negatives, and false negatives with the
  weight of each case found in the `predictions` and `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  For additional information about precision and recall, see
  http://en.wikipedia.org/wiki/Precision_and_recall
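  For example, a minimal TF 1.x graph-mode sketch (values are illustrative only):
  ```python
    import tensorflow as tf
    labels = tf.constant([0., 0., 1., 1.])
    predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
    precision, update_op = precision_at_recall(
        labels, predictions, target_recall=0.5)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)         # accumulates the confusion counts
      print(sess.run(precision))  # precision at the threshold that reaches the
                                  # target recall
  ```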
  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    target_recall: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      recall.
    metrics_collections: An optional list of collections to which `precision`
      should be added.
    updates_collections: An optional list of collections to which `update_op`
      should be added.
    name: An optional variable_scope name.
  Returns:
    precision: A scalar `Tensor` representing the precision at the given
      `target_recall` value.
    update_op: An operation that increments the variables for tracking the
      true positives, false positives, true negatives, and false negatives and
      whose value matches `precision`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `target_recall` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_recall is not '
                       'supported when eager execution is enabled.')
  if target_recall < 0 or target_recall > 1:
    raise ValueError('`target_recall` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'precision_at_recall',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # Used to avoid division by zero.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights)
    def compute_precision_at_recall(tp, fp, fn, name):
      """Computes the precision at a given recall.
      Args:
        tp: True positives.
        fp: False positives.
        fn: False negatives.
        name: A name for the operation.
      Returns:
        The precision at the desired recall.
      """
      recalls = math_ops.div(tp, tp + fn + kepsilon)
      # Because recall is monotone decreasing as a function of the threshold,
      # the smallest recall exceeding target_recall occurs at the largest
      # threshold where recall >= target_recall.
      admissible_recalls = math_ops.cast(
          math_ops.greater_equal(recalls, target_recall), dtypes.int64)
      tf_index = math_ops.reduce_sum(admissible_recalls) - 1
      # Now we have the threshold at which to compute precision:
      return math_ops.div(tp[tf_index] + kepsilon,
                          tp[tf_index] + fp[tf_index] + kepsilon,
                          name)
    precision_value = compute_precision_at_recall(
        values['tp'], values['fp'], values['fn'], 'value')
    update_op = compute_precision_at_recall(
        update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, precision_value)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return precision_value, update_op
def streaming_sparse_average_precision_at_k(predictions,
                                            labels,
                                            k,
                                            weights=None,
                                            metrics_collections=None,
                                            updates_collections=None,
                                            name=None):
  """Computes average precision@k of predictions with respect to sparse labels.
  See `sparse_average_precision_at_k` for details on formula. `weights` are
  applied to the result of `sparse_average_precision_at_k`
  `streaming_sparse_average_precision_at_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `average_precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
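  For example, a minimal TF 1.x graph-mode sketch (the logits and labels below
  are made up for illustration):
  ```python
    import tensorflow as tf
    predictions = tf.constant([[0.10, 0.60, 0.30],
                               [0.80, 0.05, 0.15]])
    labels = tf.constant([[1], [2]], dtype=tf.int64)
    avg_prec, update_op = streaming_sparse_average_precision_at_k(
        predictions, labels, k=2)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(avg_prec))  # 0.75: labels found at ranks 1 and 2
  ```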
  Args:
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and `predictions` has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
      num_classes is the last dimension of `predictions`. Values outside this
      range are ignored.
    k: Integer, k for @k metric. This will calculate an average precision for
      range `[1,k]`, as documented above.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  """
  return metrics.average_precision_at_k(
      k=k,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
                                                labels,
                                                weights=None,
                                                metrics_collections=None,
                                                updates_collections=None,
                                                name=None):
  """Computes average precision@k of predictions with respect to sparse labels.
  `streaming_sparse_average_precision_at_top_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `average_precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
  the true positives and false positives weighted by `weights`. Then `update_op`
  increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
  values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `top_k_predictions` has shape [batch size, k]. The final
      dimension must be set and contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`. Values should be in range
      [0, num_classes).
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
      Values should be in range [0, num_classes).
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  Raises:
    ValueError: if the last dimension of top_k_predictions is not set.
  """
  return metrics_impl._streaming_sparse_average_precision_at_top_k(  # pylint: disable=protected-access
      predictions_idx=top_k_predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.mean_absolute_error. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the mean absolute error between the labels and predictions.
  The `streaming_mean_absolute_error` function creates two local variables,
  `total` and `count` that are used to compute the mean absolute error. This
  average is weighted by `weights`, and it is ultimately returned as
  `mean_absolute_error`: an idempotent operation that simply divides `total` by
  `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_absolute_error`. Internally, an `absolute_errors` operation computes the
  absolute value of the differences between `predictions` and `labels`. Then
  `update_op` increments `total` with the reduced sum of the product of
  `weights` and `absolute_errors`, and it increments `count` with the reduced
  sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
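  For example, a minimal TF 1.x graph-mode sketch (values are illustrative only):
  ```python
    import tensorflow as tf
    labels = tf.constant([1., 2., 3., 4.])
    predictions = tf.constant([1.5, 2., 2., 5.])
    mae, update_op = streaming_mean_absolute_error(predictions, labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)   # can be run once per batch
      print(sess.run(mae))  # 0.625 = (0.5 + 0.0 + 1.0 + 1.0) / 4
  ```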
  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_absolute_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    mean_absolute_error: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_absolute_error`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.mean_absolute_error(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_mean_relative_error(predictions,
                                  labels,
                                  normalizer,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the mean relative error by normalizing with the given values.
  The `streaming_mean_relative_error` function creates two local variables,
  `total` and `count` that are used to compute the mean relative absolute error.
  This average is weighted by `weights`, and it is ultimately returned as
  `mean_relative_error`: an idempotent operation that simply divides `total` by
  `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
  absolute value of the differences between `predictions` and `labels` by the
  `normalizer`. Then `update_op` increments `total` with the reduced sum of the
  product of `weights` and `relative_errors`, and it increments `count` with the
  reduced sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
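  For example, a minimal TF 1.x graph-mode sketch that normalizes each error by
  the label itself (a common, but not required, choice; values are illustrative):
  ```python
    import tensorflow as tf
    labels = tf.constant([1., 2., 4.])
    predictions = tf.constant([1.1, 1.8, 5.])
    mre, update_op = streaming_mean_relative_error(
        predictions, labels, normalizer=labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(mre))  # ~0.15: mean of |error| / label
  ```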
  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    normalizer: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_relative_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    mean_relative_error: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_relative_error`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.mean_relative_error(
      normalizer=normalizer,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.mean_squared_error. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
                                 labels,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes the mean squared error between the labels and predictions.
  The `streaming_mean_squared_error` function creates two local variables,
  `total` and `count` that are used to compute the mean squared error.
  This average is weighted by `weights`, and it is ultimately returned as
  `mean_squared_error`: an idempotent operation that simply divides `total` by
  `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_squared_error`. Internally, a `squared_error` operation computes the
  element-wise square of the difference between `predictions` and `labels`. Then
  `update_op` increments `total` with the reduced sum of the product of
  `weights` and `squared_error`, and it increments `count` with the reduced sum
  of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    mean_squared_error: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_squared_error`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.mean_squared_error(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(
    None,
    'Please switch to tf.metrics.root_mean_squared_error. Note that the '
    'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
                                      labels,
                                      weights=None,
                                      metrics_collections=None,
                                      updates_collections=None,
                                      name=None):
  """Computes the root mean squared error between the labels and predictions.
  The `streaming_root_mean_squared_error` function creates two local variables,
  `total` and `count` that are used to compute the root mean squared error.
  This average is weighted by `weights`, and it is ultimately returned as
  `root_mean_squared_error`: an idempotent operation that takes the square root
  of the division of `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `root_mean_squared_error`. Internally, a `squared_error` operation computes
  the element-wise square of the difference between `predictions` and `labels`.
  Then `update_op` increments `total` with the reduced sum of the product of
  `weights` and `squared_error`, and it increments `count` with the reduced sum
  of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `root_mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    root_mean_squared_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `root_mean_squared_error`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.root_mean_squared_error(
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_covariance(predictions,
                         labels,
                         weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes the unbiased sample covariance between `predictions` and `labels`.
  The `streaming_covariance` function creates four local variables,
  `comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
  compute the sample covariance between predictions and labels across multiple
  batches of data. The covariance is ultimately returned as an idempotent
  operation that simply divides `comoment` by `count` - 1. We use `count` - 1
  in order to get an unbiased estimate.
  The algorithm used for this online computation is described in
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
  Specifically, the formula used to combine two sample comoments is
  `C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
  The comoment for a single batch of data is simply
  `sum((x - E[x]) * (y - E[y]))`, optionally weighted.
  If `weights` is not None, then it is used to compute weighted comoments,
  means, and count. NOTE: these weights are treated as "frequency weights", as
  opposed to "reliability weights". See discussion of the difference on
  https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
  To facilitate the computation of covariance across multiple batches of data,
  the function creates an `update_op` operation, which updates underlying
  variables and returns the updated covariance.
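  For example, a minimal TF 1.x graph-mode sketch (values are illustrative only):
  ```python
    import tensorflow as tf
    predictions = tf.constant([1., 2., 3., 4.])
    labels = tf.constant([2., 4., 6., 8.])
    cov, update_op = streaming_covariance(predictions, labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)   # run once per batch of data
      print(sess.run(cov))  # ~3.33: comoment of 10 divided by (4 - 1)
  ```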
  Args:
    predictions: A `Tensor` of arbitrary size.
    labels: A `Tensor` of the same size as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    covariance: A `Tensor` representing the current unbiased sample covariance,
      `comoment` / (`count` - 1).
    update_op: An operation that updates the local variables appropriately.
  Raises:
    ValueError: If labels and predictions are of different sizes or if either
      `metrics_collections` or `updates_collections` are not a list or tuple.
  """
  with variable_scope.variable_scope(name, 'covariance',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions, labels, weights)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
    mean_prediction = metrics_impl.metric_variable(
        [], dtypes.float32, name='mean_prediction')
    mean_label = metrics_impl.metric_variable(
        [], dtypes.float32, name='mean_label')
    comoment = metrics_impl.metric_variable(  # C_A in update equation
        [], dtypes.float32, name='comoment')
    if weights is None:
      batch_count = math_ops.to_float(array_ops.size(labels))  # n_B in eqn
      weighted_predictions = predictions
      weighted_labels = labels
    else:
      weights = weights_broadcast_ops.broadcast_weights(weights, labels)
      batch_count = math_ops.reduce_sum(weights)  # n_B in eqn
      weighted_predictions = math_ops.multiply(predictions, weights)
      weighted_labels = math_ops.multiply(labels, weights)
    update_count = state_ops.assign_add(count_, batch_count)  # n_AB in eqn
    prev_count = update_count - batch_count  # n_A in update equation
    # We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
    # batch_mean_prediction is E[x_B] in the update equation
    batch_mean_prediction = _safe_div(
        math_ops.reduce_sum(weighted_predictions), batch_count,
        'batch_mean_prediction')
    delta_mean_prediction = _safe_div(
        (batch_mean_prediction - mean_prediction) * batch_count, update_count,
        'delta_mean_prediction')
    update_mean_prediction = state_ops.assign_add(mean_prediction,
                                                  delta_mean_prediction)
    # prev_mean_prediction is E[x_A] in the update equation
    prev_mean_prediction = update_mean_prediction - delta_mean_prediction
    # batch_mean_label is E[y_B] in the update equation
    batch_mean_label = _safe_div(
        math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
    delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
                                 update_count, 'delta_mean_label')
    update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
    # prev_mean_label is E[y_A] in the update equation
    prev_mean_label = update_mean_label - delta_mean_label
    unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
                                    (labels - batch_mean_label))
    # batch_comoment is C_B in the update equation
    if weights is None:
      batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
    else:
      batch_comoment = math_ops.reduce_sum(
          unweighted_batch_coresiduals * weights)
    # View delta_comoment as = C_AB - C_A in the update equation above.
    # Since C_A is stored in a var, by how much do we need to increment that var
    # to make the var = C_AB?
    delta_comoment = (
        batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
        (prev_mean_label - batch_mean_label) *
        (prev_count * batch_count / update_count))
    update_comoment = state_ops.assign_add(comoment, delta_comoment)
    covariance = array_ops.where(
        math_ops.less_equal(count_, 1.),
        float('nan'),
        math_ops.truediv(comoment, count_ - 1),
        name='covariance')
    with ops.control_dependencies([update_comoment]):
      update_op = array_ops.where(
          math_ops.less_equal(count_, 1.),
          float('nan'),
          math_ops.truediv(comoment, count_ - 1),
          name='update_op')
  if metrics_collections:
    ops.add_to_collections(metrics_collections, covariance)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return covariance, update_op
def streaming_pearson_correlation(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes Pearson correlation coefficient between `predictions`, `labels`.
  The `streaming_pearson_correlation` function delegates to
  `streaming_covariance` the tracking of three [co]variances:
  - `streaming_covariance(predictions, labels)`, i.e. covariance
  - `streaming_covariance(predictions, predictions)`, i.e. variance
  - `streaming_covariance(labels, labels)`, i.e. variance
  The product-moment correlation ultimately returned is an idempotent operation
  `cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
  facilitate correlation computation across multiple batches, the function
  groups the `update_op`s of the underlying streaming_covariance and returns an
  `update_op`.
  If `weights` is not None, then it is used to compute a weighted correlation.
  NOTE: these weights are treated as "frequency weights", as opposed to
  "reliability weights". See discussion of the difference on
  https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
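  For example, a minimal TF 1.x graph-mode sketch (values are illustrative only):
  ```python
    import tensorflow as tf
    predictions = tf.constant([1., 2., 3., 4.])
    labels = tf.constant([2., 4., 6., 8.])
    pearson_r, update_op = streaming_pearson_correlation(predictions, labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(pearson_r))  # ~1.0 for perfectly correlated inputs
  ```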
  Args:
    predictions: A `Tensor` of arbitrary size.
    labels: A `Tensor` of the same size as predictions.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    pearson_r: A `Tensor` representing the current Pearson product-moment
      correlation coefficient, the value of
      `cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
    update_op: An operation that updates the underlying variables appropriately.
  Raises:
    ValueError: If `labels` and `predictions` are of different sizes, or if
      `weights` is the wrong size, or if either `metrics_collections` or
      `updates_collections` are not a `list` or `tuple`.
  """
  with variable_scope.variable_scope(name, 'pearson_r',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions, labels, weights)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Broadcast weights here to avoid duplicate broadcasting in each call to
    # `streaming_covariance`.
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, labels)
    cov, update_cov = streaming_covariance(
        predictions, labels, weights=weights, name='covariance')
    var_predictions, update_var_predictions = streaming_covariance(
        predictions, predictions, weights=weights, name='variance_predictions')
    var_labels, update_var_labels = streaming_covariance(
        labels, labels, weights=weights, name='variance_labels')
    pearson_r = math_ops.truediv(
        cov,
        math_ops.multiply(
            math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
        name='pearson_r')
    update_op = math_ops.truediv(
        update_cov,
        math_ops.multiply(
            math_ops.sqrt(update_var_predictions),
            math_ops.sqrt(update_var_labels)),
        name='update_op')
  if metrics_collections:
    ops.add_to_collections(metrics_collections, pearson_r)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return pearson_r, update_op
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
                                   labels,
                                   dim,
                                   weights=None,
                                   metrics_collections=None,
                                   updates_collections=None,
                                   name=None):
  """Computes the cosine distance between the labels and predictions.
  The `streaming_mean_cosine_distance` function creates two local variables,
  `total` and `count` that are used to compute the average cosine distance
  between `predictions` and `labels`. This average is weighted by `weights`,
  and it is ultimately returned as `mean_distance`, which is an idempotent
  operation that simply divides `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_distance`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
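  For example, a minimal TF 1.x graph-mode sketch with already unit-normalized
  rows (values are illustrative only):
  ```python
    import tensorflow as tf
    labels = tf.constant([[1., 0.], [0., 1.]])
    predictions = tf.constant([[1., 0.], [1., 0.]])
    dist, update_op = streaming_mean_cosine_distance(predictions, labels, dim=1)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(dist))  # 0.5: distances of 0.0 and 1.0 averaged
  ```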
  Args:
    predictions: A `Tensor` of the same shape as `labels`.
    labels: A `Tensor` of arbitrary shape.
    dim: The dimension along which the cosine distance is computed.
    weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
      and whose dimension `dim` is 1.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    mean_distance: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
      predictions, labels, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  radial_diffs = math_ops.multiply(predictions, labels)
  radial_diffs = math_ops.reduce_sum(
      radial_diffs, reduction_indices=[
          dim,
      ], keepdims=True)
  mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
                                            name or 'mean_cosine_distance')
  mean_distance = math_ops.subtract(1.0, mean_distance)
  update_op = math_ops.subtract(1.0, update_op)
  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return mean_distance, update_op
def streaming_percentage_less(values,
                              threshold,
                              weights=None,
                              metrics_collections=None,
                              updates_collections=None,
                              name=None):
  """Computes the percentage of values less than the given threshold.
  The `streaming_percentage_less` function creates two local variables,
  `total` and `count` that are used to compute the percentage of `values` that
  fall below `threshold`. This rate is weighted by `weights`, and it is
  ultimately returned as `percentage` which is an idempotent operation that
  simply divides `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `percentage`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
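  For example, a minimal TF 1.x graph-mode sketch (values are illustrative only):
  ```python
    import tensorflow as tf
    values = tf.constant([0.2, 0.9, 0.4, 0.7])
    pct, update_op = streaming_percentage_less(values, threshold=0.5)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(pct))  # 0.5: two of the four values fall below 0.5
  ```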
  Args:
    values: A numeric `Tensor` of arbitrary size.
    threshold: A scalar threshold.
    weights: An optional `Tensor` whose shape is broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    percentage: A `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  return metrics.percentage_below(
      values=values,
      threshold=threshold,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_mean_iou(predictions,
                       labels,
                       num_classes,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Calculate per-step mean Intersection-Over-Union (mIOU).
  Mean Intersection-Over-Union is a common evaluation metric for
  semantic image segmentation, which first computes the IOU for each
  semantic class and then computes the average over classes.
  IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by `weights`,
  and mIOU is then calculated from it.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean_iou`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
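  For example, a minimal TF 1.x graph-mode sketch for two classes (values are
  illustrative only):
  ```python
    import tensorflow as tf
    labels = tf.constant([0, 0, 1, 1], dtype=tf.int32)
    predictions = tf.constant([0, 1, 1, 1], dtype=tf.int32)
    miou, update_op = streaming_mean_iou(predictions, labels, num_classes=2)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)    # accumulates the confusion matrix
      print(sess.run(miou))  # ~0.58: mean of per-class IOUs 1/2 and 2/3
  ```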
  Args:
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
    metrics_collections: An optional list of collections that `mean_iou`
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.
  Returns:
    mean_iou: A `Tensor` representing the mean intersection-over-union.
    update_op: An operation that increments the confusion matrix.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.mean_iou(
      num_classes=num_classes,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def _next_array_size(required_size, growth_factor=1.5):
  """Calculate the next size for reallocating a dynamic array.
  Args:
    required_size: number or tf.Tensor specifying required array capacity.
    growth_factor: optional number or tf.Tensor specifying the growth factor
      between subsequent allocations.
  Returns:
    tf.Tensor with dtype=int32 giving the next array size.
  """
  exponent = math_ops.ceil(
      math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
          math_ops.cast(growth_factor, dtypes.float32)))
  return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
def streaming_concat(values,
                     axis=0,
                     max_size=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Concatenate values along an axis across batches.
  The function `streaming_concat` creates two local variables, `array` and
  `size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `max_size` is `None`), which ensures
  that updates can be run in amortized constant time.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that appends the values of a tensor and returns the
  length of the concatenated axis.
  This op allows for evaluating metrics that cannot be updated incrementally
  using the same framework as other streaming metrics.
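  For example, a minimal TF 1.x graph-mode sketch that accumulates rows fed in
  over several runs (the placeholder and feeds are illustrative only):
  ```python
    import tensorflow as tf
    batch = tf.placeholder(tf.float32, shape=[None, 2])
    values, update_op = streaming_concat(batch)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op, feed_dict={batch: [[1., 2.]]})
      sess.run(update_op, feed_dict={batch: [[3., 4.], [5., 6.]]})
      print(sess.run(values))  # the three rows concatenated along axis 0
  ```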
  Args:
    values: `Tensor` to concatenate. Rank and the shape along all axes other
      than the axis to concatenate along must be statically known.
    axis: optional integer axis to concatenate along.
    max_size: optional integer maximum size of `value` along the given axis.
      Once the maximum size is reached, further updates are no-ops. By default,
      there is no maximum size: the array is resized as necessary.
    metrics_collections: An optional list of collections that `value`
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.
  Returns:
    value: A `Tensor` representing the concatenated values.
    update_op: An operation that concatenates the next values.
  Raises:
    ValueError: if `values` does not have a statically known rank, `axis` is
      not in the valid range or the size of `values` is not statically known
      along any axis other than `axis`.
  """
  with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
    # pylint: disable=invalid-slice-index
    values_shape = values.get_shape()
    if values_shape.dims is None:
      raise ValueError('`values` must have statically known rank')
    ndim = len(values_shape)
    if axis < 0:
      axis += ndim
    if not 0 <= axis < ndim:
      raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
    fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
    if any(value is None for value in fixed_shape):
      raise ValueError('all dimensions of `values` other than the dimension to '
                       'concatenate along must have statically known size')
    # We move `axis` to the front of the internal array so assign ops can be
    # applied to contiguous slices
    init_size = 0 if max_size is None else max_size
    init_shape = [init_size] + fixed_shape
    array = metrics_impl.metric_variable(
        init_shape, values.dtype, validate_shape=False, name='array')
    size = metrics_impl.metric_variable([], dtypes.int32, name='size')
    perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
    valid_array = array[:size]
    valid_array.set_shape([None] + fixed_shape)
    value = array_ops.transpose(valid_array, perm, name='concat')
    values_size = array_ops.shape(values)[axis]
    if max_size is None:
      batch_size = values_size
    else:
      batch_size = math_ops.minimum(values_size, max_size - size)
    perm = [axis] + [n for n in range(ndim) if n != axis]
    batch_values = array_ops.transpose(values, perm)[:batch_size]
    def reallocate():
      next_size = _next_array_size(new_size)
      next_shape = array_ops.stack([next_size] + fixed_shape)
      new_value = array_ops.zeros(next_shape, dtype=values.dtype)
      old_value = array.value()
      assign_op = state_ops.assign(array, new_value, validate_shape=False)
      with ops.control_dependencies([assign_op]):
        copy_op = array[:size].assign(old_value[:size])
      # return value needs to be the same dtype as no_op() for cond
      with ops.control_dependencies([copy_op]):
        return control_flow_ops.no_op()
    new_size = size + batch_size
    array_size = array_ops.shape_internal(array, optimize=False)[0]
    maybe_reallocate_op = control_flow_ops.cond(
        new_size > array_size, reallocate, control_flow_ops.no_op)
    with ops.control_dependencies([maybe_reallocate_op]):
      append_values_op = array[size:new_size].assign(batch_values)
    with ops.control_dependencies([append_values_op]):
      update_op = size.assign(new_size)
    if metrics_collections:
      ops.add_to_collections(metrics_collections, value)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return value, update_op
    # pylint: enable=invalid-slice-index
def aggregate_metrics(*value_update_tuples):
  """Aggregates the metric value tensors and update ops into two lists.
  Args:
    *value_update_tuples: a variable number of tuples, each of which contain the
      pair of (value_tensor, update_op) from a streaming metric.
  Returns:
    A list of value `Tensor` objects and a list of update ops.
  Raises:
    ValueError: if `value_update_tuples` is empty.
  """
  if not value_update_tuples:
    raise ValueError('Expected at least one value_tensor/update_op pair')
  value_ops, update_ops = zip(*value_update_tuples)
  return list(value_ops), list(update_ops)
def aggregate_metric_map(names_to_tuples):
  """Aggregates the metric names to tuple dictionary.
  This function is useful for pairing metric names with their associated value
  and update ops when the list of metrics is long. For example:
  ```python
    metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
        'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(
            predictions, labels, weights),
        'Mean Relative Error': slim.metrics.streaming_mean_relative_error(
            predictions, labels, labels, weights),
        'RMSE Linear': slim.metrics.streaming_root_mean_squared_error(
            predictions, labels, weights),
        'RMSE Log': slim.metrics.streaming_root_mean_squared_error(
            predictions, labels, weights),
    })
  ```
  Args:
    names_to_tuples: a map of metric names to tuples, each of which contain the
      pair of (value_tensor, update_op) from a streaming metric.
  Returns:
    A dictionary from metric names to value ops and a dictionary from metric
    names to update ops.
  """
  metric_names = names_to_tuples.keys()
  value_ops, update_ops = zip(*names_to_tuples.values())
  return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
          weights=None,
          metrics_collections=None,
          updates_collections=None,
          name=None):
  """Computes the number of examples, or sum of `weights`.
  This metric keeps track of the denominator in `tf.metrics.mean`.
  When evaluating some metric (e.g. mean) on one or more subsets of the data,
  this auxiliary metric is useful for keeping track of how many examples there
  are in each subset.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
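  For example, a minimal TF 1.x graph-mode sketch (values are illustrative only):
  ```python
    import tensorflow as tf
    values = tf.constant([1., 2., 3., 4.])
    weights = tf.constant([1., 1., 0., 1.])
    total, update_op = count(values, weights=weights)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(total))  # 3.0: the sum of the weights seen so far
  ```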
  Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.
  Returns:
    count: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the metric from a batch of data.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.contrib.metrics.count is not supported when eager '
                       'execution is enabled.')
  with variable_scope.variable_scope(name, 'count', (values, weights)):
    count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
    if weights is None:
      num_values = math_ops.to_float(array_ops.size(values))
    else:
      values = math_ops.to_float(values)
      values, _, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
          predictions=values,
          labels=None,
          weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      num_values = math_ops.reduce_sum(weights)
    with ops.control_dependencies([values]):
      update_count_op = state_ops.assign_add(count_, num_values)
    count_ = metrics_impl._aggregate_variable(count_, metrics_collections)  # pylint: disable=protected-access
    if updates_collections:
      ops.add_to_collections(updates_collections, update_count_op)
    return count_, update_count_op
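# Editor's illustrative sketch: using `count` to track how many examples fall
# into a masked subset. The `values` tensor and the boolean `in_subset` mask
# are assumed to come from the caller's input pipeline.
def _example_count_subset(values, in_subset):
  """Sketch: count only the examples where `in_subset` is True."""
  subset_count, update_op = count(
      values, weights=math_ops.to_float(in_subset), name='subset_count')
  return subset_count, update_op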
def cohen_kappa(labels,
                predictions_idx,
                num_classes,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Calculates Cohen's kappa.
  [Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
  that measures inter-annotator agreement.
  The `cohen_kappa` function calculates the confusion matrix, and creates three
  local variables to compute Cohen's kappa: `po`, `pe_row`, and `pe_col`,
  which refer to the diagonal entries, row totals and column totals of the
  confusion matrix, respectively. This value is ultimately returned as `kappa`,
  an idempotent operation that is calculated by
      pe = (pe_row * pe_col) / N
      k = (sum(po) - sum(pe)) / (N - sum(pe))
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `kappa`. `update_op` weights each prediction by the corresponding value in
  `weights`.
  Class labels are expected to start at 0. E.g., if `num_classes`
  was three, then the possible labels would be [0, 1, 2].
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but this method
  doesn't support a weighted confusion matrix yet.
  Args:
    labels: 1-D `Tensor` of real labels for the classification task. Must be
      one of the following types: int16, int32, int64.
    predictions_idx: 1-D `Tensor` of predicted class indices for a given
      classification. Must have the same type as `labels`.
    num_classes: The possible number of labels.
    weights: Optional `Tensor` whose shape matches `predictions`.
    metrics_collections: An optional list of collections that `kappa` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    kappa: Scalar float `Tensor` representing the current Cohen's kappa.
    update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
      variables appropriately and whose value matches `kappa`.
  Raises:
    ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
      have mismatched shapes, or if `weights` is not `None` and its shape
      doesn't match `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
                       'when eager execution is enabled.')
  if num_classes < 2:
    raise ValueError('`num_classes` must be >= 2. '
                     'Found: {}'.format(num_classes))
  with variable_scope.variable_scope(name, 'cohen_kappa',
                                     (labels, predictions_idx, weights)):
    # Convert 2-dim (num, 1) to 1-dim (num,)
    labels.get_shape().with_rank_at_most(2)
    if labels.get_shape().ndims == 2:
      labels = array_ops.squeeze(labels, axis=[-1])
    predictions_idx, labels, weights = (
        metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
            predictions=predictions_idx,
            labels=labels,
            weights=weights))
    predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
    stat_dtype = (
        dtypes.int64
        if weights is None or weights.dtype.is_integer else dtypes.float32)
    po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
    pe_row = metrics_impl.metric_variable(
        (num_classes,), stat_dtype, name='pe_row')
    pe_col = metrics_impl.metric_variable(
        (num_classes,), stat_dtype, name='pe_col')
    # Table of the counts of agreement:
    counts_in_table = confusion_matrix.confusion_matrix(
        labels,
        predictions_idx,
        num_classes=num_classes,
        weights=weights,
        dtype=stat_dtype,
        name='counts_in_table')
    po_t = array_ops.diag_part(counts_in_table)
    pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
    pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
    update_po = state_ops.assign_add(po, po_t)
    update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
    update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
    def _calculate_k(po, pe_row, pe_col, name):
      po_sum = math_ops.reduce_sum(po)
      total = math_ops.reduce_sum(pe_row)
      pe_sum = math_ops.reduce_sum(
          metrics_impl._safe_div(  # pylint: disable=protected-access
              pe_row * pe_col, total, None))
      po_sum, pe_sum, total = (math_ops.to_double(po_sum),
                               math_ops.to_double(pe_sum),
                               math_ops.to_double(total))
      # kappa = (po - pe) / (N - pe)
      k = metrics_impl._safe_scalar_div(  # pylint: disable=protected-access
          po_sum - pe_sum,
          total - pe_sum,
          name=name)
      return k
    kappa = _calculate_k(po, pe_row, pe_col, name='value')
    update_op = _calculate_k(
        update_po, update_pe_row, update_pe_col, name='update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, kappa)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return kappa, update_op
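# Editor's illustrative sketch: wiring `cohen_kappa` up to class logits. The
# int64 `labels` tensor and the [batch, num_classes] `logits` tensor are
# assumptions; `math_ops.argmax` returns int64 indices, matching `labels`.
def _example_cohen_kappa_from_logits(labels, logits, num_classes):
  """Sketch: compute streaming Cohen's kappa from raw logits."""
  predictions_idx = math_ops.argmax(logits, axis=1)
  return cohen_kappa(labels, predictions_idx, num_classes)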
__all__ = [
    'auc_with_confidence_intervals',
    'aggregate_metric_map',
    'aggregate_metrics',
    'cohen_kappa',
    'count',
    'precision_recall_at_equal_thresholds',
    'recall_at_precision',
    'sparse_recall_at_top_k',
    'streaming_accuracy',
    'streaming_auc',
    'streaming_curve_points',
    'streaming_dynamic_auc',
    'streaming_false_negative_rate',
    'streaming_false_negative_rate_at_thresholds',
    'streaming_false_negatives',
    'streaming_false_negatives_at_thresholds',
    'streaming_false_positive_rate',
    'streaming_false_positive_rate_at_thresholds',
    'streaming_false_positives',
    'streaming_false_positives_at_thresholds',
    'streaming_mean',
    'streaming_mean_absolute_error',
    'streaming_mean_cosine_distance',
    'streaming_mean_iou',
    'streaming_mean_relative_error',
    'streaming_mean_squared_error',
    'streaming_mean_tensor',
    'streaming_percentage_less',
    'streaming_precision',
    'streaming_precision_at_thresholds',
    'streaming_recall',
    'streaming_recall_at_k',
    'streaming_recall_at_thresholds',
    'streaming_root_mean_squared_error',
    'streaming_sensitivity_at_specificity',
    'streaming_sparse_average_precision_at_k',
    'streaming_sparse_average_precision_at_top_k',
    'streaming_sparse_precision_at_k',
    'streaming_sparse_precision_at_top_k',
    'streaming_sparse_recall_at_k',
    'streaming_specificity_at_sensitivity',
    'streaming_true_negatives',
    'streaming_true_negatives_at_thresholds',
    'streaming_true_positives',
    'streaming_true_positives_at_thresholds',
]
 | 
	apache-2.0 | 
| 
	wathen/PhD | 
	MHD/FEniCS/ShiftCurlCurl/CppGradient/Efficient/CurlCurlSecondOrder.py | 
	1 | 
	5726 | 
	import petsc4py, sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import os, inspect
from dolfin import *
import numpy
import ExactSol
import MatrixOperations as MO
import CheckPetsc4py as CP
import HiptmairPrecond
import HiptmairSetup
from timeit import default_timer as timer
m = 8
errL2b =numpy.zeros((m-1,1))
errCurlb =numpy.zeros((m-1,1))
l2border =  numpy.zeros((m-1,1))
Curlborder =numpy.zeros((m-1,1))
ItsSave = numpy.zeros((m-1,1))
DimSave = numpy.zeros((m-1,1))
TimeSave = numpy.zeros((m-1,1))
NN = numpy.zeros((m-1,1))
Curlgrad = numpy.zeros((m-1,1))
Massgrad = numpy.zeros((m-1,1))
Laplgrad = numpy.zeros((m-1,1))
dim =3
for xx in xrange(1,m):
    NN[xx-1] = xx+0
    nn = int(2**(NN[xx-1][0]))
    # nn = 1
    omega = 1
    if dim == 2:
        mesh = UnitSquareMesh(int(nn),int(nn))
#        mesh =  RectangleMesh(0.0, 0.0, 1.0, 1.5, int(nn), int(nn), 'left')
        u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M2D(2,Show="yes", Mass = omega)
    else:
        mesh = UnitCubeMesh(int(nn),int(nn),int(nn))
        u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M3D(1,Show="yes", Mass = omega)
    order = 2
    parameters['reorder_dofs_serial'] = False
    Magnetic = FunctionSpace(mesh, "N1curl", order)
    Lagrange = FunctionSpace(mesh, "CG", order)
    parameters['reorder_dofs_serial'] = False
    DimSave[xx-1] = Magnetic.dim()
    print Magnetic.dim()
    parameters['linear_algebra_backend'] = 'uBLAS'
    # tic()
#    C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)
#    G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)
    # endTimeB = toc()
    # print endTimeB
    print "\n"
    # tic()
    # C, P = HiptmairSetup.HiptmairMatrixSetup(mesh, Magnetic.dim(), Lagrange.dim())
    # G, P = HiptmairSetup.HiptmairBCsetup(C,P, mesh, [Magnetic,Lagrange])
    # endTime = toc()
    # print endTime
    # ataaa
    def boundary(x, on_boundary):
        return on_boundary
    bc = DirichletBC(Magnetic,u0, boundary)
    bcu = DirichletBC(Lagrange, Expression(("0.0")), boundary)
    (v) = TestFunction(Magnetic)
    (u) = TrialFunction(Magnetic)
    (p) = TrialFunction(Lagrange)
    (q) = TestFunction(Lagrange)
    a = inner(curl(u),curl(v))*dx + inner(u,v)*dx
    L1  = inner(v, CurlMass)*dx
    tic()
    Acurl,b = assemble_system(a,L1,bc, form_compiler_parameters={"eliminate_zeros": True})
    print "System assembled, time: ", toc()
    tic()
    A,b = CP.Assemble(Acurl,b)
    x = b.duplicate()
    print "PETSc system assembled, time: ", toc()
    MatVec = 'yes'
    if MatVec == "yes":
        tic()
        VecLagrange, kspMass, VectorLaplacian, ScalarLaplacian, B, BC = HiptmairSetup.HiptmairAnyOrder(Magnetic,Lagrange)
        # del b1, b2
        print "Hiptmair Laplacians BC assembled, time: ", toc()
        ksp = PETSc.KSP().create()
        ksp.setTolerances(1e-6)
        ksp.setType('cg')
        ksp.setOperators(A,A)
        pc = ksp.getPC()
        reshist = {}
        def monitor(ksp, its, rnorm):
                reshist[its] = rnorm
                print its, '         ', rnorm
        ksp.setMonitor(monitor)
        pc.setType(PETSc.PC.Type.PYTHON)
        kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)
        del A, VectorLaplacian, ScalarLaplacian
        pc.setPythonContext(HiptmairPrecond.HiptmairApply([Magnetic,Lagrange,VecLagrange] ,B, kspMass, kspVector, kspScalar, diag, BC))
        scale = b.norm()
        b = b/scale
        tic()
        ksp.solve(b, x)
        TimeSave[xx-1] = toc()
        x = x*scale
        print ksp.its
        print TimeSave[xx-1]
        ItsSave[xx-1] = ksp.its
        print " \n\n\n\n"
    else:
       # tic()
        C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)
        G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)
        # endTimeB = toc()
        # print endTimeB
        print "\n"
        tic()
        ScalarLaplacian, b1 = assemble_system(inner(grad(p),grad(q))*dx,inner(p0,q)*dx,bcu)
        VectorLaplacian, b2 = assemble_system(inner(grad(p),grad(q))*dx+inner(p,q)*dx,inner(p0,q)*dx,bcu)
        del b1, b2
        print "Hiptmair Laplacians BC assembled, time: ", toc()
        tic()
        VectorLaplacian = PETSc.Mat().createAIJ(size=VectorLaplacian.sparray().shape,csr=(VectorLaplacian.sparray().indptr, VectorLaplacian.sparray().indices, VectorLaplacian.sparray().data))
        ScalarLaplacian = PETSc.Mat().createAIJ(size=ScalarLaplacian.sparray().shape,csr=(ScalarLaplacian.sparray().indptr, ScalarLaplacian.sparray().indices, ScalarLaplacian.sparray().data))
        print "PETSc Laplacians assembled, time: ", toc()
        ksp = PETSc.KSP().create()
        ksp.setTolerances(1e-6)
        ksp.setType('cg')
        ksp.setOperators(A,A)
        pc = ksp.getPC()
        pc.setType(PETSc.PC.Type.PYTHON)
        kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)
        del A, VectorLaplacian, ScalarLaplacian
        pc.setPythonContext(HiptmairPrecond.GSvector(G, P, kspVector, kspScalar, diag))
        scale = b.norm()
        b = b/scale
        tic()
        ksp.solve(b, x)
        TimeSave[xx-1] = toc()
        x = x*scale
        print ksp.its
        print TimeSave[xx-1]
        ItsSave[xx-1] = ksp.its
        print " \n\n\n\n"
import pandas as pd
print "\n\n\n"
ItsTitlesB = ["l","B DoF","Time","Iterations"]
ItsValuesB = numpy.concatenate((NN,DimSave,TimeSave,ItsSave),axis=1)
ItsTableB= pd.DataFrame(ItsValuesB, columns = ItsTitlesB)
pd.set_option('precision',5)
print ItsTableB.to_latex()
if m !=2:
    print numpy.abs((TimeSave[1:]/TimeSave[:-1]))/(2*dim)
 | 
	mit | 
| 
	robcarver17/pysystemtrade | 
	systems/provided/futures_chapter15/rules.py | 
	1 | 
	4311 | 
	"""
Trading rules for futures system
"""
from syscore.dateutils import ROOT_BDAYS_INYEAR
import pandas as pd
from sysquant.estimators.vol import robust_vol_calc
def ewmac(price, vol, Lfast, Lslow):
    """
    Calculate the ewmac trading rule forecast, given a price, a precalculated volatility, and EWMA speeds Lfast and Lslow
    Assumes that 'price' and 'vol' are daily data
    This version uses a precalculated price volatility, and does not do capping or scaling
    :param price: The price or other series to use (assumed Tx1)
    :type price: pd.Series
    :param vol: The daily price unit volatility (NOT % vol)
    :type vol: pd.Series aligned to price
    :param Lfast: Lookback for fast in days
    :type Lfast: int
    :param Lslow: Lookback for slow in days
    :type Lslow: int
    :returns: pd.Series -- unscaled, uncapped forecast
    >>> from systems.tests.testdata import get_test_object_futures
    >>> from systems.basesystem import System
    >>> (rawdata, data, config)=get_test_object_futures()
    >>> system=System( [rawdata], data, config)
    >>>
    >>> ewmac(rawdata.get_daily_prices("EDOLLAR"), rawdata.daily_returns_volatility("EDOLLAR"), 64, 256).tail(2)
    2015-12-10    5.327019
    2015-12-11    4.927339
    Freq: B, dtype: float64
    """
    # price: This is the stitched price series
    # We can't use the price of the contract we're trading, or the volatility will be jumpy
    # And we'll miss out on the rolldown. See
    # https://qoppac.blogspot.com/2015/05/systems-building-futures-rolling.html
    # We don't need to calculate the decay parameter, just use the span
    # directly
    fast_ewma = price.ewm(span=Lfast).mean()
    slow_ewma = price.ewm(span=Lslow).mean()
    raw_ewmac = fast_ewma - slow_ewma
    return raw_ewmac / vol.ffill()
def ewmac_calc_vol(price, Lfast, Lslow, vol_days=35):
    """
    Calculate the ewmac trading rule forecast, given a price, EWMA speeds Lfast and Lslow, and a volatility lookback vol_days
    Assumes that 'price' is daily data
    This version recalculates the price volatility, and does not do capping or scaling
    :param price: The price or other series to use (assumed Tx1)
    :type price: pd.Series
    :param Lfast: Lookback for fast in days
    :type Lfast: int
    :param Lslow: Lookback for slow in days
    :type Lslow: int
    :returns: pd.Series -- unscaled, uncapped forecast
    >>> from systems.tests.testdata import get_test_object_futures
    >>> from systems.basesystem import System
    >>> (rawdata, data, config)=get_test_object_futures()
    >>> system=System( [rawdata], data, config)
    >>>
    >>> ewmac(rawdata.get_daily_prices("EDOLLAR"), rawdata.daily_returns_volatility("EDOLLAR"), 64, 256).tail(2)
    2015-12-10    5.327019
    2015-12-11    4.927339
    Freq: B, dtype: float64
    """
    # price: This is the stitched price series
    # We can't use the price of the contract we're trading, or the volatility will be jumpy
    # And we'll miss out on the rolldown. See
    # https://qoppac.blogspot.com/2015/05/systems-building-futures-rolling.html
    # We don't need to calculate the decay parameter, just use the span
    # directly
    fast_ewma = price.ewm(span=Lfast).mean()
    slow_ewma = price.ewm(span=Lslow).mean()
    raw_ewmac = fast_ewma - slow_ewma
    vol = robust_vol_calc(price, vol_days)
    return raw_ewmac / vol.ffill()
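# Editor's illustrative sketch: running `ewmac` on a synthetic random-walk
# price series. The 16/64 speeds, 500-day length, business-day index and
# 35-day vol lookback are arbitrary assumptions made only so the example is
# self-contained and runnable.
def _example_ewmac_random_walk(n_days=500, Lfast=16, Lslow=64, seed=0):
    """Sketch: build a fake daily price series and return its raw ewmac forecast"""
    import numpy as np
    rng = np.random.RandomState(seed)
    price = pd.Series(100.0 + rng.normal(0.0, 1.0, n_days).cumsum(),
                      index=pd.bdate_range("2015-01-01", periods=n_days))
    vol = robust_vol_calc(price, 35)
    return ewmac(price, vol, Lfast, Lslow)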
def carry(daily_ann_roll, vol, smooth_days=90):
    """
    Old carry rule
    """
    raise Exception("DEPRECATED: USE carry2")
def carry2(raw_carry, smooth_days=90):
    """
    Calculate carry forecast, given that there exists a raw_carry() in rawdata
    Assumes that everything is daily data
    :param raw_carry: The annualised sharpe ratio of rolldown
    :type raw_carry: pd.DataFrame (assumed Tx1)
    >>> from systems.tests.testdata import get_test_object_futures
    >>> from systems.basesystem import System
    >>> (rawdata, data, config)=get_test_object_futures()
    >>> system=System( [rawdata], data, config)
    >>>
    >>> carry2(rawdata.raw_carry("EDOLLAR")).tail(2)
    2015-12-10    0.411686
    2015-12-11    0.411686
    Freq: B, dtype: float64
    """
    smooth_carry = raw_carry.ewm(smooth_days).mean()
    return smooth_carry
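# Editor's illustrative sketch: smoothing a synthetic raw-carry series with
# `carry2`. The 0.2 mean, 0.1 noise level and 250-day length are arbitrary
# assumptions used only to make the example self-contained.
def _example_carry2_random(n_days=250, seed=1):
    """Sketch: smooth a fake raw-carry series with the default 90-day span"""
    import numpy as np
    rng = np.random.RandomState(seed)
    raw_carry = pd.Series(rng.normal(0.2, 0.1, n_days),
                          index=pd.bdate_range("2015-01-01", periods=n_days))
    return carry2(raw_carry, smooth_days=90)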
if __name__ == "__main__":
    import doctest
    doctest.testmod()
 | 
	gpl-3.0 | 
| 
	q1ang/scikit-learn | 
	sklearn/neighbors/tests/test_kd_tree.py | 
	159 | 
	7852 | 
	import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
                                       simultaneous_sort, kernel_norm,
                                       nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(D, axis=1)[:, :k]
    dist = D[np.arange(Y.shape[0])[:, None], ind]
    return dist, ind
def test_kd_tree_query():
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))
    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                                breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
        # don't check indices here: if there are any duplicate distances,
        # the indices may not match.  Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)
    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind = kdt.query_radius([query_pt], r + eps)[0]
        i = np.where(rad <= r + eps)[0]
        ind.sort()
        i.sort()
        assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
        ind = ind[0]
        dist = dist[0]
        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
        assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)
    if kernel == 'gaussian':
        return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
    elif kernel == 'tophat':
        return norm * (d < h).sum(-1)
    elif kernel == 'epanechnikov':
        return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
    elif kernel == 'exponential':
        return norm * (np.exp(-d / h)).sum(-1)
    elif kernel == 'linear':
        return norm * ((1 - d / h) * (d < h)).sum(-1)
    elif kernel == 'cosine':
        return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
    else:
        raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)
            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                          kernel=kernel,
                                          breadth_first=breadth_first)
                assert_allclose(dens, dens_true, atol=atol,
                                rtol=max(rtol, 1e-7))
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)
    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)
    for h in [0.01, 0.1, 1]:
        kdt = KDTree(x_in[:, None])
        try:
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")
        dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)
        assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    kdt = KDTree(X, leaf_size=10)
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]
    def check_two_point(r, dualtree):
        counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)
    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_kd_tree_pickle():
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    kdt1 = KDTree(X, leaf_size=1)
    ind1, dist1 = kdt1.query(X)
    def check_pickle_protocol(protocol):
        s = pickle.dumps(kdt1, protocol=protocol)
        kdt2 = pickle.loads(s)
        ind2, dist2 = kdt2.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)
    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    heap = NeighborsHeap(n_pts, n_nbrs)
    for row in range(n_pts):
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)
        ind = np.argsort(d_in)
        d_in = d_in[ind]
        i_in = i_in[ind]
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    vals = np.random.random(n_nodes).astype(DTYPE)
    i1 = np.argsort(vals)
    vals2, i2 = nodeheap_sort(vals)
    assert_array_almost_equal(i1, i2)
    assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
    dist2 = dist.copy()
    ind2 = ind.copy()
    # simultaneous sort rows using function
    simultaneous_sort(dist, ind)
    # simultaneous sort rows using numpy
    i = np.argsort(dist2, axis=1)
    row_ind = np.arange(n_rows)[:, None]
    dist2 = dist2[row_ind, i]
    ind2 = ind2[row_ind, i]
    assert_array_almost_equal(dist, dist2)
    assert_array_almost_equal(ind, ind2)
 | 
	bsd-3-clause | 
| 
	bartosh/zipline | 
	tests/pipeline/test_downsampling.py | 
	4 | 
	24457 | 
	"""
Tests for Downsampled Filters/Factors/Classifiers
"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from zipline.pipeline import (
    Pipeline,
    CustomFactor,
    CustomFilter,
    CustomClassifier,
)
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.factors import SimpleMovingAverage
from zipline.pipeline.filters.smoothing import All
from zipline.testing import ZiplineTestCase, parameter_space
from zipline.testing.fixtures import (
    WithTradingSessions,
    WithSeededRandomPipelineEngine,
)
from zipline.utils.input_validation import _qualified_name
from zipline.utils.numpy_utils import int64_dtype
class NDaysAgoFactor(CustomFactor):
    inputs = [TestingDataSet.float_col]
    def compute(self, today, assets, out, floats):
        out[:] = floats[0]
class NDaysAgoFilter(CustomFilter):
    inputs = [TestingDataSet.bool_col]
    def compute(self, today, assets, out, bools):
        out[:] = bools[0]
class NDaysAgoClassifier(CustomClassifier):
    inputs = [TestingDataSet.categorical_col]
    dtype = TestingDataSet.categorical_col.dtype
    def compute(self, today, assets, out, cats):
        out[:] = cats[0]
class ComputeExtraRowsTestcase(WithTradingSessions, ZiplineTestCase):
    DATA_MIN_DAY = pd.Timestamp('2012-06', tz='UTC')
    DATA_MAX_DAY = pd.Timestamp('2015', tz='UTC')
    TRADING_CALENDAR_STRS = ('NYSE',)
    # Test with different window_lengths to ensure that window length is not
    # used when calculating extra rows for the top-level term.
    factor1 = TestingDataSet.float_col.latest
    factor11 = NDaysAgoFactor(window_length=11)
    factor91 = NDaysAgoFactor(window_length=91)
    filter1 = TestingDataSet.bool_col.latest
    filter11 = NDaysAgoFilter(window_length=11)
    filter91 = NDaysAgoFilter(window_length=91)
    classifier1 = TestingDataSet.categorical_col.latest
    classifier11 = NDaysAgoClassifier(window_length=11)
    classifier91 = NDaysAgoClassifier(window_length=91)
    all_terms = [
        factor1,
        factor11,
        factor91,
        filter1,
        filter11,
        filter91,
        classifier1,
        classifier11,
        classifier91,
    ]
    @parameter_space(
        calendar_name=TRADING_CALENDAR_STRS,
        base_terms=[
            (factor1, factor11, factor91),
            (filter1, filter11, filter91),
            (classifier1, classifier11, classifier91),
        ],
        __fail_fast=True
    )
    def test_yearly(self, base_terms, calendar_name):
        downsampled_terms = tuple(
            t.downsample('year_start') for t in base_terms
        )
        all_terms = base_terms + downsampled_terms
        all_sessions = self.trading_sessions[calendar_name]
        end_session = all_sessions[-1]
        years = all_sessions.year
        sessions_in_2012 = all_sessions[years == 2012]
        sessions_in_2013 = all_sessions[years == 2013]
        sessions_in_2014 = all_sessions[years == 2014]
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the first date in 2014.  We shouldn't request any
        # additional rows for the regular terms or the downsampled terms.
        for i in range(0, 30, 5):
            start_session = sessions_in_2014[i]
            self.check_extra_row_calculations(
                all_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land on the second date in 2014.  We should request one more extra
        # row in the downsampled terms to push us back to the first date in
        # 2014.
        for i in range(0, 30, 5):
            start_session = sessions_in_2014[i + 1]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i + 1,
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land on the last date of 2013. The downsampled terms should request
        # enough extra rows to push us back to the start of 2013.
        for i in range(0, 30, 5):
            start_session = sessions_in_2014[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(sessions_in_2013),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land on the last date of 2012. The downsampled terms should request
        # enough extra rows to push us back to the first known date, which is
        # in the middle of 2012
        for i in range(0, 30, 5):
            start_session = sessions_in_2013[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(sessions_in_2012),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
    @parameter_space(
        calendar_name=TRADING_CALENDAR_STRS,
        base_terms=[
            (factor1, factor11, factor91),
            (filter1, filter11, filter91),
            (classifier1, classifier11, classifier91),
        ],
        __fail_fast=True
    )
    def test_quarterly(self, calendar_name, base_terms):
        downsampled_terms = tuple(
            t.downsample('quarter_start') for t in base_terms
        )
        all_terms = base_terms + downsampled_terms
        # This region intersects with Q4 2013, Q1 2014, and Q2 2014.
        tmp = self.trading_sessions[calendar_name]
        all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-04-30')]
        end_session = all_sessions[-1]
        months = all_sessions.month
        Q4_2013 = all_sessions[months == 12]
        Q1_2014 = all_sessions[(months == 1) | (months == 2) | (months == 3)]
        Q2_2014 = all_sessions[months == 4]
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the first date in Q2 2014.  We shouldn't request any
        # additional rows for the regular terms or the downsampled terms.
        for i in range(0, 15, 5):
            start_session = Q2_2014[i]
            self.check_extra_row_calculations(
                all_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the second date in Q2 2014.
        # The downsampled terms should request one more extra row.
        for i in range(0, 15, 5):
            start_session = Q2_2014[i + 1]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i + 1,
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the last date in Q1 2014.  The downsampled terms
        # should request enough extra rows to push us back to the first date of
        # Q1 2014.
        for i in range(0, 15, 5):
            start_session = Q2_2014[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(Q1_2014),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the last date in Q4 2013.  The downsampled terms
        # should request enough extra rows to push us back to the first known
        # date, which is in the middle of december 2013.
        for i in range(0, 15, 5):
            start_session = Q1_2014[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(Q4_2013),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
    @parameter_space(
        calendar_name=TRADING_CALENDAR_STRS,
        base_terms=[
            (factor1, factor11, factor91),
            (filter1, filter11, filter91),
            (classifier1, classifier11, classifier91),
        ],
        __fail_fast=True
    )
    def test_monthly(self, calendar_name, base_terms):
        downsampled_terms = tuple(
            t.downsample('month_start') for t in base_terms
        )
        all_terms = base_terms + downsampled_terms
        # This region intersects with Dec 2013, Jan 2014, and Feb 2014.
        tmp = self.trading_sessions[calendar_name]
        all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-02-28')]
        end_session = all_sessions[-1]
        months = all_sessions.month
        dec2013 = all_sessions[months == 12]
        jan2014 = all_sessions[months == 1]
        feb2014 = all_sessions[months == 2]
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the first date in feb 2014.  We shouldn't request any
        # additional rows for the regular terms or the downsampled terms.
        for i in range(0, 10, 2):
            start_session = feb2014[i]
            self.check_extra_row_calculations(
                all_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land on the second date in feb 2014.  We should request one more
        # extra row in the downsampled terms to push us back to the first date
        # in feb 2014.
        for i in range(0, 10, 2):
            start_session = feb2014[i + 1]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i + 1,
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land on the last date of jan 2014. The downsampled terms should
        # request enough extra rows to push us back to the start of jan 2014.
        for i in range(0, 10, 2):
            start_session = feb2014[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(jan2014),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land on the last date of dec 2013. The downsampled terms should
        # request enough extra rows to push us back to the first known date,
        # which is in the middle of december 2013.
        for i in range(0, 10, 2):
            start_session = jan2014[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(dec2013),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
    @parameter_space(
        calendar_name=TRADING_CALENDAR_STRS,
        base_terms=[
            (factor1, factor11, factor91),
            (filter1, filter11, filter91),
            (classifier1, classifier11, classifier91),
        ],
        __fail_fast=True
    )
    def test_weekly(self, calendar_name, base_terms):
        downsampled_terms = tuple(
            t.downsample('week_start') for t in base_terms
        )
        all_terms = base_terms + downsampled_terms
        #    December 2013
        # Mo Tu We Th Fr Sa Su
        #                    1
        #  2  3  4  5  6  7  8
        #  9 10 11 12 13 14 15
        # 16 17 18 19 20 21 22
        # 23 24 25 26 27 28 29
        # 30 31
        #     January 2014
        # Mo Tu We Th Fr Sa Su
        #        1  2  3  4  5
        #  6  7  8  9 10 11 12
        # 13 14 15 16 17 18 19
        # 20 21 22 23 24 25 26
        # 27 28 29 30 31
        # This region intersects with the last full week of 2013, the week
        # shared by 2013 and 2014, and the first full week of 2014.
        tmp = self.trading_sessions[calendar_name]
        all_sessions = tmp[tmp.slice_indexer('2013-12-27', '2014-01-12')]
        end_session = all_sessions[-1]
        week0 = all_sessions[
            all_sessions.slice_indexer('2013-12-27', '2013-12-29')
        ]
        week1 = all_sessions[
            all_sessions.slice_indexer('2013-12-30', '2014-01-05')
        ]
        week2 = all_sessions[
            all_sessions.slice_indexer('2014-01-06', '2014-01-12')
        ]
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the first date in week 2.  We shouldn't request any
        # additional rows for the regular terms or the downsampled terms.
        for i in range(3):
            start_session = week2[i]
            self.check_extra_row_calculations(
                all_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the second date in week 2.  The downsampled terms
        # should request one more extra row.
        for i in range(3):
            start_session = week2[i + 1]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i + 1,
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i,
                expected_extra_rows=i,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the last date in week 1.  The downsampled terms
        # should request enough extra rows to push us back to the first date of
        # week 1.
        for i in range(3):
            start_session = week2[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(week1),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
        # Simulate requesting computation where the unaltered lookback would
        # land exactly on the last date in week0.  The downsampled terms
        # should request enough extra rows to push us back to the first known
        # date, which is in the middle of december 2013.
        for i in range(3):
            start_session = week1[i]
            self.check_extra_row_calculations(
                downsampled_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + len(week0),
            )
            self.check_extra_row_calculations(
                base_terms,
                all_sessions,
                start_session,
                end_session,
                min_extra_rows=i + 1,
                expected_extra_rows=i + 1,
            )
    def check_extra_row_calculations(self,
                                     terms,
                                     all_sessions,
                                     start_session,
                                     end_session,
                                     min_extra_rows,
                                     expected_extra_rows):
        """
        Check that each term in ``terms`` computes an expected number of extra
        rows for the given parameters.
        """
        for term in terms:
            result = term.compute_extra_rows(
                all_sessions,
                start_session,
                end_session,
                min_extra_rows,
            )
            self.assertEqual(
                result,
                expected_extra_rows,
                "Expected {} extra_rows from {}, but got {}.".format(
                    expected_extra_rows,
                    term,
                    result,
                )
            )
class DownsampledPipelineTestCase(WithSeededRandomPipelineEngine,
                                  ZiplineTestCase):
    # Extend into the last few days of 2013 to test year/quarter boundaries.
    START_DATE = pd.Timestamp('2013-12-15', tz='UTC')
    # Extend into the first few days of 2015 to test year/quarter boundaries.
    END_DATE = pd.Timestamp('2015-01-06', tz='UTC')
    ASSET_FINDER_EQUITY_SIDS = tuple(range(10))
    def check_downsampled_term(self, term):
        #       June 2014
        # Mo Tu We Th Fr Sa Su
        #                    1
        #  2  3  4  5  6  7  8
        #  9 10 11 12 13 14 15
        # 16 17 18 19 20 21 22
        # 23 24 25 26 27 28 29
        # 30
        all_sessions = self.nyse_sessions
        compute_dates = all_sessions[
            all_sessions.slice_indexer('2014-06-05', '2015-01-06')
        ]
        start_date, end_date = compute_dates[[0, -1]]
        pipe = Pipeline({
            'year': term.downsample(frequency='year_start'),
            'quarter': term.downsample(frequency='quarter_start'),
            'month': term.downsample(frequency='month_start'),
            'week': term.downsample(frequency='week_start'),
        })
        # Raw values for term, computed each day from 2014 to the end of the
        # target period.
        raw_term_results = self.run_pipeline(
            Pipeline({'term': term}),
            start_date=pd.Timestamp('2014-01-02', tz='UTC'),
            end_date=pd.Timestamp('2015-01-06', tz='UTC'),
        )['term'].unstack()
        expected_results = {
            'year': (raw_term_results
                     .groupby(pd.TimeGrouper('AS'))
                     .first()
                     .reindex(compute_dates, method='ffill')),
            'quarter': (raw_term_results
                        .groupby(pd.TimeGrouper('QS'))
                        .first()
                        .reindex(compute_dates, method='ffill')),
            'month': (raw_term_results
                      .groupby(pd.TimeGrouper('MS'))
                      .first()
                      .reindex(compute_dates, method='ffill')),
            'week': (raw_term_results
                     .groupby(pd.TimeGrouper('W', label='left'))
                     .first()
                     .reindex(compute_dates, method='ffill')),
        }
        results = self.run_pipeline(pipe, start_date, end_date)
        for frequency in expected_results:
            result = results[frequency].unstack()
            expected = expected_results[frequency]
            assert_frame_equal(result, expected)
    def test_downsample_windowed_factor(self):
        self.check_downsampled_term(
            SimpleMovingAverage(
                inputs=[TestingDataSet.float_col],
                window_length=5,
            )
        )
    def test_downsample_non_windowed_factor(self):
        sma = SimpleMovingAverage(
            inputs=[TestingDataSet.float_col],
            window_length=5,
        )
        self.check_downsampled_term(((sma + sma) / 2).rank())
    def test_downsample_windowed_filter(self):
        sma = SimpleMovingAverage(
            inputs=[TestingDataSet.float_col],
            window_length=5,
        )
        self.check_downsampled_term(All(inputs=[sma.top(4)], window_length=5))
    def test_downsample_nonwindowed_filter(self):
        sma = SimpleMovingAverage(
            inputs=[TestingDataSet.float_col],
            window_length=5,
        )
        self.check_downsampled_term(sma > 5)
    def test_downsample_windowed_classifier(self):
        class IntSumClassifier(CustomClassifier):
            inputs = [TestingDataSet.float_col]
            window_length = 8
            dtype = int64_dtype
            missing_value = -1
            def compute(self, today, assets, out, floats):
                out[:] = floats.sum(axis=0).astype(int) % 4
        self.check_downsampled_term(IntSumClassifier())
    def test_downsample_nonwindowed_classifier(self):
        sma = SimpleMovingAverage(
            inputs=[TestingDataSet.float_col],
            window_length=5,
        )
        self.check_downsampled_term(sma.quantiles(5))
    def test_errors_on_bad_downsample_frequency(self):
        f = NDaysAgoFactor(window_length=3)
        with self.assertRaises(ValueError) as e:
            f.downsample('bad')
        expected = (
            "{}() expected a value in "
            "('month_start', 'quarter_start', 'week_start', 'year_start') "
            "for argument 'frequency', but got 'bad' instead."
        ).format(_qualified_name(f.downsample))
        self.assertEqual(str(e.exception), expected)
 | 
	apache-2.0 | 
| 
	Kongsea/tensorflow | 
	tensorflow/examples/learn/hdf5_classification.py | 
	75 | 
	2899 | 
	#  Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py  # pylint: disable=g-bad-import-order
X_FEATURE = 'x'  # Name of the input feature.
def main(unused_argv):
  # Load dataset.
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading iris data in h5 format as a simple
  # demonstration here.
  h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
  h5f.create_dataset('X_train', data=x_train)
  h5f.create_dataset('X_test', data=x_test)
  h5f.create_dataset('y_train', data=y_train)
  h5f.create_dataset('y_test', data=y_test)
  h5f.close()
  h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
  x_train = np.array(h5f['X_train'])
  x_test = np.array(h5f['X_test'])
  y_train = np.array(h5f['y_train'])
  y_test = np.array(h5f['y_test'])
  # Build 3 layer DNN with 10, 20, 10 units respectively.
  feature_columns = [
      tf.feature_column.numeric_column(
          X_FEATURE, shape=np.array(x_train).shape[1:])]
  classifier = tf.estimator.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=200)
  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class_ids'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)
  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))
  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
  tf.app.run()
 | 
	apache-2.0 | 
| 
	msmbuilder/msmbuilder | 
	msmbuilder/decomposition/kernel_approximation.py | 
	9 | 
	4210 | 
	# Author: Carlos Xavier Hernandez <[email protected]>
# Contributors: Muneeb Sultan <[email protected]>, Evan Feinberg <[email protected]>
# Copyright (c) 2015, Stanford University and the Authors
# All rights reserved.
from __future__ import absolute_import
import numpy as np
from scipy.linalg import svd
from sklearn import kernel_approximation
from sklearn.metrics.pairwise import pairwise_kernels
from .base import MultiSequenceDecompositionMixin
__all__ = ['Nystroem', 'LandmarkNystroem']
class Nystroem(MultiSequenceDecompositionMixin, kernel_approximation.Nystroem):
    __doc__ = kernel_approximation.Nystroem.__doc__
class LandmarkNystroem(Nystroem):
    """Approximate a kernel map using a subset of the training data.
    Constructs an approximate feature map for an arbitrary kernel
    using a subset of the data as basis.
    Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
    Parameters
    ----------
    landmarks : ndarray of shape (n_frames, n_features)
        Custom landmark points for the Nystroem approximation
    kernel : string or callable, default="rbf"
        Kernel map to be approximated. A callable should accept two arguments
        and the keyword arguments passed to this object as kernel_params, and
        should return a floating point number.
    n_components : int
        Number of features to construct.
        How many data points will be used to construct the mapping.
    gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential chi2 and
        sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.
    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.
    kernel_params : mapping of string to any, optional
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.
    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.
    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Subset of training points used to construct the feature map.
    component_indices_ : array, shape (n_components)
        Indices of ``components_`` in the training set.
    normalization_ : array, shape (n_components, n_components)
        Normalization matrix needed for embedding.
        Square root of the kernel matrix on ``components_``.
    References
    ----------
    .. [1] Williams, C.K.I. and Seeger, M.
       "Using the Nystroem method to speed up kernel machines",
       Advances in neural information processing systems 2001
    .. [2] T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
       "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
       Comparison",
       Advances in Neural Information Processing Systems 2012
    See also
    --------
    Nystroem : Approximate a kernel map using a subset of the training data.
    """
    def __init__(self, landmarks=None, **kwargs):
        if (landmarks is not None and
                not isinstance(landmarks, (int, np.ndarray))):
            raise ValueError('landmarks should be an int, ndarray, or None.')
        self.landmarks = landmarks
        super(LandmarkNystroem, self).__init__(**kwargs)
    def fit(self, sequences, y=None):
        if self.landmarks is not None:
            basis_kernel = pairwise_kernels(self.landmarks, metric=self.kernel,
                                            filter_params=True,
                                            **self._get_kernel_params())
            U, S, V = svd(basis_kernel)
            S = np.maximum(S, 1e-12)
            self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
            self.components_ = self.landmarks
            self.component_indices_ = None
            return self
        return super(Nystroem, self).fit(sequences, y=y)
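# --- Illustrative usage sketch (not part of the original module) ---
def _landmark_nystroem_sketch():
    """A minimal sketch of fitting LandmarkNystroem on synthetic trajectory
    data. The shapes and the landmark selection below are assumptions chosen
    purely for demonstration."""
    rng = np.random.RandomState(0)
    # Two "trajectories" of 100 frames with 5 features each.
    sequences = [rng.randn(100, 5), rng.randn(100, 5)]
    # Use every 10th frame of the first trajectory as landmark points.
    model = LandmarkNystroem(landmarks=sequences[0][::10], kernel='rbf')
    model.fit(sequences)
    # normalization_ is the (n_landmarks, n_landmarks) embedding matrix.
    return model.normalization_.shape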
 | 
	lgpl-2.1 | 
| 
	fenglu-g/incubator-airflow | 
	airflow/hooks/presto_hook.py | 
	5 | 
	4772 | 
	# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import str
from pyhive import presto
from pyhive.exc import DatabaseError
from requests.auth import HTTPBasicAuth
from airflow.hooks.dbapi_hook import DbApiHook
class PrestoException(Exception):
    pass
class PrestoHook(DbApiHook):
    """
    Interact with Presto through PyHive!
    >>> ph = PrestoHook()
    >>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
    >>> ph.get_records(sql)
    [[340698]]
    """
    conn_name_attr = 'presto_conn_id'
    default_conn_name = 'presto_default'
    def get_conn(self):
        """Returns a connection object"""
        db = self.get_connection(self.presto_conn_id)
        reqkwargs = None
        if db.password is not None:
            reqkwargs = {'auth': HTTPBasicAuth(db.login, db.password)}
        return presto.connect(
            host=db.host,
            port=db.port,
            username=db.login,
            source=db.extra_dejson.get('source', 'airflow'),
            protocol=db.extra_dejson.get('protocol', 'http'),
            catalog=db.extra_dejson.get('catalog', 'hive'),
            requests_kwargs=reqkwargs,
            schema=db.schema)
    @staticmethod
    def _strip_sql(sql):
        return sql.strip().rstrip(';')
    @staticmethod
    def _get_pretty_exception_message(e):
        """
        Parses some DatabaseError to provide a better error message
        """
        if (hasattr(e, 'message') and
            'errorName' in e.message and
                'message' in e.message):
            return ('{name}: {message}'.format(
                    name=e.message['errorName'],
                    message=e.message['message']))
        else:
            return str(e)
    def get_records(self, hql, parameters=None):
        """
        Get a set of records from Presto
        """
        try:
            return super(PrestoHook, self).get_records(
                self._strip_sql(hql), parameters)
        except DatabaseError as e:
            raise PrestoException(self._get_pretty_exception_message(e))
    def get_first(self, hql, parameters=None):
        """
        Returns only the first row, regardless of how many rows the query
        returns.
        """
        try:
            return super(PrestoHook, self).get_first(
                self._strip_sql(hql), parameters)
        except DatabaseError as e:
            raise PrestoException(self._get_pretty_exception_message(e))
    def get_pandas_df(self, hql, parameters=None):
        """
        Get a pandas DataFrame from an SQL query.
        """
        import pandas
        cursor = self.get_cursor()
        try:
            cursor.execute(self._strip_sql(hql), parameters)
            data = cursor.fetchall()
        except DatabaseError as e:
            raise PrestoException(self._get_pretty_exception_message(e))
        column_descriptions = cursor.description
        if data:
            df = pandas.DataFrame(data)
            df.columns = [c[0] for c in column_descriptions]
        else:
            df = pandas.DataFrame()
        return df
    def run(self, hql, parameters=None):
        """
        Execute the statement against Presto. Can be used to create views.
        """
        return super(PrestoHook, self).run(self._strip_sql(hql), parameters)
    # TODO Enable commit_every once PyHive supports transaction.
    # Unfortunately, PyHive 0.5.1 doesn't support transaction for now,
    # whereas Presto 0.132+ does.
    def insert_rows(self, table, rows, target_fields=None):
        """
        A generic way to insert a set of tuples into a table.
        :param table: Name of the target table
        :type table: str
        :param rows: The rows to insert into the table
        :type rows: iterable of tuples
        :param target_fields: The names of the columns to fill in the table
        :type target_fields: iterable of strings
        """
        super(PrestoHook, self).insert_rows(table, rows, target_fields, 0)
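# --- Illustrative usage sketch (not part of the original hook) ---
def _presto_hook_usage_sketch():
    """A minimal sketch of querying Presto through this hook. It assumes an
    Airflow connection named 'presto_default' is already configured; the SQL
    statements below are placeholders."""
    hook = PrestoHook(presto_conn_id='presto_default')
    # Trailing semicolons are stripped by _strip_sql() before execution.
    records = hook.get_records("SELECT 1 AS one;")
    df = hook.get_pandas_df("SELECT 1 AS one")
    return records, df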
 | 
	apache-2.0 | 
| 
	Obus/scikit-learn | 
	examples/semi_supervised/plot_label_propagation_structure.py | 
	247 | 
	2432 | 
	"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
#          Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate an outer ring with an inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
                               X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
                               X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
           ('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
           numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
           'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
 | 
	bsd-3-clause | 
| 
	rexshihaoren/scikit-learn | 
	doc/sphinxext/gen_rst.py | 
	142 | 
	40026 | 
	"""
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
    from StringIO import StringIO
    import cPickle as pickle
    import urllib2 as urllib
    from urllib2 import HTTPError, URLError
except ImportError:
    from io import StringIO
    import pickle
    import urllib.request
    import urllib.error
    import urllib.parse
    from urllib.error import HTTPError, URLError
try:
    # Python 2 built-in
    execfile
except NameError:
    def execfile(filename, global_vars=None, local_vars=None):
        with open(filename, encoding='utf-8') as f:
            code = compile(f.read(), filename, 'exec')
            exec(code, global_vars, local_vars)
try:
    basestring
except NameError:
    basestring = str
import token
import tokenize
import numpy as np
try:
    # make sure that the Agg backend is set before importing any
    # matplotlib
    import matplotlib
    matplotlib.use('Agg')
except ImportError:
    # this script can be imported by nosetest to find tests to run: we should not
    # impose the matplotlib requirement in that case.
    pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2
    def write(self, data):
        self.file1.write(data)
        self.file2.write(data)
    def flush(self):
        self.file1.flush()
        self.file2.flush()
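# --- Illustrative usage sketch (not part of the original helpers) ---
def _tee_usage_sketch():
    """A minimal sketch of how Tee is used further down in generate_file_rst:
    capture an example's stdout in a buffer while still echoing it live."""
    buf = StringIO()
    orig_stdout = sys.stdout
    sys.stdout = Tee(orig_stdout, buf)
    print('hello')  # written to both the real stdout and the buffer
    sys.stdout = orig_stdout
    return buf.getvalue()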
###############################################################################
# Documentation link resolver objects
def _get_data(url):
    """Helper function to get data over http or from a local file"""
    if url.startswith('http://'):
        # Try Python 2, use Python 3 on exception
        try:
            resp = urllib.urlopen(url)
            encoding = resp.headers.dict.get('content-encoding', 'plain')
        except AttributeError:
            resp = urllib.request.urlopen(url)
            encoding = resp.headers.get('content-encoding', 'plain')
        data = resp.read()
        if encoding == 'plain':
            pass
        elif encoding == 'gzip':
            data = StringIO(data)
            data = gzip.GzipFile(fileobj=data).read()
        else:
            raise RuntimeError('unknown encoding')
    else:
        with open(url, 'r') as fid:
            data = fid.read()
    return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index
    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)
    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
            if depth == 0:
                break
        sel = str_in[start_pos + 1:pos]
        return sel
    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out
    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')
    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)
    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx
    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """
    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)
        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False
        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)
        self._searchindex = dict(filenames=filenames, objects=objects)
    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                value = value[next(iter(value.keys()))]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]
        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)
            if hasattr(link, 'decode'):
                link = link.decode('utf-8', 'replace')
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html
            # test if cobj appears in page
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            url = False
            if hasattr(html, 'decode'):
                # Decode bytes under Python 3
                html = html.decode('utf-8', 'replace')
            for comb_name in comb_names:
                if hasattr(comb_name, 'decode'):
                    # Decode bytes under Python 3
                    comb_name = comb_name.decode('utf-8', 'replace')
                if comb_name in html:
                    url = link + u'#' + comb_name
            link = url
        else:
            link = False
        return link
    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found
        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobj['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).
        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link
        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it works on the web
                link = link.replace('\\', '/')
            # for some reason, the relative link goes one directory too high up
            link = link[3:]
        return link
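# --- Illustrative usage sketch (not part of the original helpers) ---
def _doc_link_resolver_sketch():
    """A minimal sketch of resolving a documentation link for numpy.mean
    against the public numpy docs (requires network access). The URL matches
    the resolver_urls mapping used in embed_code_links below."""
    resolver = SphinxDocLinkResolver('http://docs.scipy.org/doc/numpy-1.6.0')
    cobj = {'name': 'mean', 'module': 'numpy', 'module_short': 'numpy'}
    return resolver.resolve(cobj, this_url='')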
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
    :lines: %(end_row)s-
    """
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
    :lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
    """
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
    *
      .. image:: images/%s
            :scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
    :align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
                   'plot_outlier_detection_001.png': (3, 372),
                   'plot_gp_regression_001.png': (2, 250),
                   'plot_adaboost_twoclass_001.png': (1, 372),
                   'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    if six.PY2:
        lines = open(filename).readlines()
    else:
        lines = open(filename, encoding='utf-8').readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]
        break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
        examples.
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
                                               'examples'))
    generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                                 'modules', 'generated'))
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)
    # we create an index.rst with all examples
    fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
    # Note: The sidebar button has been removed from the examples page for now
    #      due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html
    <style type="text/css">
    div#sidebarbutton {
        /* hide the sidebar collapser, while ensuring vertical arrangement */
        display: none;
    }
    </style>
.. _examples-index:
Examples
========
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    seen_backrefs = set()
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    for directory in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, directory)):
            generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    fhindex.flush()
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
    # Sort the list of examples by line-count
    new_list = [x for x in file_list if x.endswith('.py')]
    unsorted = np.zeros(shape=(len(new_list), 2))
    unsorted = unsorted.astype(np.object)
    for count, exmpl in enumerate(new_list):
        docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
        unsorted[count][1] = total_lines - docstr_lines
        unsorted[count][0] = exmpl
    index = np.lexsort((unsorted[:, 0].astype(np.str),
                        unsorted[:, 1].astype(np.float)))
    if not len(unsorted):
        return []
    return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
    """Generates RST to place a thumbnail in a gallery"""
    thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
    link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
    if ref_name.startswith('._'):
        ref_name = ref_name[2:]
    out = []
    out.append("""
.. raw:: html
    <div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
    out.append('.. figure:: %s\n' % thumb)
    if link_name.startswith('._'):
        link_name = link_name[2:]
    if full_dir != '.':
        out.append('   :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
    else:
        out.append('   :target: ./%s.html\n\n' % link_name[:-3])
    out.append("""   :ref:`example_%s`
.. raw:: html
    </div>
""" % (ref_name))
    return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
    """ Generate the rst file for an example directory.
    """
    if not directory == '.':
        target_dir = os.path.join(root_dir, directory)
        src_dir = os.path.join(example_dir, directory)
    else:
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        raise ValueError('Example directory %s does not have a README.txt' %
                         src_dir)
    fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
        os.makedirs(os.path.join(directory, 'images', 'thumb'))
    for fname in sorted_listdir:
        if fname.endswith('py'):
            backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
            new_fname = os.path.join(src_dir, fname)
            _, snippet, _ = extract_docstring(new_fname, True)
            fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
            fhindex.write("""
.. toctree::
   :hidden:
   %s/%s
""" % (directory, fname[:-3]))
            for backref in backrefs:
                include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
                seen = backref in seen_backrefs
                with open(include_path, 'a' if seen else 'w') as ex_file:
                    if not seen:
                        # heading
                        print(file=ex_file)
                        print('Examples using ``%s``' % backref, file=ex_file)
                        print('-----------------%s--' % ('-' * len(backref)),
                              file=ex_file)
                        print(file=ex_file)
                    rel_dir = os.path.join('../../auto_examples', directory)
                    ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
                    seen_backrefs.add(backref)
    fhindex.write("""
.. raw:: html
    <div class="clearer"></div>
    """)  # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
       image with a given width and height
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)
    if height_in * scale_w <= height:
        scale = scale_w
    else:
        scale = scale_h
    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))
    # resize the image
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
    # insert centered
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)
    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
    """ Get the shortest possible module name """
    parts = module_name.split('.')
    short_name = module_name
    for i in range(len(parts) - 1, 0, -1):
        short_name = '.'.join(parts[:i])
        try:
            exec('from %s import %s' % (short_name, obj_name))
        except ImportError:
            # get the last working module name
            short_name = '.'.join(parts[:(i + 1)])
            break
    return short_name
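# --- Illustrative usage sketch (not part of the original helpers) ---
def _short_module_name_sketch():
    """A minimal sketch: the module path is shortened to the highest-level
    package that still exports the object, so ('numpy.core.fromnumeric',
    'mean') shortens to 'numpy'."""
    return get_short_module_name('numpy.core.fromnumeric', 'mean')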
class NameFinder(ast.NodeVisitor):
    """Finds the longest form of variable names and their imports in code
    Only retains names from imported modules.
    """
    def __init__(self):
        super(NameFinder, self).__init__()
        self.imported_names = {}
        self.accessed_names = set()
    def visit_Import(self, node, prefix=''):
        for alias in node.names:
            local_name = alias.asname or alias.name
            self.imported_names[local_name] = prefix + alias.name
    def visit_ImportFrom(self, node):
        self.visit_Import(node, node.module + '.')
    def visit_Name(self, node):
        self.accessed_names.add(node.id)
    def visit_Attribute(self, node):
        attrs = []
        while isinstance(node, ast.Attribute):
            attrs.append(node.attr)
            node = node.value
        if isinstance(node, ast.Name):
            # This is a.b, not e.g. a().b
            attrs.append(node.id)
            self.accessed_names.add('.'.join(reversed(attrs)))
        else:
            # need to get a in a().b
            self.visit(node)
    def get_mapping(self):
        for name in self.accessed_names:
            local_name = name.split('.', 1)[0]
            remainder = name[len(local_name):]
            if local_name in self.imported_names:
                # Join import path to relative path
                full_name = self.imported_names[local_name] + remainder
                yield name, full_name
def identify_names(code):
    """Builds a codeobj summary by identifying and resolving used names
    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))
    example_code_obj = {}
    for name, full_name in finder.get_mapping():
        # name is as written in file (e.g. np.asarray)
        # full_name includes resolved import path (e.g. numpy.asarray)
        module, attribute = full_name.rsplit('.', 1)
        # get shortened module name
        module_short = get_short_module_name(module, attribute)
        cobj = {'name': attribute, 'module': module,
                'module_short': module_short}
        example_code_obj[name] = cobj
    return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.
    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name
    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)
    # The following is a list containing all the figure names
    figure_list = []
    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())
        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)
                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr
                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()
        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)
    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)
    docstring, short_desc, end_row = extract_docstring(example_file)
    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()
    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code"""
    if exception is not None:
        return
    print('Embedding documentation hyperlinks in examples..')
    if app.builder.name == 'latex':
        # Don't embed hyperlinks when a latex builder is used.
        return
    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
                                                     relative=True)
    resolver_urls = {
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
    }
    for this_module, url in resolver_urls.items():
        try:
            doc_resolvers[this_module] = SphinxDocLinkResolver(url)
        except HTTPError as e:
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)
    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))
    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'
    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print('\tprocessing: %s' % fname)
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')
            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
                fid.close()
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.items():
                    this_module = cobj['module'].split('.')[0]
                    if this_module not in doc_resolvers:
                        continue
                    try:
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                    except (HTTPError, URLError) as e:
                        print("The following error has occurred:\n")
                        print(repr(e))
                        continue
                    if link is not None:
                        parts = name.split('.')
                        name_html = period.join(orig_pattern % part
                                                for part in parts)
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file
                # ensure greediness
                names = sorted(str_repl, key=len, reverse=True)
                expr = re.compile(r'(?<!\.)\b' +  # don't follow . or word
                                  '|'.join(re.escape(name)
                                           for name in names))
                def substitute_link(match):
                    return str_repl[match.group()]
                if len(str_repl) > 0:
                    with open(full_fname, 'rb') as fid:
                        lines_in = fid.readlines()
                    with open(full_fname, 'wb') as fid:
                        for line in lines_in:
                            line = line.decode('utf-8')
                            line = expr.sub(substitute_link, line)
                            fid.write(line.encode('utf-8'))
    print('[done]')
def setup(app):
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')
    # embed links after build is finished
    app.connect('build-finished', embed_code_links)
    # Sphinx hack: sphinx copies generated images to the build directory
    #  each time the docs are made.  If the desired image name already
    #  exists, it appends a digit to prevent overwrites.  The problem is,
    #  the directory is never cleared.  This means that each time you build
    #  the docs, the number of images in the directory grows.
    #
    # This question has been asked on the sphinx development list, but there
    #  was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # The following is a hack that prevents this behavior by clearing the
    #  image build directory each time the docs are built.  If sphinx
    #  changes their layout between versions, this will not work (though
    #  it should probably not cause a crash).  Tested successfully
    #  on Sphinx 1.0.7
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        filelist = os.listdir(build_image_dir)
        for filename in filelist:
            if filename.endswith('png'):
                os.remove(os.path.join(build_image_dir, filename))
def setup_module():
    # HACK: Stop nosetests running setup() above
    pass
 | 
	bsd-3-clause | 
| 
	charanpald/wallhack | 
	wallhack/viroscopy/ContactGrowthStatistics.py | 
	1 | 
	49412 | 
	import logging
import sys
import gc 
import numpy
import os.path
import matplotlib.pyplot as plt
from datetime import date
from sandbox.util.PathDefaults import PathDefaults
from sandbox.util.DateUtils import DateUtils
from sandbox.util.Latex import Latex
from sandbox.util.Util import Util
from apgl.graph import * 
from apgl.viroscopy.HIVGraphReader import HIVGraphReader
from apgl.viroscopy.HIVGraphStatistics import HIVGraphStatistics
"""
This script computes some basic statistics on the growing graph. We currently
combine both infection and detection graphs and hence
look at the contact graph. 
"""
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(suppress=True, linewidth=150)
hivReader = HIVGraphReader()
graph = hivReader.readHIVGraph()
fInds = hivReader.getIndicatorFeatureIndices()
#The set of edges indexed by zeros is the contact graph
#The ones indexed by 1 is the infection graph
edgeTypeIndex1 = 0
edgeTypeIndex2 = 1
sGraphContact = graph.getSparseGraph(edgeTypeIndex1)
sGraphInfect = graph.getSparseGraph(edgeTypeIndex2)
sGraphContact = sGraphContact.union(sGraphInfect)
sGraph = sGraphContact
#sGraph = sGraph.subgraph(range(0, 200))
figureDir = PathDefaults.getOutputDir() + "viroscopy/figures/contact/"
resultsDir = PathDefaults.getOutputDir() + "viroscopy/"
graphStats = GraphStatistics()
statsArray = graphStats.scalarStatistics(sGraph, False)
slowStats = True
saveResults = False
logging.info(sGraph)
logging.info("Number of features: " + str(sGraph.getVertexList().getNumFeatures()))
logging.info("Largest component is " + str(statsArray[graphStats.maxComponentSizeIndex]))
logging.info("Number of components " + str(statsArray[graphStats.numComponentsIndex]))
#sGraph = sGraph.subgraph(components[componentIndex])
vertexArray = sGraph.getVertexList().getVertices()
logging.info("Size of graph we will use: " + str(sGraph.getNumVertices()))
#Some indices
dobIndex = fInds["birthDate"]
detectionIndex = fInds["detectDate"]
deathIndex = fInds["deathDate"]
genderIndex = fInds["gender"]
orientationIndex = fInds["orient"]
ages = vertexArray[:, detectionIndex] - vertexArray[:, dobIndex]
deaths = vertexArray[:, deathIndex] - vertexArray[:, detectionIndex]
detections = vertexArray[:, detectionIndex]
startYear = 1900
daysInYear = 365
daysInMonth = 30
monthStep = 3
#Effective diameter q 
q = 0.9
plotInd = 1
plotStyles = ['ko-', 'kx-', 'k+-', 'k.-', 'k*-']
plotStyles2 = ['k-', 'r-', 'g-', 'b-', 'c-', 'm-']
plotStyleBW = ['k-', 'k--', 'k-.', 'k:']
plotStyles4 = ['r-', 'r--', 'r-.', 'r:']
numConfigGraphs = 10
#Make sure we include all detections
dayList = range(int(numpy.min(detections)), int(numpy.max(detections)), daysInMonth*monthStep)
dayList.append(numpy.max(detections))
absDayList = [float(i-numpy.min(detections)) for i in dayList]
subgraphIndicesList = []
for i in dayList:
    logging.info("Date: " + str(DateUtils.getDateStrFromDay(i, startYear)))
    subgraphIndices = numpy.nonzero(detections <= i)[0]
    subgraphIndicesList.append(subgraphIndices)
    
#Compute the indices list for the vector statistics
dayList2 = [DateUtils.getDayDelta(date(1989, 12, 31), startYear)]
dayList2.append(DateUtils.getDayDelta(date(1993, 12, 31), startYear))
dayList2.append(DateUtils.getDayDelta(date(1997, 12, 31), startYear))
dayList2.append(DateUtils.getDayDelta(date(2001, 12, 31), startYear))
dayList2.append(int(numpy.max(detections)))
subgraphIndicesList2 = []
for i in dayList2:
    logging.info("Date: " + str(DateUtils.getDateStrFromDay(i, startYear)))
    subgraphIndices = numpy.nonzero(detections <= i)[0]
    subgraphIndicesList2.append(subgraphIndices)
#Locations and labels for years
locs = list(range(0, int(absDayList[-1]), daysInYear*2))
labels = numpy.arange(1986, 2006, 2)
#Some indices
contactIndex = fInds["contactTrace"]
donorIndex = fInds["donor"]
randomTestIndex = fInds["randomTest"]
stdIndex = fInds["STD"]
prisonerIndex = fInds["prisoner"]
doctorIndex = fInds["recommendVisit"]
#The most popular provinces
havanaIndex = fInds["CH"]
villaClaraIndex = fInds["VC"]
pinarIndex = fInds["PR"]
holguinIndex = fInds["HO"]
habanaIndex = fInds["LH"]
sanctiIndex = fInds["SS"]
santiagoIndex = fInds['SC']
camagueyIndex = fInds['CA']
def plotVertexStats():
    #Calculate all vertex statistics
    logging.info("Computing vertex stats")
    
    #Indices
    numContactsIndex = fInds["numContacts"]
    numTestedIndex = fInds["numTested"]
    numPositiveIndex = fInds["numPositive"]
    #Properties of vertex values
    detectionAges = []
    deathAfterInfectAges = []
    deathAges = []
    homoMeans = []
    maleSums = []
    femaleSums = []
    heteroSums = []
    biSums = []
    contactMaleSums = []
    contactFemaleSums = []
    contactHeteroSums = []
    contactBiSums = []
    doctorMaleSums = []
    doctorFemaleSums = []
    doctorHeteroSums = []
    doctorBiSums = []
    contactSums = []
    nonContactSums = []
    donorSums = []
    randomTestSums = []
    stdSums = []
    prisonerSums = []
    recommendSums = []
    #This is: all detections - contact, donor, randomTest, std, recommend
    otherSums = []
    havanaSums = []
    villaClaraSums = []
    pinarSums = []
    holguinSums = []
    habanaSums = []
    sanctiSums = []
    numContactSums = []
    numTestedSums = []
    numPositiveSums = []
    #Total number of sexual contacts 
    numContactMaleSums = []
    numContactFemaleSums = []
    numContactHeteroSums = []
    numContactBiSums = []
    numTestedMaleSums = []
    numTestedFemaleSums = []
    numTestedHeteroSums = []
    numTestedBiSums = []
    numPositiveMaleSums = []
    numPositiveFemaleSums = []
    numPositiveHeteroSums = []
    numPositiveBiSums = []
    propPositiveMaleSums = []
    propPositiveFemaleSums = []
    propPositiveHeteroSums = []
    propPositiveBiSums = []
    numContactVertices = []
    numContactEdges = []
    numInfectEdges = []
    #Mean proportion of degree at end of epidemic 
    meanPropDegree = []
    finalDegreeSequence = numpy.array(sGraph.outDegreeSequence(), numpy.float) 
    degreeOneSums = []
    degreeTwoSums = []
    degreeThreePlusSums = []
    numProvinces = 15
    provinceArray = numpy.zeros((len(subgraphIndicesList), numProvinces))
    m = 0 
    for subgraphIndices in subgraphIndicesList: 
        subgraph = sGraph.subgraph(subgraphIndices)
        infectSubGraph = sGraphInfect.subgraph(subgraphIndices)
        subgraphVertexArray = subgraph.getVertexList().getVertices(range(subgraph.getNumVertices()))
        detectionAges.append(numpy.mean((subgraphVertexArray[:, detectionIndex] - subgraphVertexArray[:, dobIndex]))/daysInYear)
        deathAfterInfectAges.append((numpy.mean(subgraphVertexArray[:, deathIndex] - subgraphVertexArray[:, detectionIndex]))/daysInYear)
        deathAges.append(numpy.mean((subgraphVertexArray[:, deathIndex] - subgraphVertexArray[:, dobIndex]))/daysInYear)
        homoMeans.append(numpy.mean(subgraphVertexArray[:, orientationIndex]))
        nonContactSums.append(subgraphVertexArray.shape[0] - numpy.sum(subgraphVertexArray[:, contactIndex]))
        contactSums.append(numpy.sum(subgraphVertexArray[:, contactIndex]))
        donorSums.append(numpy.sum(subgraphVertexArray[:, donorIndex]))
        randomTestSums.append(numpy.sum(subgraphVertexArray[:, randomTestIndex]))
        stdSums.append(numpy.sum(subgraphVertexArray[:, stdIndex]))
        prisonerSums.append(numpy.sum(subgraphVertexArray[:, prisonerIndex]))
        recommendSums.append(numpy.sum(subgraphVertexArray[:, doctorIndex]))
        otherSums.append(subgraphVertexArray.shape[0] - numpy.sum(subgraphVertexArray[:, [contactIndex, donorIndex, randomTestIndex, stdIndex, doctorIndex]]))
        heteroSums.append(numpy.sum(subgraphVertexArray[:, orientationIndex]==0))
        biSums.append(numpy.sum(subgraphVertexArray[:, orientationIndex]==1))
        femaleSums.append(numpy.sum(subgraphVertexArray[:, genderIndex]==1))
        maleSums.append(numpy.sum(subgraphVertexArray[:, genderIndex]==0))
        contactHeteroSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==0, subgraphVertexArray[:, contactIndex])))
        contactBiSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==1, subgraphVertexArray[:, contactIndex])))
        contactFemaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==1, subgraphVertexArray[:, contactIndex])))
        contactMaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==0, subgraphVertexArray[:, contactIndex])))
        doctorHeteroSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==0, subgraphVertexArray[:, doctorIndex])))
        doctorBiSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==1, subgraphVertexArray[:, doctorIndex])))
        doctorFemaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==1, subgraphVertexArray[:, doctorIndex])))
        doctorMaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==0, subgraphVertexArray[:, doctorIndex])))
        havanaSums.append(numpy.sum(subgraphVertexArray[:, havanaIndex]==1))
        villaClaraSums.append(numpy.sum(subgraphVertexArray[:, villaClaraIndex]==1))
        pinarSums.append(numpy.sum(subgraphVertexArray[:, pinarIndex]==1))
        holguinSums.append(numpy.sum(subgraphVertexArray[:, holguinIndex]==1))
        habanaSums.append(numpy.sum(subgraphVertexArray[:, habanaIndex]==1))
        sanctiSums.append(numpy.sum(subgraphVertexArray[:, sanctiIndex]==1))
        numContactSums.append(numpy.mean(subgraphVertexArray[:, numContactsIndex]))
        numTestedSums.append(numpy.mean(subgraphVertexArray[:, numTestedIndex]))
        numPositiveSums.append(numpy.mean(subgraphVertexArray[:, numPositiveIndex]))
        numContactMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numContactsIndex]))
        numContactFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numContactsIndex]))
        numContactHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numContactsIndex]))
        numContactBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numContactsIndex]))
        numTestedMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numTestedIndex]))
        numTestedFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numTestedIndex]))
        numTestedHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numTestedIndex]))
        numTestedBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numTestedIndex]))
        numPositiveMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numPositiveIndex]))
        numPositiveFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numPositiveIndex]))
        numPositiveHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numPositiveIndex]))
        numPositiveBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numPositiveIndex]))
        propPositiveMaleSums.append(numPositiveMaleSums[m]/float(numTestedMaleSums[m]))
        propPositiveFemaleSums.append(numPositiveFemaleSums[m]/float(numTestedFemaleSums[m]))
        propPositiveHeteroSums.append(numPositiveHeteroSums[m]/float(numTestedHeteroSums[m]))
        propPositiveBiSums.append(numPositiveBiSums[m]/float(numTestedBiSums[m]))
        numContactVertices.append(subgraph.getNumVertices())
        numContactEdges.append(subgraph.getNumEdges())
        numInfectEdges.append(infectSubGraph.getNumEdges())
        nonZeroInds = finalDegreeSequence[subgraphIndices]!=0
        propDegrees = numpy.mean(subgraph.outDegreeSequence()[nonZeroInds]/finalDegreeSequence[subgraphIndices][nonZeroInds])
        meanPropDegree.append(numpy.mean(propDegrees)) 
        degreeOneSums.append(numpy.sum(subgraph.outDegreeSequence()==1))
        degreeTwoSums.append(numpy.sum(subgraph.outDegreeSequence()==2))
        degreeThreePlusSums.append(numpy.sum(subgraph.outDegreeSequence()>=3))
        provinceArray[m, :] = numpy.sum(subgraphVertexArray[:, fInds["CA"]:fInds['VC']+1], 0)
        m += 1 
    #Save some of the results for the ABC work
    numStats = 2 
    vertexStatsArray = numpy.zeros((len(subgraphIndicesList), numStats))
    vertexStatsArray[:, 0] = numpy.array(biSums)
    vertexStatsArray[:, 1] = numpy.array(heteroSums)
    resultsFileName = resultsDir + "ContactGrowthVertexStats.pkl"
    Util.savePickle(vertexStatsArray, resultsFileName)
    global plotInd 
    plt.figure(plotInd)
    plt.plot(absDayList, detectionAges)
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Detection Age (years)")
    plt.savefig(figureDir + "DetectionMeansGrowth.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, heteroSums, 'k-', absDayList, biSums, 'k--', absDayList, femaleSums, 'k-.', absDayList, maleSums, 'k:')
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Detections")
    plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
    plt.savefig(figureDir + "OrientationGenderGrowth.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, contactHeteroSums, 'k-', absDayList, contactBiSums, 'k--', absDayList, contactFemaleSums, 'k-.', absDayList, contactMaleSums, 'k:')
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Contact tracing detections")
    plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
    plt.savefig(figureDir + "OrientationGenderContact.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, doctorHeteroSums, 'k-', absDayList, doctorBiSums, 'k--', absDayList, doctorFemaleSums, 'k-.', absDayList, doctorMaleSums, 'k:')
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Doctor recommendation detections")
    plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
    plt.savefig(figureDir + "OrientationGenderDoctor.eps")
    plotInd += 1
    #Plot all the provinces 
    plt.figure(plotInd)
    plt.hold(True)
    for k in range(provinceArray.shape[1]):
        plt.plot(absDayList, provinceArray[:, k], label=str(k))
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Detections")
    plt.legend(loc="upper left")
    plotInd += 1 
    #Plot of detection types
    plt.figure(plotInd)
    plt.plot(absDayList, contactSums, plotStyles2[0], absDayList, donorSums, plotStyles2[1], absDayList, randomTestSums, plotStyles2[2], absDayList, stdSums, plotStyles2[3], absDayList, otherSums, plotStyles2[4], absDayList, recommendSums, plotStyles2[5])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Detections")
    plt.legend(("Contact tracing", "Blood donation", "Random test", "STD", "Other test", "Doctor recommendation"), loc="upper left")
    plt.savefig(figureDir + "DetectionGrowth.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, numContactSums, plotStyleBW[0], absDayList, numTestedSums, plotStyleBW[1], absDayList, numPositiveSums, plotStyleBW[2])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Contacts")
    plt.legend(("No. contacts", "No. tested", "No. positive"), loc="center left")
    plt.savefig(figureDir + "ContactsGrowth.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, numContactHeteroSums, plotStyleBW[0], absDayList, numContactBiSums, plotStyleBW[1], absDayList, numContactFemaleSums, plotStyleBW[2], absDayList, numContactMaleSums, plotStyleBW[3])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Total contacts")
    plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
    plt.savefig(figureDir + "ContactsGrowthOrientGen.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, numTestedHeteroSums, plotStyleBW[0], absDayList, numTestedBiSums, plotStyleBW[1], absDayList, numTestedFemaleSums, plotStyleBW[2], absDayList, numTestedMaleSums, plotStyleBW[3])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Tested contacts")
    plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
    plt.savefig(figureDir + "TestedGrowthOrientGen.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, numPositiveHeteroSums, plotStyleBW[0], absDayList, numPositiveBiSums, plotStyleBW[1], absDayList, numPositiveFemaleSums, plotStyleBW[2], absDayList, numPositiveMaleSums, plotStyleBW[3])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Positive contacts")
    plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
    plt.savefig(figureDir + "PositiveGrowthOrientGen.eps")
    plotInd += 1
    #Proportion positive versus tested
    plt.figure(plotInd)
    plt.plot(absDayList, propPositiveHeteroSums, plotStyleBW[0], absDayList, propPositiveBiSums, plotStyleBW[1], absDayList, propPositiveFemaleSums, plotStyleBW[2], absDayList, propPositiveMaleSums, plotStyleBW[3])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Proportion positive contacts")
    plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
    plt.savefig(figureDir + "PercentPositiveGrowthOrientGen.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.hold(True)
    plt.plot(absDayList, havanaSums, plotStyles2[0])
    plt.plot(absDayList, villaClaraSums, plotStyles2[1])
    plt.plot(absDayList, pinarSums, plotStyles2[2])
    plt.plot(absDayList, holguinSums, plotStyles2[3])
    plt.plot(absDayList, habanaSums, plotStyles2[4])
    plt.plot(absDayList, sanctiSums, plotStyles2[5])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Detections")
    plt.legend(("Havana City", "Villa Clara", "Pinar del Rio", "Holguin", "La Habana", "Sancti Spiritus"), loc="upper left")
    plt.savefig(figureDir + "ProvinceGrowth.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, numContactVertices, plotStyleBW[0], absDayList, numContactEdges, plotStyleBW[1], absDayList, numInfectEdges, plotStyleBW[2])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Vertices/edges")
    plt.legend(("Contact vertices", "Contact edges", "Infect edges"), loc="upper left")
    plt.savefig(figureDir + "VerticesEdges.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, meanPropDegree, plotStyleBW[0])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Proportion of final degree")
    plt.savefig(figureDir + "MeanPropDegree.eps")
    plotInd += 1
    plt.figure(plotInd)
    plt.plot(absDayList, degreeOneSums, plotStyleBW[0], absDayList, degreeTwoSums, plotStyleBW[1], absDayList, degreeThreePlusSums, plotStyleBW[2])
    plt.xticks(locs, labels)
    plt.xlabel("Year")
    plt.ylabel("Detections")
    plt.legend(("Degree = 1", "Degree = 2", "Degree >= 3"), loc="upper left")
    plotInd += 1
    #Print a table of interesting stats
    results = numpy.array([havanaSums])
    results = numpy.r_[results, numpy.array([villaClaraSums])]
    results = numpy.r_[results, numpy.array([pinarSums])]
    results = numpy.r_[results, numpy.array([holguinSums])]
    results = numpy.r_[results, numpy.array([habanaSums])]
    results = numpy.r_[results, numpy.array([sanctiSums])]
    print(Latex.listToRow(["Havana City", "Villa Clara", "Pinar del Rio", "Holguin", "La Habana", "Sancti Spiritus"]))
    print("\\hline")
    for i in range(0, len(dayList), 4):
        day = dayList[i]
        print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[:, i].T) + "\\\\")
    results = numpy.array([heteroSums])
    results = numpy.r_[results, numpy.array([biSums])]
    results = numpy.r_[results, numpy.array([femaleSums])]
    results = numpy.r_[results, numpy.array([maleSums])]
    print("\n\n")
    print(Latex.listToRow(["Heterosexual", "MSM", "Female", "Male"]))
    print("\\hline")
    for i in range(0, len(dayList), 4):
        day = dayList[i]
        print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[:, i].T) + "\\\\")
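#A minimal, standalone sketch of the LaTeX row format printed by the tables
#above: a label column followed by " & "-joined values and a "\\" terminator.
#It does not use the project's Latex helper and the name latexRowSketch is
#purely illustrative; it is not called elsewhere.
def latexRowSketch(label, values, precision=1):
    formatString = "%." + str(precision) + "f"
    formattedValues = [formatString % v for v in values]
    return label + " & " + " & ".join(formattedValues) + "\\\\"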
def computeConfigScalarStats():
    logging.info("Computing configuration model scalar stats")
    graphFileNameBase = resultsDir + "ConfigGraph"
    resultsFileNameBase = resultsDir + "ConfigGraphScalarStats"
    #graphStats.useFloydWarshall = True
    for j in range(numConfigGraphs):
        resultsFileName = resultsFileNameBase + str(j)
        if not os.path.isfile(resultsFileName):
            configGraph = SparseGraph.load(graphFileNameBase + str(j))
            statsArray = graphStats.sequenceScalarStats(configGraph, subgraphIndicesList, slowStats)
            Util.savePickle(statsArray, resultsFileName, True)
            gc.collect()
    logging.info("All done")
def computeConfigVectorStats():
    #Note: We can make this multithreaded; a minimal thread pool sketch follows this function
    logging.info("Computing configuration model vector stats")
    graphFileNameBase = resultsDir + "ConfigGraph"
    resultsFileNameBase = resultsDir + "ConfigGraphVectorStats"
    for j in range(numConfigGraphs):
        resultsFileName = resultsFileNameBase + str(j)
        if not os.path.isfile(resultsFileName):
            configGraph = SparseGraph.load(graphFileNameBase + str(j))
            statsDictList = graphStats.sequenceVectorStats(configGraph, subgraphIndicesList2, eigenStats=False)
            Util.savePickle(statsDictList, resultsFileName, False)
            gc.collect()
    logging.info("All done")
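#A minimal, untested sketch of the multithreading noted above: each
#configuration graph is processed independently, so a thread pool can map over
#the graph indices. It assumes graphStats.sequenceVectorStats can safely be
#called concurrently; otherwise a process pool with a module-level worker
#function would be needed. This function is not called anywhere.
def computeConfigVectorStatsThreaded(numThreads=4):
    from multiprocessing.dummy import Pool as ThreadPool
    graphFileNameBase = resultsDir + "ConfigGraph"
    resultsFileNameBase = resultsDir + "ConfigGraphVectorStats"
    def computeOne(j):
        #Skip graphs whose vector stats have already been saved
        resultsFileName = resultsFileNameBase + str(j)
        if not os.path.isfile(resultsFileName):
            configGraph = SparseGraph.load(graphFileNameBase + str(j))
            statsDictList = graphStats.sequenceVectorStats(configGraph, subgraphIndicesList2, eigenStats=False)
            Util.savePickle(statsDictList, resultsFileName, False)
    pool = ThreadPool(numThreads)
    pool.map(computeOne, range(numConfigGraphs))
    pool.close()
    pool.join()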
def plotScalarStats():
    logging.info("Computing scalar stats")
    resultsFileName = resultsDir + "ContactGrowthScalarStats.pkl"
    if saveResults:
        statsArray = graphStats.sequenceScalarStats(sGraph, subgraphIndicesList, slowStats)
        Util.savePickle(statsArray, resultsFileName, True)
        #Now compute statistics on the configuration graphs 
    else:
        statsArray = Util.loadPickle(resultsFileName)
        #Take the mean of the results over the configuration model graphs
        resultsFileNameBase = resultsDir + "ConfigGraphScalarStats"
        numGraphs = len(subgraphIndicesList)
        #configStatsArrays = numpy.zeros((numGraphs, graphStats.getNumStats(), numConfigGraphs))
        configStatsArrays = numpy.zeros((numGraphs, graphStats.getNumStats()-2, numConfigGraphs))
        for j in range(numConfigGraphs):
            resultsFileName = resultsFileNameBase + str(j)
            configStatsArrays[:, :, j] = Util.loadPickle(resultsFileName)
        configStatsArray = numpy.mean(configStatsArrays, 2)
        configStatsStd =  numpy.std(configStatsArrays, 2)
        global plotInd
        def plotRealConfigError(index, styleReal, styleConfig, realLabel, configLabel):
            plt.hold(True)
            plt.plot(absDayList, statsArray[:, index], styleReal, label=realLabel)
            #errors = numpy.c_[configStatsArray[:, index]-configStatsMinArray[:, index] , configStatsMaxArray[:, index]-configStatsArray[:, index]].T
            errors = numpy.c_[configStatsStd[:, index], configStatsStd[:, index]].T
            plt.plot(absDayList, configStatsArray[:, index], styleConfig, label=configLabel)
            plt.errorbar(absDayList, configStatsArray[:, index], errors, linewidth=0, elinewidth=1, label="_nolegend_", ecolor="red")
            xmin, xmax = plt.xlim()
            plt.xlim((0, xmax))
            ymin, ymax = plt.ylim()
            plt.ylim((0, ymax))
        #Output all the results into plots
        plt.figure(plotInd)
        plt.hold(True)
        plotRealConfigError(graphStats.maxComponentSizeIndex, plotStyleBW[0], plotStyles4[0], "Max comp. vertices", "CM max comp. vertices")
        plotRealConfigError(graphStats.maxComponentEdgesIndex, plotStyleBW[1], plotStyles4[1], "Max comp. edges", "CM max comp. edges")
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("No. vertices/edges")
        plt.legend(loc="upper left")
        plt.savefig(figureDir + "MaxComponentSizeGrowth.eps")
        plotInd += 1
        for k in range(len(dayList)):
            day = dayList[k]
            print(str(DateUtils.getDateStrFromDay(day, startYear)) + ": " + str(statsArray[k, graphStats.maxComponentEdgesIndex]))
            #print(str(DateUtils.getDateStrFromDay(day, startYear)) + ": " + str(configStatsArray[k, graphStats.numComponentsIndex]))
        plt.figure(plotInd)
        plotRealConfigError(graphStats.numComponentsIndex, plotStyleBW[0], plotStyles4[0], "Size >= 1", "CM size >= 1")
        plotRealConfigError(graphStats.numNonSingletonComponentsIndex, plotStyleBW[1], plotStyles4[1], "Size >= 2", "CM size >= 2")
        plotRealConfigError(graphStats.numTriOrMoreComponentsIndex, plotStyleBW[2], plotStyles4[2], "Size >= 3", "CM size >= 3")
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("No. components")
        plt.legend(loc="upper left")
        plt.savefig(figureDir + "NumComponentsGrowth.eps")
        plotInd += 1
        plt.figure(plotInd)
        plotRealConfigError(graphStats.meanComponentSizeIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Mean component size")
        plt.legend(loc="lower right")
        plt.savefig(figureDir + "MeanComponentSizeGrowth.eps")
        plotInd += 1
        plt.figure(plotInd)
        plotRealConfigError(graphStats.diameterIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Max component diameter")
        plt.legend(loc="lower right")
        plt.savefig(figureDir + "MaxComponentDiameterGrowth.eps")
        plotInd += 1
        plt.figure(plotInd)
        plotRealConfigError(graphStats.effectiveDiameterIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Effective diameter")
        plt.legend(loc="lower right")
        plt.savefig(figureDir + "MaxComponentEffDiameterGrowth.eps")
        plotInd += 1
        plt.figure(plotInd)
        plotRealConfigError(graphStats.meanDegreeIndex, plotStyleBW[0], plotStyles4[0], "All vertices", "CM all vertices")
        plotRealConfigError(graphStats.maxCompMeanDegreeIndex, plotStyleBW[1], plotStyles4[1], "Max component", "CM max component")
        #plt.plot(absDayList, statsArray[:, graphStats.meanDegreeIndex], plotStyleBW[0], absDayList, statsArray[:, graphStats.maxCompMeanDegreeIndex], plotStyleBW[1], absDayList, configStatsArray[:, graphStats.meanDegreeIndex], plotStyles4[0], absDayList, configStatsArray[:, graphStats.maxCompMeanDegreeIndex], plotStyles4[1])
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Mean degree")
        plt.legend(loc="lower right")
        plt.savefig(figureDir + "MeanDegrees.eps")
        plotInd += 1
        plt.figure(plotInd)
        plotRealConfigError(graphStats.densityIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
        #plt.plot(absDayList, statsArray[:, graphStats.densityIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.densityIndex], plotStyles4[0])
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Density")
        plt.legend()
        plt.savefig(figureDir + "DensityGrowth.eps")
        plotInd += 1
        plt.figure(plotInd)
        plt.plot(absDayList, statsArray[:, graphStats.powerLawIndex], plotStyleBW[0])
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Alpha")
        plt.savefig(figureDir + "PowerLawGrowth.eps")
        plotInd += 1
        plt.figure(plotInd)
        plotRealConfigError(graphStats.geodesicDistanceIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
        #plt.plot(absDayList, statsArray[:, graphStats.geodesicDistanceIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.geodesicDistanceIndex], plotStyles4[0])
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Geodesic distance")
        plt.legend(loc="lower right")
        plt.savefig(figureDir + "GeodesicGrowth.eps")
        plotInd += 1
        plt.figure(plotInd)
        plotRealConfigError(graphStats.harmonicGeoDistanceIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
        #plt.plot(absDayList, statsArray[:, graphStats.harmonicGeoDistanceIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.harmonicGeoDistanceIndex], plotStyles4[0])
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Mean harmonic geodesic distance")
        plt.legend(loc="upper right")
        plt.savefig(figureDir + "HarmonicGeodesicGrowth.eps")
        plotInd += 1
        #print(statsArray[:, graphStats.harmonicGeoDistanceIndex])
        plt.figure(plotInd)
        plotRealConfigError(graphStats.geodesicDistMaxCompIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "Config model")
        #plt.plot(absDayList, statsArray[:, graphStats.geodesicDistMaxCompIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.geodesicDistMaxCompIndex], plotStyles4[0])
        plt.xticks(locs, labels)
        plt.xlabel("Year")
        plt.ylabel("Max component mean geodesic distance")
        plt.legend(loc="lower right")
        plt.savefig(figureDir + "MaxCompGeodesicGrowth.eps")
        plotInd += 1
        #Find the number of edges in the infection graph
        resultsFileName = resultsDir + "InfectGrowthScalarStats.pkl"
        infectStatsArray = Util.loadPickle(resultsFileName)
        #Make sure we don't include 0 in the array
        vertexIndex = numpy.argmax(statsArray[:, graphStats.numVerticesIndex] > 0)
        edgeIndex = numpy.argmax(infectStatsArray[:, graphStats.numEdgesIndex] > 0)
        minIndex = numpy.maximum(vertexIndex, edgeIndex)
        plt.figure(plotInd)
        plt.plot(numpy.log(statsArray[minIndex:, graphStats.numVerticesIndex]), numpy.log(statsArray[minIndex:, graphStats.numEdgesIndex]), plotStyleBW[0])
        plt.plot(numpy.log(infectStatsArray[minIndex:, graphStats.numVerticesIndex]), numpy.log(infectStatsArray[minIndex:, graphStats.numEdgesIndex]), plotStyleBW[1])
        plt.plot(numpy.log(statsArray[minIndex:, graphStats.maxComponentSizeIndex]), numpy.log(statsArray[minIndex:, graphStats.maxComponentEdgesIndex]), plotStyleBW[2])
        plt.xlabel("log(|V|)")
        plt.ylabel("log(|E|)/log(|D|)")
        plt.legend(("Contact graph", "Infection graph", "Max component"), loc="upper left")
        plt.savefig(figureDir + "LogVerticesEdgesGrowth.eps")
        plotInd += 1
    results = statsArray[:, graphStats.effectiveDiameterIndex] 
    results = numpy.c_[results, configStatsArray[:, graphStats.effectiveDiameterIndex]]
    results = numpy.c_[results, statsArray[:, graphStats.geodesicDistMaxCompIndex]]
    results = numpy.c_[results, configStatsArray[:, graphStats.geodesicDistMaxCompIndex]]
    print("\n\n")
    print(Latex.listToRow(["Diameter", "CM Diameter", "Mean Geodesic", "CM Mean Geodesic"]))
    print("\\hline")
    for i in range(0, len(dayList), 4):
        day = dayList[i]
        print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[i, :]) + "\\\\")
def plotVectorStats():
    #Finally, compute some vector stats at various points in the graph
    logging.info("Computing vector stats")
    global plotInd
    resultsFileName = resultsDir + "ContactGrowthVectorStats.pkl"
    if saveResults:
        statsDictList = graphStats.sequenceVectorStats(sGraph, subgraphIndicesList2)
        Util.savePickle(statsDictList, resultsFileName, False)
    else:
        statsDictList = Util.loadPickle(resultsFileName)
        #Load up configuration model results
        configStatsDictList = []
        resultsFileNameBase = resultsDir + "ConfigGraphVectorStats"
        for j in range(numConfigGraphs):
            resultsFileName = resultsFileNameBase + str(j)
            configStatsDictList.append(Util.loadPickle(resultsFileName))
        #Take the element-wise mean over the configuration graphs, accumulating into the first stats list (a standalone padding/averaging sketch follows this function)
        meanConfigStatsDictList = configStatsDictList[0]
        for i in range(len(configStatsDictList[0])):
            for k in range(1, numConfigGraphs):
                for key in configStatsDictList[k][i].keys():
                    if configStatsDictList[k][i][key].shape[0] > meanConfigStatsDictList[i][key].shape[0]:
                        meanConfigStatsDictList[i][key] = numpy.r_[meanConfigStatsDictList[i][key], numpy.zeros(configStatsDictList[k][i][key].shape[0] - meanConfigStatsDictList[i][key].shape[0])]
                    elif configStatsDictList[k][i][key].shape[0] < meanConfigStatsDictList[i][key].shape[0]:
                        configStatsDictList[k][i][key] = numpy.r_[configStatsDictList[k][i][key], numpy.zeros(meanConfigStatsDictList[i][key].shape[0] - configStatsDictList[k][i][key].shape[0])]
                    meanConfigStatsDictList[i][key] += configStatsDictList[k][i][key]
            for key in configStatsDictList[0][i].keys():
                meanConfigStatsDictList[i][key] = meanConfigStatsDictList[i][key]/numConfigGraphs
        triangleDistArray = numpy.zeros((len(dayList2), 100))
        configTriangleDistArray = numpy.zeros((len(dayList2), 100))
        hopPlotArray = numpy.zeros((len(dayList2), 27))
        configHopPlotArray = numpy.zeros((len(dayList2), 30))
        componentsDistArray = numpy.zeros((len(dayList2), 3000))
        configComponentsDistArray = numpy.zeros((len(dayList2), 3000))
        numVerticesEdgesArray = numpy.zeros((len(dayList2), 2), numpy.int)
        numVerticesEdgesArray[:, 0] = [len(sgl) for sgl in subgraphIndicesList2]
        numVerticesEdgesArray[:, 1] = [sGraph.subgraph(sgl).getNumEdges() for sgl in subgraphIndicesList2]
        binWidths = numpy.arange(0, 0.50, 0.05)
        eigVectorDists = numpy.zeros((len(dayList2), binWidths.shape[0]-1), numpy.int)
        femaleSums = numpy.zeros(len(dayList2))
        maleSums = numpy.zeros(len(dayList2))
        heteroSums = numpy.zeros(len(dayList2))
        biSums = numpy.zeros(len(dayList2))
        contactSums = numpy.zeros(len(dayList2))
        nonContactSums = numpy.zeros(len(dayList2))
        donorSums = numpy.zeros(len(dayList2))
        randomTestSums = numpy.zeros(len(dayList2))
        stdSums = numpy.zeros(len(dayList2))
        prisonerSums = numpy.zeros(len(dayList2))
        recommendSums = numpy.zeros(len(dayList2))
        
        meanAges = numpy.zeros(len(dayList2))
        degrees = numpy.zeros((len(dayList2), 20))
        provinces = numpy.zeros((len(dayList2), 15))
        havanaSums = numpy.zeros(len(dayList2))
        villaClaraSums = numpy.zeros(len(dayList2))
        pinarSums = numpy.zeros(len(dayList2))
        holguinSums = numpy.zeros(len(dayList2))
        habanaSums = numpy.zeros(len(dayList2))
        sanctiSums = numpy.zeros(len(dayList2))
        meanDegrees = numpy.zeros(len(dayList2))
        stdDegrees = numpy.zeros(len(dayList2))
        #Note that death has a lot of missing values
        for j in range(len(dayList2)):
            dateStr = (str(DateUtils.getDateStrFromDay(dayList2[j], startYear)))
            logging.info(dateStr)
            statsDict = statsDictList[j]
            configStatsDict = meanConfigStatsDictList[j]
            degreeDist = statsDict["outDegreeDist"]
            degreeDist = degreeDist/float(numpy.sum(degreeDist))
            #Note that the degree distribution for the configuration graph will be identical
            eigenDist = statsDict["eigenDist"]
            eigenDist = numpy.log(eigenDist[eigenDist>=10**-1])
            #configEigenDist = configStatsDict["eigenDist"]
            #configEigenDist = numpy.log(configEigenDist[configEigenDist>=10**-1])
            hopCount = statsDict["hopCount"]
            hopCount = numpy.log10(hopCount)
            hopPlotArray[j, 0:hopCount.shape[0]] = hopCount
            configHopCount = configStatsDict["hopCount"]
            configHopCount = numpy.log10(configHopCount)
            #configHopPlotArray[j, 0:configHopCount.shape[0]] = configHopCount
            triangleDist = statsDict["triangleDist"]
            #triangleDist = numpy.array(triangleDist, numpy.float64)/numpy.sum(triangleDist)
            triangleDist = numpy.array(triangleDist, numpy.float64)
            triangleDistArray[j, 0:triangleDist.shape[0]] = triangleDist
            configTriangleDist = configStatsDict["triangleDist"]
            configTriangleDist = numpy.array(configTriangleDist, numpy.float64)/numpy.sum(configTriangleDist)
            configTriangleDistArray[j, 0:configTriangleDist.shape[0]] = configTriangleDist
            maxEigVector = statsDict["maxEigVector"]
            eigenvectorInds = numpy.flipud(numpy.argsort(numpy.abs(maxEigVector)))
            top10eigenvectorInds = eigenvectorInds[0:int(numpy.round(eigenvectorInds.shape[0]/10.0))]
            maxEigVector = numpy.abs(maxEigVector[eigenvectorInds])
            #print(maxEigVector)
            eigVectorDists[j, :] = numpy.histogram(maxEigVector, binWidths)[0]
            componentsDist = statsDict["componentsDist"]
            componentsDist = numpy.array(componentsDist, numpy.float64)/numpy.sum(componentsDist)
            componentsDistArray[j, 0:componentsDist.shape[0]] = componentsDist
            configComponentsDist = configStatsDict["componentsDist"]
            configComponentsDist = numpy.array(configComponentsDist, numpy.float64)/numpy.sum(configComponentsDist)
            configComponentsDistArray[j, 0:configComponentsDist.shape[0]] = configComponentsDist
            plotInd2 = plotInd
            plt.figure(plotInd2)
            plt.plot(numpy.arange(degreeDist.shape[0]), degreeDist, plotStyles2[j], label=dateStr)
            plt.xlabel("Degree")
            plt.ylabel("Probability")
            plt.ylim((0, 0.5))
            plt.savefig(figureDir + "DegreeDist" +  ".eps")
            plt.legend()
            plotInd2 += 1
            """
            plt.figure(plotInd2)
            plt.plot(numpy.arange(eigenDist.shape[0]), eigenDist, label=dateStr)
            plt.xlabel("Eigenvalue rank")
            plt.ylabel("log(Eigenvalue)")
            plt.savefig(figureDir + "EigenDist" +  ".eps")
            plt.legend()
            plotInd2 += 1
            """
            #How does Kleinberg do the hop plots?
            plt.figure(plotInd2)
            plt.plot(numpy.arange(hopCount.shape[0]), hopCount, plotStyles[j], label=dateStr)
            plt.xlabel("k")
            plt.ylabel("log10(pairs)")
            plt.ylim( (2.5, 7) )
            plt.legend(loc="lower right")
            plt.savefig(figureDir + "HopCount" + ".eps")
            plotInd2 += 1
            
            plt.figure(plotInd2)
            plt.plot(numpy.arange(maxEigVector.shape[0]), maxEigVector, plotStyles2[j], label=dateStr)
            plt.xlabel("Rank")
            plt.ylabel("log(eigenvector coefficient)")
            plt.savefig(figureDir + "MaxEigVector" +  ".eps")
            plt.legend()
            plotInd2 += 1
            #Compute some information about the 10% most central vertices
            
            subgraphIndices = numpy.nonzero(detections <= dayList2[j])[0]
            subgraph = sGraph.subgraph(subgraphIndices)
            subgraphVertexArray = subgraph.getVertexList().getVertices()
            femaleSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, genderIndex]==1)
            maleSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, genderIndex]==0)
            heteroSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, orientationIndex]==0)
            biSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, orientationIndex]==1)
            contactSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, contactIndex])
            donorSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, donorIndex])
            randomTestSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, randomTestIndex])
            stdSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, stdIndex])
            prisonerSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, prisonerIndex])
            recommendSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, doctorIndex])
            meanAges[j] = numpy.mean(subgraphVertexArray[top10eigenvectorInds, detectionIndex] - subgraphVertexArray[top10eigenvectorInds, dobIndex])/daysInYear
            havanaSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, havanaIndex])
            villaClaraSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, villaClaraIndex])
            pinarSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, pinarIndex])
            holguinSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, holguinIndex])
            habanaSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, habanaIndex])
            sanctiSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, sanctiIndex])
            provinces[j, :] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, 22:37], 0)
            ddist = numpy.bincount(subgraph.outDegreeSequence()[top10eigenvectorInds])
            degrees[j, 0:ddist.shape[0]] = numpy.array(ddist, numpy.float)/numpy.sum(ddist)
            meanDegrees[j] = numpy.mean(subgraph.outDegreeSequence()[top10eigenvectorInds])
            stdDegrees[j] = numpy.std(subgraph.outDegreeSequence()[top10eigenvectorInds])
            plt.figure(plotInd2)
            plt.plot(numpy.arange(degrees[j, :].shape[0]), degrees[j, :], plotStyles2[j], label=dateStr)
            plt.xlabel("Degree")
            plt.ylabel("Probability")
            #plt.ylim((0, 0.5))
            plt.savefig(figureDir + "DegreeDistCentral" +  ".eps")
            plt.legend()
            plotInd2 += 1
        precision = 4
        dateStrList = [DateUtils.getDateStrFromDay(day, startYear) for day in dayList2]
        print("Hop counts")
        print(Latex.listToRow(dateStrList))
        print(Latex.array2DToRows(hopPlotArray.T))
        print("\nHop counts for configuration graphs")
        print(Latex.listToRow(dateStrList))
        print(Latex.array2DToRows(configHopPlotArray.T))
        print("\n\nEdges and vertices")
        print((Latex.listToRow(dateStrList)))
        print((Latex.array2DToRows(numVerticesEdgesArray.T, precision)))
        print("\n\nEigenvector distribution")
        print((Latex.array1DToRow(binWidths[1:]) + "\\\\"))
        print((Latex.array2DToRows(eigVectorDists)))
        print("\n\nDistribution of component sizes")
        componentsDistArray = componentsDistArray[:, 0:componentsDist.shape[0]]
        nonZeroCols = numpy.sum(componentsDistArray, 0)!=0
        componentsDistArray = numpy.r_[numpy.array([numpy.arange(componentsDistArray.shape[1])[nonZeroCols]]), componentsDistArray[:, nonZeroCols]]
        print((Latex.listToRow(dateStrList)))
        print((Latex.array2DToRows(componentsDistArray.T, precision)))
        print("\n\nDistribution of component sizes in configuration graphs")
        configComponentsDistArray = configComponentsDistArray[:, 0:configComponentsDist.shape[0]]
        nonZeroCols = numpy.sum(configComponentsDistArray, 0)!=0
        configComponentsDistArray = numpy.r_[numpy.array([numpy.arange(configComponentsDistArray.shape[1])[nonZeroCols]]), configComponentsDistArray[:, nonZeroCols]]
        print((Latex.listToRow(dateStrList)))
        print((Latex.array2DToRows(configComponentsDistArray.T, precision)))
        print("\n\nDistribution of triangle participations")
        triangleDistArray = triangleDistArray[:, 0:triangleDist.shape[0]]
        nonZeroCols = numpy.sum(triangleDistArray, 0)!=0
        triangleDistArray = numpy.r_[numpy.array([numpy.arange(triangleDistArray.shape[1])[nonZeroCols]])/2, triangleDistArray[:, nonZeroCols]]
        print((Latex.listToRow(dateStrList)))
        print((Latex.array2DToRows(triangleDistArray.T, precision)))
        configTriangleDistArray = configTriangleDistArray[:, 0:configTriangleDist.shape[0]]
        nonZeroCols = numpy.sum(configTriangleDistArray, 0)!=0
        configTriangleDistArray = numpy.r_[numpy.array([numpy.arange(configTriangleDistArray.shape[1])[nonZeroCols]])/2, configTriangleDistArray[:, nonZeroCols]]
        configTriangleDistArray = numpy.c_[configTriangleDistArray, numpy.zeros((configTriangleDistArray.shape[0], triangleDistArray.shape[1]-configTriangleDistArray.shape[1]))]
        print("\n\nDistribution of central vertices")
        print((Latex.listToRow(dateStrList)))
        subgraphSizes = numpy.array(maleSums + femaleSums, numpy.float)
        print("Female & " + Latex.array1DToRow(femaleSums*100/subgraphSizes, 1) + "\\\\")
        print("Male & " + Latex.array1DToRow(maleSums*100/subgraphSizes, 1) + "\\\\")
        print("\\hline")
        print("Heterosexual & " + Latex.array1DToRow(heteroSums*100/subgraphSizes, 1) + "\\\\")
        print("Bisexual & " + Latex.array1DToRow(biSums*100/subgraphSizes, 1) + "\\\\")
        print("\\hline")
        print("Contact traced & " + Latex.array1DToRow(contactSums*100/subgraphSizes, 1) + "\\\\")
        print("Blood donor & " + Latex.array1DToRow(donorSums*100/subgraphSizes, 1) + "\\\\")
        print("RandomTest & " + Latex.array1DToRow(randomTestSums*100/subgraphSizes, 1) + "\\\\")
        print("STD & " + Latex.array1DToRow(stdSums*100/subgraphSizes, 1) + "\\\\")
        print("Prisoner & " + Latex.array1DToRow(prisonerSums*100/subgraphSizes, 1) + "\\\\")
        print("Doctor recommendation & " + Latex.array1DToRow(recommendSums*100/subgraphSizes, 1) + "\\\\")
        print("\\hline")
        print("Mean ages (years) & " + Latex.array1DToRow(meanAges, 2) + "\\\\")
        print("\\hline")
        print("Holguin & " + Latex.array1DToRow(holguinSums*100/subgraphSizes, 1) + "\\\\")
        print("La Habana & " + Latex.array1DToRow(habanaSums*100/subgraphSizes, 1) + "\\\\")
        print("Havana City & " + Latex.array1DToRow(havanaSums*100/subgraphSizes, 1) + "\\\\")
        print("Pinar del Rio & " + Latex.array1DToRow(pinarSums*100/subgraphSizes, 1) + "\\\\")
        print("Sancti Spiritus & " + Latex.array1DToRow(sanctiSums*100/subgraphSizes, 1) + "\\\\")
        print("Villa Clara & " + Latex.array1DToRow(villaClaraSums*100/subgraphSizes, 1) + "\\\\")
        print("\\hline")
        print("Mean degrees & " + Latex.array1DToRow(meanDegrees, 2) + "\\\\")
        print("Std degrees & " + Latex.array1DToRow(stdDegrees, 2) + "\\\\")
        
        print("\n\nProvinces")
        print(Latex.array2DToRows(provinces))
        print("\n\nDegree distribution")
        print(Latex.array2DToRows(degrees))
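#A small self-contained sketch of the zero-pad-and-average idea used above to
#combine distributions of different lengths (degree, component size and
#triangle distributions) across the configuration graphs. The function name is
#illustrative and it is not called elsewhere.
def meanOfRaggedArrays(arrayList):
    maxLen = max([a.shape[0] for a in arrayList])
    total = numpy.zeros(maxLen)
    for a in arrayList:
        #Pad each array with zeros up to the common length before summing
        total += numpy.r_[a, numpy.zeros(maxLen - a.shape[0])]
    return total/float(len(arrayList))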
def plotOtherStats():
    #Let's look at geodesic distances in subgraphs and communities
    logging.info("Computing other stats")
    resultsFileName = resultsDir + "ContactGrowthOtherStats.pkl"
    hivGraphStats = HIVGraphStatistics(fInds)
    if saveResults:
        statsArray = hivGraphStats.sequenceScalarStats(sGraph, subgraphIndicesList)
        #statsArray["dayList"] = absDayList
        Util.savePickle(statsArray, resultsFileName, True)
    else:
        statsArray = Util.loadPickle(resultsFileName)
        #Just load the harmonic geodesic distances of the full graph 
        resultsFileName = resultsDir + "ContactGrowthScalarStats.pkl"
        statsArray2 = Util.loadPickle(resultsFileName)
        global plotInd
        msmGeodesic = statsArray[:, hivGraphStats.msmGeodesicIndex]
        msmGeodesic[msmGeodesic < 0] = 0
        msmGeodesic[msmGeodesic == float('inf')] = 0
        #Output all the results into plots
        plt.figure(plotInd)
        plt.plot(absDayList, msmGeodesic, 'k-', absDayList, statsArray[:, hivGraphStats.mostConnectedGeodesicIndex], 'k--')
        plt.xticks(locs, labels)
        #plt.ylim([0, 0.1])
        plt.xlabel("Year")
        plt.ylabel("Mean harmonic geodesic distance")
        plt.legend(("MSM individuals", "Top 10% degree"), loc="upper right")
        plt.savefig(figureDir + "MSM10Geodesic" + ".eps")
        plotInd += 1
        plt.figure(plotInd)
        plt.plot(absDayList, statsArray2[:, graphStats.harmonicGeoDistanceIndex], 'k-', absDayList, statsArray[:, hivGraphStats.menSubgraphGeodesicIndex], 'k--')
        plt.xticks(locs, labels)
        plt.ylim([0, 200.0])
        plt.xlabel("Year")
        plt.ylabel("Mean harmonic geodesic distance")
        plt.legend(("All individuals", "Men subgraph"), loc="upper right")
        plt.savefig(figureDir + "MenSubgraphGeodesic" + ".eps")
        plotInd += 1
#plotVertexStats()
plotScalarStats()
#plotVectorStats()
#plotOtherStats()
plt.show()
#computeConfigScalarStats()
#computeConfigVectorStats()
"""
Probability of adding node based on degree - try to find how we can generate data.
Mean Time between first and last infection for each person
""" | 
	gpl-3.0 | 
| 
	fyffyt/scikit-learn | 
	sklearn/preprocessing/tests/test_data.py | 
	71 | 
	38516 | 
	import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
    if hasattr(a, "toarray"):
        a = a.toarray()
    return a
def test_polynomial_features():
    # Test Polynomial Features
    X1 = np.arange(6)[:, np.newaxis]
    P1 = np.hstack([np.ones_like(X1),
                    X1, X1 ** 2, X1 ** 3])
    deg1 = 3
    X2 = np.arange(6).reshape((3, 2))
    x1 = X2[:, :1]
    x2 = X2[:, 1:]
    P2 = np.hstack([x1 ** 0 * x2 ** 0,
                    x1 ** 1 * x2 ** 0,
                    x1 ** 0 * x2 ** 1,
                    x1 ** 2 * x2 ** 0,
                    x1 ** 1 * x2 ** 1,
                    x1 ** 0 * x2 ** 2])
    deg2 = 2
    for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
        P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
        assert_array_almost_equal(P_test, P)
        P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
        assert_array_almost_equal(P_test, P[:, 1:])
    interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
    X_poly = interact.fit_transform(X)
    assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
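# A small illustrative helper, not part of the original tests: with degree 2
# and interaction_only=True the columns selected from P2 above,
# P2[:, [0, 1, 2, 4]], are the bias, x1, x2 and the cross term x1 * x2, i.e.
# the pure squares are dropped. The helper name is purely hypothetical.
def _interaction_only_expected(x1, x2):
    return np.hstack([np.ones_like(x1), x1, x2, x1 * x2])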
@ignore_warnings
def test_scaler_1d():
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)
    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
    X_scaled = scale(X)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
    X = np.ones(5)
    assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
    """Test numerical stability of scaling"""
    # np.log(1e-5) is used because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std
    # (a small illustration follows this test).
    x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    if LooseVersion(np.__version__) >= LooseVersion('1.9'):
        # This does not raise a warning as the number of samples is too low
        # to trigger the problem in recent numpy
        x_scaled = assert_no_warnings(scale, x)
        assert_array_almost_equal(scale(x), np.zeros(8))
    else:
        w = "standard deviation of the data is probably very close to 0"
        x_scaled = assert_warns_message(UserWarning, w, scale, x)
        assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
    x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    w = "standard deviation of the data is probably very close to 0"
    x_scaled = assert_warns_message(UserWarning, w, scale, x)
    assert_array_almost_equal(x_scaled, np.zeros(10))
    x = np.ones(10, dtype=np.float64) * 1e-100
    x_small_scaled = assert_no_warnings(scale, x)
    assert_array_almost_equal(x_small_scaled, np.zeros(10))
    # Large values can cause (often recoverable) numerical stability issues:
    x_big = np.ones(10, dtype=np.float64) * 1e100
    w = "Dataset may contain too large values"
    x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
    assert_array_almost_equal(x_big_scaled, np.zeros(10))
    assert_array_almost_equal(x_big_scaled, x_small_scaled)
    x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
                                          with_std=False)
    assert_array_almost_equal(x_big_centered, np.zeros(10))
    assert_array_almost_equal(x_big_centered, x_small_scaled)
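# A small illustration, not an assertion-bearing test: with identical values
# the computed mean may not be bit-exact equal to each element, so the sample
# standard deviation can come out as a tiny non-zero number rather than
# exactly zero, which is the situation the warnings above guard against.
# The helper name is purely illustrative.
def _near_zero_std_example():
    x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    return x.mean(), x.std()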
def test_scaler_2d_arrays():
    # Test scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
    assert_true(X_scaled is not X)
    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    X_scaled = scale(X, axis=1, with_std=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    X_scaled = scale(X, axis=1, with_std=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
    # Check that the data hasn't been modified
    assert_true(X_scaled is not X)
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is X)
    X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non zero feature
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
    X = iris.data
    scaler = MinMaxScaler()
    # default params
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.max(axis=0), 1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # not default params: min=1, max=2
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 1)
    assert_array_almost_equal(X_trans.max(axis=0), 2)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # min=-.5, max=.6
    scaler = MinMaxScaler(feature_range=(-.5, .6))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), -.5)
    assert_array_almost_equal(X_trans.max(axis=0), .6)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # raises on invalid range
    scaler = MinMaxScaler(feature_range=(2, 1))
    assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
    # Check min max scaler on toy data with zero variance features
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    # default params
    scaler = MinMaxScaler()
    X_trans = scaler.fit_transform(X)
    X_expected_0_1 = [[0., 0., 0.5],
                      [0., 0., 0.0],
                      [0., 0., 1.0]]
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    X_trans_new = scaler.transform(X_new)
    X_expected_0_1_new = [[+0., 1., 0.500],
                          [-1., 0., 0.083],
                          [+0., 0., 1.333]]
    assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
    # not default params
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    X_expected_1_2 = [[1., 1., 1.5],
                      [1., 1., 1.0],
                      [1., 1., 2.0]]
    assert_array_almost_equal(X_trans, X_expected_1_2)
    # function interface
    X_trans = minmax_scale(X)
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans = minmax_scale(X, feature_range=(1, 2))
    assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
    X = iris.data
    X_trans = minmax_scale(X, axis=1)
    assert_array_almost_equal(np.min(X_trans, axis=1), 0)
    assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)
    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
    # Constant feature.
    X = np.zeros(5)
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_greater_equal(X_scaled.min(), 0.)
    assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    assert_raises(ValueError, StandardScaler().fit, X_csr)
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)
    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))
    scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)
    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)
    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    # test that scaler converts integer input to floating
    # for both sparse and dense matrices
    rng = np.random.RandomState(42)
    X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler = StandardScaler(with_mean=False).fit(X)
        X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
        X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
        X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)
    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)
    assert_array_almost_equal(
        X_scaled.mean(axis=0),
        [0., 1.109, 1.856, 21., 1.559], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
        X_csr_scaled.astype(np.float), 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
    # Check that StandardScaler.fit does not change input
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
    X_csr = sparse.csr_matrix(X)
    X_copy = X.copy()
    StandardScaler(copy=False).fit(X)
    assert_array_equal(X, X_copy)
    X_csr_copy = X_csr.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csr)
    assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X_csr = sparse.csr_matrix(X)
    # check scaling and fit with direct calls on sparse data
    assert_raises(ValueError, scale, X_csr, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
    # check transform and inverse_transform after a fit on a dense array
    scaler = StandardScaler(with_mean=True).fit(X)
    assert_raises(ValueError, scaler.transform, X_csr)
    X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
    assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
    # Check if non finite inputs raise ValueError
    X = [np.nan, 5, 6, 7, 8]
    assert_raises_regex(ValueError,
                        "Input contains NaN, infinity or a value too large",
                        scale, X)
    X = [np.inf, 5, 6, 7, 8]
    assert_raises_regex(ValueError,
                        "Input contains NaN, infinity or a value too large",
                        scale, X)
def test_scale_function_without_centering():
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
    X_csr = sparse.csr_matrix(X)
    X_scaled = scale(X, with_mean=False)
    assert_false(np.any(np.isnan(X_scaled)))
    X_csr_scaled = scale(X_csr, with_mean=False)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))
    # test csc has same outcome
    X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
    assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
    # raises value error on axis != 0
    assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
    assert_array_almost_equal(X_scaled.mean(axis=0),
                              [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that scale made a copy (the original X is left untouched)
    assert_true(X_scaled is not X)
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
    """Test robust scaling of 2d array along first axis"""
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
    scaler = RobustScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
    X = iris.data
    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    q = np.percentile(X_trans, q=(25, 75), axis=0)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
    X = iris.data
    X_trans = robust_scale(X, axis=1)
    assert_array_almost_equal(np.median(X_trans, axis=1), 0)
    q = np.percentile(X_trans, q=(25, 75), axis=1)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
    """Check RobustScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]
    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)
    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to match the quantiles produced by np.percentile
    # in numpy 1.9. Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results! (See the short check below.)
    X_expected = [[0., 0., +0.0],
                  [0., 0., -1.0],
                  [0., 0., +1.0]]
    assert_array_almost_equal(X_trans, X_expected)
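    # A minimal illustrative check (added here, not in the original test),
    # assuming numpy's default linear interpolation for percentiles: for the
    # third column [0.5, -0.1, 1.1] the 25th/75th percentiles are 0.2 and 0.8,
    # so the IQR is 0.6 and (x - median) / IQR reproduces [0.0, -1.0, 1.0].
    col = np.array([0.5, -0.1, 1.1])
    q25, q75 = np.percentile(col, [25, 75])
    assert_array_almost_equal((col - np.median(col)) / (q75 - q25),
                              [0., -1., 1.])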
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 1., +0.],
                      [-1., 0., -0.83333],
                      [+0., 0., +1.66667]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
    """Check MaxAbsScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.3],
         [0., 1., +1.5],
         [0., 0., +0.0]]
    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 2.0, 1.0 / 3.0],
                      [-1., 1.0, 0.0],
                      [+0., 1.0, 1.0]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
    # sparse data
    X_csr = sparse.csr_matrix(X)
    X_trans = scaler.fit_transform(X_csr)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans.A, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
    """Check MaxAbsScaler on toy data with a large negative value"""
    X = [[0., 1.,   +0.5, -1.0],
         [0., 1.,   -0.3, -0.5],
         [0., 1., -100.0,  0.0],
         [0., 0.,   +0.0, -2.0]]
    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    X_expected = [[0., 1.,  0.005,    -0.5],
                  [0., 1., -0.003,    -0.25],
                  [0., 1., -1.0,       0.0],
                  [0., 0.,  0.0,      -1.0]]
    assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
    # Check warning when scaling integer data
    X = np.array([[1, 2, 0],
                  [0, 0, 0]], dtype=np.uint8)
    w = "Data with input dtype uint8 was converted to float64"
    clean_warning_registry()
    assert_warns_message(DataConversionWarning, w, scale, X)
    assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
    assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)
    # set the row number 3 to zero
    X_dense[3, :] = 0.0
    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)
    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l1', copy=True)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is not X)
        X_norm1 = toarray(X_norm)
        normalizer = Normalizer(norm='l1', copy=False)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is X)
        X_norm2 = toarray(X_norm)
        for X_norm in (X_norm1, X_norm2):
            row_sums = np.abs(X_norm).sum(axis=1)
            for i in range(3):
                assert_almost_equal(row_sums[i], 1.0)
            assert_almost_equal(row_sums[3], 0.0)
    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)
    # set the row number 3 to zero
    X_dense[3, :] = 0.0
    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)
    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l2', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)
        normalizer = Normalizer(norm='l2', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)
        for X_norm in (X_norm1, X_norm2):
            for i in range(3):
                assert_almost_equal(la.norm(X_norm[i]), 1.0)
            assert_almost_equal(la.norm(X_norm[3]), 0.0)
    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)
    # set the row number 3 to zero
    X_dense[3, :] = 0.0
    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)
    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='max', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)
        normalizer = Normalizer(norm='max', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)
        for X_norm in (X_norm1, X_norm2):
            row_maxs = X_norm.max(axis=1)
            for i in range(3):
                assert_almost_equal(row_maxs[i], 1.0)
            assert_almost_equal(row_maxs[3], 0.0)
    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
    # Test normalize function
    # Only tests functionality not used by the tests for Normalizer.
    X = np.random.RandomState(37).randn(3, 2)
    assert_array_equal(normalize(X, copy=False),
                       normalize(X.T, axis=0, copy=False).T)
    assert_raises(ValueError, normalize, [[0]], axis=2)
    assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
    X_ = np.array([[1, 0, 5], [2, 3, -1]])
    for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
        X = init(X_.copy())
        binarizer = Binarizer(threshold=2.0, copy=True)
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 4)
        assert_equal(np.sum(X_bin == 1), 2)
        X_bin = binarizer.transform(X)
        assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
        binarizer = Binarizer(copy=True).fit(X)
        X_bin = toarray(binarizer.transform(X))
        assert_true(X_bin is not X)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)
        binarizer = Binarizer(copy=True)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is not X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)
        binarizer = Binarizer(copy=False)
        X_bin = binarizer.transform(X)
        if init is not list:
            assert_true(X_bin is X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)
    binarizer = Binarizer(threshold=-0.5, copy=True)
    for init in (np.array, list):
        X = init(X_.copy())
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 1)
        assert_equal(np.sum(X_bin == 1), 5)
        X_bin = binarizer.transform(X)
    # Cannot use threshold < 0 for sparse
    assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
    # Test that KernelCenterer is equivalent to StandardScaler
    # in feature space
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)
    # center fit time matrix
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)
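    # Illustrative check (not part of the original test): centering the kernel
    # K = X X^T in feature space is the same as K - 1n.K - K.1n + 1n.K.1n,
    # where 1n is the (n x n) matrix whose entries are all 1/n.
    one_n = np.ones_like(K_fit) / K_fit.shape[0]
    K_manual = (K_fit - one_n.dot(K_fit) - K_fit.dot(one_n)
                + one_n.dot(K_fit).dot(one_n))
    assert_array_almost_equal(K_manual, K_fit_centered2)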
    # center predict time matrix
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
    K_pred_centered2 = centerer.transform(K_pred)
    assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    for obj in ((StandardScaler(), Normalizer(), Binarizer())):
        X_transformed = obj.fit(X).transform(X)
        X_transformed2 = obj.fit_transform(X)
        assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
    X = [[1, 0], [0, 1], [0, 1]]
    X = add_dummy_feature(X)
    assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
    X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
    X = add_dummy_feature(X)
    assert_true(sparse.isspmatrix_coo(X), X)
    assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
    X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
    X = add_dummy_feature(X)
    assert_true(sparse.isspmatrix_csc(X), X)
    assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
    X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
    X = add_dummy_feature(X)
    assert_true(sparse.isspmatrix_csr(X), X)
    assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
    # Test OneHotEncoder's fit and transform.
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder()
    # discover max values automatically
    X_trans = enc.fit_transform(X).toarray()
    assert_equal(X_trans.shape, (2, 5))
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
    # check outcome
    assert_array_equal(X_trans,
                       [[0., 1., 0., 1., 1.],
                        [1., 0., 1., 0., 1.]])
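    # Clarifying note (added, not in the original test): feature_indices_ is
    # [0, 4, 7, 9] because the three features span value ranges of size 4, 3
    # and 2 (max observed value + 1); active_features_ keeps only the columns
    # that actually occur in X, which is why X_trans has 5 columns.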
    # number of values per feature given explicitly as 4 (i.e. max value 3)
    enc = OneHotEncoder(n_values=4)
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 4 * 3))
    assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
    # number of values given per feature
    enc = OneHotEncoder(n_values=[3, 2, 2])
    X = [[1, 0, 1], [0, 1, 1]]
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 3 + 2 + 2))
    assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that testing with larger feature works:
    X = np.array([[2, 0, 1], [0, 1, 1]])
    enc.transform(X)
    # test that an error is raised when out of bounds:
    X_too_large = [[0, 2, 1], [0, 1, 1]]
    assert_raises(ValueError, enc.transform, X_too_large)
    assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
    # test that error is raised when wrong number of features
    assert_raises(ValueError, enc.transform, X[:, :-1])
    # test that error is raised when wrong number of features in fit
    # with prespecified n_values
    assert_raises(ValueError, enc.fit, X[:, :-1])
    # test exception on wrong init param
    assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
    enc = OneHotEncoder()
    # test negative input to fit
    assert_raises(ValueError, enc.fit, [[0], [-1]])
    # test negative input to transform
    enc.fit([[0], [1]])
    assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
    # check for sparse=False
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder(sparse=False)
    # discover max values automatically
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 5))
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
    # check outcome
    assert_array_equal(X_trans,
                       np.array([[0., 1., 0., 1., 1.],
                                 [1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
    for M in (X, sparse.csr_matrix(X)):
        Xtr = _transform_selected(M, Binarizer().transform, sel)
        assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
    X = [[3, 2, 1], [0, 1, 1]]
    X_expected = [[1, 2, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0])
    _check_transform_selected(X, X_expected, [True, False, False])
    X_expected = [[1, 1, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0, 1, 2])
    _check_transform_selected(X, X_expected, [True, True, True])
    _check_transform_selected(X, X_expected, "all")
    _check_transform_selected(X, X, [])
    _check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
    enc = OneHotEncoder(categorical_features=cat)
    Xtr = enc.fit_transform(X)
    X2tr = enc.transform(X2)
    return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
    ind = np.where(cat)[0]
    # With mask
    A, B = _run_one_hot(X, X2, cat)
    # With indices
    C, D = _run_one_hot(X, X2, ind)
    # Check shape
    assert_equal(A.shape, (2, n_features))
    assert_equal(B.shape, (1, n_features))
    assert_equal(C.shape, (2, n_features))
    assert_equal(D.shape, (1, n_features))
    # Check that mask and indices give the same results
    assert_array_equal(toarray(A), toarray(C))
    assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
    X = np.array([[3, 2, 1], [0, 1, 1]])
    X2 = np.array([[1, 1, 1]])
    cat = [True, False, False]
    _check_one_hot(X, X2, cat, 4)
    # Edge case: all non-categorical
    cat = [False, False, False]
    _check_one_hot(X, X2, cat, 3)
    # Edge case: all categorical
    cat = [True, True, True]
    _check_one_hot(X, X2, cat, 5)
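    # Where the expected widths come from (illustrative comment only): with
    # n_values inferred automatically, the first feature (values {0, 3}) maps
    # to 2 active columns, the second ({1, 2}) to 2 and the third ({1}) to 1,
    # so all-categorical gives 2 + 2 + 1 = 5 columns, first-only gives
    # 2 + 2 passthrough = 4, and all-passthrough keeps the original 3.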
def test_one_hot_encoder_unknown_transform():
    X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
    y = np.array([[4, 1, 1]])
    # Test that one hot encoder raises error for unknown features
    # present during transform.
    oh = OneHotEncoder(handle_unknown='error')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)
    # Test the ignore option, ignores unknown features.
    oh = OneHotEncoder(handle_unknown='ignore')
    oh.fit(X)
    assert_array_equal(
        oh.transform(y).toarray(),
        np.array([[0.,  0.,  0.,  0.,  1.,  0.,  0.]])
        )
    # Raise an error if handle_unknown is neither 'ignore' nor 'error'.
    oh = OneHotEncoder(handle_unknown='42')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)
 | 
	bsd-3-clause | 
| 
	UNR-AERIAL/scikit-learn | 
	examples/linear_model/plot_iris_logistic.py | 
	283 | 
	1678 | 
	#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target
h = .02  # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the logistic-regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
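# Note: np.c_[xx.ravel(), yy.ravel()] flattens both mesh grids and stacks them
# column-wise into an (n_points, 2) array so every grid point gets classified;
# Z is reshaped back to the grid shape below for plotting.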
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
 | 
	bsd-3-clause | 
| 
	saketkc/statsmodels | 
	examples/incomplete/dates.py | 
	29 | 
	1251 | 
	"""
Using dates with timeseries models
"""
import statsmodels.api as sm
import pandas as pd
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
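# For example, with the default annual frequency the first element of `dates`
# is expected to be a datetime at the end of 1700 (i.e. 1700-12-31).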
# Using Pandas
# ------------
# Make a pandas Series or DataFrame
endog = pd.Series(data.endog, index=dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
 | 
	bsd-3-clause | 
| 
	YoungKwonJo/mlxtend | 
	tests/tests_evaluate/test_learning_curves.py | 
	1 | 
	2212 | 
	from mlxtend.evaluate import plot_learning_curves
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
import numpy as np
def test_training_size():
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, random_state=2)
    clf = DecisionTreeClassifier(max_depth=1, random_state=1)
    training_errors, test_errors = plot_learning_curves(X_train, y_train, X_test, y_test, clf, kind='training_size', suppress_plot=True)
    desired1 = [0.32, 0.33, 0.32, 0.33, 0.30, 0.31, 0.31, 0.22, 0.22, 0.22]
    desired2 = [0.35, 0.35, 0.35, 0.35, 0.43, 0.45, 0.35, 0.35, 0.45, 0.45]
    np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
    np.testing.assert_almost_equal(test_errors, desired2, decimal=2)
def test_scikit_metrics():
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, random_state=2)
    clf = DecisionTreeClassifier(max_depth=1, random_state=1)
    training_errors, test_errors = plot_learning_curves(X_train, y_train, X_test, y_test, clf, kind='training_size', suppress_plot=True, scoring='accuracy')
    desired1 = [0.68, 0.67, 0.68, 0.67, 0.7, 0.69, 0.69, 0.78, 0.78, 0.78]
    desired2 = [0.65, 0.65, 0.65, 0.65, 0.57, 0.55, 0.65, 0.65, 0.55, 0.55]
    np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
    np.testing.assert_almost_equal(test_errors, desired2, decimal=2)
def test_n_features():
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, random_state=2)
    clf = DecisionTreeClassifier(max_depth=1, random_state=1)
    training_errors, test_errors = plot_learning_curves(X_train, y_train, X_test, y_test, clf, kind='n_features', suppress_plot=True)
    desired1 = [0.40, 0.40, 0.32, 0.32]
    desired2 = [0.42, 0.42, 0.35, 0.35]
    np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
    np.testing.assert_almost_equal(test_errors, desired2, decimal=2) | 
	bsd-3-clause | 
| 
	bigdataelephants/scikit-learn | 
	examples/manifold/plot_swissroll.py | 
	330 | 
	1446 | 
	"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
                                             n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(211, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
    ax = fig.add_subplot(211)
    ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
 | 
	bsd-3-clause | 
| 
	wilselby/diy_driverless_car_ROS | 
	rover_cv/camera_cal/src/camera_cal/camera_cal.py | 
	1 | 
	6503 | 
	#!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/paramaggarwal/CarND-Advanced-Lane-Lines/blob/master/Notebook.ipynb
from __future__ import print_function
from __future__ import division
import sys
import traceback
import rospy
import numpy as np
import cv2
import pickle
import glob
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class camera_calibarion(object):
    def __init__(self):
            
      """ROS Subscriptions """
      self.image_pub = rospy.Publisher("/camera_calibation/image_corrected",Image, queue_size=10)
      self.image_sub = rospy.Subscriber("/cam/camera_/image_raw",Image,self.cvt_image)
      """ Variables """
      self.bridge = CvBridge()
      self.latestImage = None
      self.outputImage = None
      self.process = False
      self.calibrated = False
      self.correctedImage = None
      self.mtx = None
      self.dist = None
    def cvt_image(self,data):  
      try:
        self.latestImage = self.bridge.imgmsg_to_cv2(data, "bgr8")	
      except CvBridgeError as e:
        print(e)
      if self.process != True:
          self.process = True    
      
    def camera_cal(self, image):
        
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        
        nx = 8
        ny = 6
        
        dst = np.copy(image) 
        
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0), ..., (7,5,0)
        objp = np.zeros((ny * nx, 3), np.float32)
        objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d points in real world space
        imgpoints = [] # 2d points in image plane.
        
        # Search for chessboard corners
        grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        
        #ret_thresh,  mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)
        
        ret, corners = cv2.findChessboardCorners(image, (nx, ny), None)  #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))        
        
        # If found, add object points, image points
        if ret:
            objpoints.append(objp)           
            cv2.cornerSubPix(grey,corners, (11,11), (-1,-1), criteria)
            imgpoints.append(corners)
            self.calibrated = True
            print ("FOUND!")
            
            #Draw and display the corners
            cv2.drawChessboardCorners(image, (nx, ny), corners, ret)  
            
            # Do camera calibration given object points and image points
            ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)        
        
            # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
            dist_pickle = {}
            dist_pickle["mtx"] = self.mtx
            dist_pickle["dist"] = self.dist
            dist_pickle['objpoints'] = objpoints
            dist_pickle['imgpoints'] = imgpoints
            pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) )
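            # Sketch (added comment, not in the original): the saved pickle can
            # later be reloaded to undistort frames without re-calibrating, e.g.
            #   cal = pickle.load(open(<path to camera_cal_pickle.p>, "rb"))
            #   undistorted = cv2.undistort(img, cal["mtx"], cal["dist"],
            #                               None, cal["mtx"])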
         #else:
             #print("Searching...")
             
        return image
    def drawQuad(self, image, points, color=[255, 0, 0], thickness=4):
        p1, p2, p3, p4 = points
        cv2.line(image, tuple(p1), tuple(p2), color, thickness)
        cv2.line(image, tuple(p2), tuple(p3), color, thickness)
        cv2.line(image, tuple(p3), tuple(p4), color, thickness)
        cv2.line(image, tuple(p4), tuple(p1), color, thickness)
    
    def perspective_transform(self,  image, debug=True, size_top=70, size_bottom=370):
        height, width = image.shape[0:2]
        output_size = height/2
        #src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
        src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
        dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
        #dst = np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]])
        
        M = cv2.getPerspectiveTransform(src, dst)
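        # getPerspectiveTransform returns the 3x3 homography that maps the
        # source quadrilateral (roughly the lane region of the image) onto the
        # destination rectangle, so warpPerspective below produces a top-down
        # ("bird's-eye") view of that region.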
        print(M)
        warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)
        
        if debug:
            self.drawQuad(image, src, [255, 0, 0])
            self.drawQuad(image, dst, [255, 255, 0])
            plt.imshow(image)
            plt.show()
            
        return warped
    def undistort_image(self, image):
      
        return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)
    def run(self):
                
         while True:
             
             # Only run loop if we have an image
             if self.process:                 
                 
                 filename = "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/check_test.png"	
                 image = cv2.imread(filename, flags=cv2.IMREAD_COLOR)
                 
                 if self.calibrated is not True:
                     #print("Calibrating...")
                     cornersImage = self.camera_cal(image)
                     cvImage = cornersImage
                     
                 else:
                     correctedImage = self.undistort_image(self.latestImage)	# Distortion Correction Function
                     transformImage = self.perspective_transform(self.latestImage)
                     cvImage = transformImage
                     
                 # Publish Undistorted Image            
                 try:
                     imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8") #"mono8" "bgr8"
                     self.image_pub.publish(imgmsg)
                 except CvBridgeError as e:
                     print(e)
def main(args):
  rospy.init_node('camera_calibarion', anonymous=True)
  cc = camera_calibarion() 
  cc.run() 
  try:
    rospy.spin()
  except KeyboardInterrupt:
    print("Shutting down")
  cv2.destroyAllWindows()
if __name__ == '__main__':
    main(sys.argv)
 | 
	bsd-2-clause | 
| 
	AnasGhrab/scikit-learn | 
	sklearn/decomposition/pca.py | 
	192 | 
	23117 | 
	""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
#         Olivier Grisel <[email protected]>
#         Mathieu Blondel <[email protected]>
#         Denis A. Engemann <[email protected]>
#         Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
    """Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
    Parameters
    ----------
    spectrum: array of shape (n)
        Data spectrum.
    rank: int
        Tested rank value.
    n_samples: int
        Number of samples.
    n_features: int
        Number of features.
    Returns
    -------
    ll: float,
        The log-likelihood
    Notes
    -----
    This implements the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
    """
    if rank > len(spectrum):
        raise ValueError("The tested rank cannot exceed the rank of the"
                         " dataset")
    pu = -rank * log(2.)
    for i in range(rank):
        pu += (gammaln((n_features - i) / 2.)
               - log(np.pi) * (n_features - i) / 2.)
    pl = np.sum(np.log(spectrum[:rank]))
    pl = -pl * n_samples / 2.
    if rank == n_features:
        pv = 0
        v = 1
    else:
        v = np.sum(spectrum[rank:]) / (n_features - rank)
        pv = -np.log(v) * n_samples * (n_features - rank) / 2.
    m = n_features * rank - rank * (rank + 1.) / 2.
    pp = log(2. * np.pi) * (m + rank + 1.) / 2.
    pa = 0.
    spectrum_ = spectrum.copy()
    spectrum_[rank:n_features] = v
    for i in range(rank):
        for j in range(i + 1, len(spectrum)):
            pa += log((spectrum[i] - spectrum[j]) *
                      (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
    ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
    return ll
def _infer_dimension_(spectrum, n_samples, n_features):
    """Infers the dimension of a dataset of shape (n_samples, n_features)
    The dataset is described by its spectrum `spectrum`.
    """
    n_spectrum = len(spectrum)
    ll = np.empty(n_spectrum)
    for rank in range(n_spectrum):
        ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
    return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
    """Principal component analysis (PCA)
    Linear dimensionality reduction using Singular Value Decomposition of the
    data and keeping only the most significant singular vectors to project the
    data to a lower dimensional space.
    This implementation uses the scipy.linalg implementation of the singular
    value decomposition. It only works for dense arrays and is not scalable to
    large dimensional data.
    The time complexity of this implementation is ``O(n ** 3)`` assuming
    n ~ n_samples ~ n_features.
    Read more in the :ref:`User Guide <PCA>`.
    Parameters
    ----------
    n_components : int, None or string
        Number of components to keep.
        if n_components is not set all components are kept::
            n_components == min(n_samples, n_features)
        if n_components == 'mle', Minka\'s MLE is used to guess the dimension
        if ``0 < n_components < 1``, select the number of components such that
        the amount of variance that needs to be explained is greater than the
        percentage specified by n_components
    copy : bool
        If False, data passed to fit are overwritten and running
        fit(X).transform(X) will not yield the expected results,
        use fit_transform(X) instead.
    whiten : bool, optional
        When True (False by default) the `components_` vectors are divided
        by n_samples times singular values to ensure uncorrelated outputs
        with unit component-wise variances.
        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Principal axes in feature space, representing the directions of
        maximum variance in the data.
    explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
    mean_ : array, [n_features]
        Per-feature empirical mean, estimated from the training set.
    n_components_ : int
        The estimated number of components. Relevant when n_components is set
        to 'mle' or a number between 0 and 1 to select using explained
        variance.
    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA model
        from Tipping and Bishop 1999. See "Pattern Recognition and
        Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and to score samples.
    Notes
    -----
    For n_components='mle', this class uses the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
    Implements the probabilistic PCA model from:
    M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
    Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
    via the score and score_samples methods.
    See http://www.miketipping.com/papers/met-mppca.pdf
    Due to implementation subtleties of the Singular Value Decomposition (SVD),
    which is used in this implementation, running fit twice on the same matrix
    can lead to principal components with signs flipped (change in direction).
    For this reason, it is important to always use the same estimator object to
    transform data in a consistent fashion.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.decomposition import PCA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> pca = PCA(n_components=2)
    >>> pca.fit(X)
    PCA(copy=True, n_components=2, whiten=False)
    >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
    [ 0.99244...  0.00755...]
    See also
    --------
    RandomizedPCA
    KernelPCA
    SparsePCA
    TruncatedSVD
    """
    def __init__(self, n_components=None, copy=True, whiten=False):
        self.n_components = n_components
        self.copy = copy
        self.whiten = whiten
    def fit(self, X, y=None):
        """Fit the model with X.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._fit(X)
        return self
    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        U, S, V = self._fit(X)
        U = U[:, :self.n_components_]
        if self.whiten:
            # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
            U *= sqrt(X.shape[0])
        else:
            # X_new = X * V = U * S * V^T * V = U * S
            U *= S[:self.n_components_]
        return U
    def _fit(self, X):
        """Fit the model on X
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        -------
        U, s, V : ndarrays
            The SVD of the input data, copied and centered when
            requested.
        """
        X = check_array(X)
        n_samples, n_features = X.shape
        X = as_float_array(X, copy=self.copy)
        # Center data
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_
        U, S, V = linalg.svd(X, full_matrices=False)
        explained_variance_ = (S ** 2) / n_samples
        explained_variance_ratio_ = (explained_variance_ /
                                     explained_variance_.sum())
        components_ = V
        n_components = self.n_components
        if n_components is None:
            n_components = n_features
        elif n_components == 'mle':
            if n_samples < n_features:
                raise ValueError("n_components='mle' is only supported "
                                 "if n_samples >= n_features")
            n_components = _infer_dimension_(explained_variance_,
                                             n_samples, n_features)
        elif not 0 <= n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d"
                             % (n_components, n_features))
        if 0 < n_components < 1.0:
            # number of components for which the cumulated explained variance
            # percentage is superior to the desired threshold
            ratio_cumsum = explained_variance_ratio_.cumsum()
            n_components = np.sum(ratio_cumsum < n_components) + 1
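            # Worked example (illustrative comment only): with an explained
            # variance ratio of [0.7, 0.2, 0.1] and n_components=0.85, the
            # cumulative sum is [0.7, 0.9, 1.0], so np.sum(cumsum < 0.85) + 1
            # keeps 2 components, the smallest number reaching 85% variance.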
        # Compute noise covariance using Probabilistic PCA model
        # The sigma2 maximum likelihood (cf. eq. 12.46)
        if n_components < n_features:
            self.noise_variance_ = explained_variance_[n_components:].mean()
        else:
            self.noise_variance_ = 0.
        # store n_samples to revert whitening when getting covariance
        self.n_samples_ = n_samples
        self.components_ = components_[:n_components]
        self.explained_variance_ = explained_variance_[:n_components]
        explained_variance_ratio_ = explained_variance_ratio_[:n_components]
        self.explained_variance_ratio_ = explained_variance_ratio_
        self.n_components_ = n_components
        return (U, S, V)
    def get_covariance(self):
        """Compute data covariance with the generative model.
        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where  S**2 contains the explained variances.
        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov
    def get_precision(self):
        """Compute data precision matrix with the generative model.
        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.
        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]
        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())
        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision
    def transform(self, X):
        """Apply the dimensionality reduction on X.
        X is projected onto the first principal components previously extracted
        from a training set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'mean_')
        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        if self.whiten:
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed
    def inverse_transform(self, X):
        """Transform data back to its original space, i.e.,
        return an input X_original whose transform would be X
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.
        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)
        """
        check_is_fitted(self, 'mean_')
        if self.whiten:
            return fast_dot(
                X,
                np.sqrt(self.explained_variance_[:, np.newaxis]) *
                self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
    def score_samples(self, X):
        """Return the log-likelihood of each sample
        See. "Pattern Recognition and Machine Learning"
        by C. Bishop, 12.2.1 p. 574
        or http://www.miketipping.com/papers/met-mppca.pdf
        Parameters
        ----------
        X: array, shape(n_samples, n_features)
            The data.
        Returns
        -------
        ll: array, shape (n_samples,)
            Log-likelihood of each sample under the current model
        """
        check_is_fitted(self, 'mean_')
        X = check_array(X)
        Xr = X - self.mean_
        n_features = X.shape[1]
        log_like = np.zeros(X.shape[0])
        precision = self.get_precision()
        log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
        log_like -= .5 * (n_features * log(2. * np.pi)
                          - fast_logdet(precision))
        return log_like
    def score(self, X, y=None):
        """Return the average log-likelihood of all samples
        See. "Pattern Recognition and Machine Learning"
        by C. Bishop, 12.2.1 p. 574
        or http://www.miketipping.com/papers/met-mppca.pdf
        Parameters
        ----------
        X: array, shape(n_samples, n_features)
            The data.
        Returns
        -------
        ll: float
            Average log-likelihood of the samples under the current model
        """
        return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
    """Principal component analysis (PCA) using randomized SVD
    Linear dimensionality reduction using approximated Singular Value
    Decomposition of the data and keeping only the most significant
    singular vectors to project the data to a lower dimensional space.
    Read more in the :ref:`User Guide <RandomizedPCA>`.
    Parameters
    ----------
    n_components : int, optional
        Maximum number of components to keep. When not given or None, this
        is set to n_features (the second dimension of the training data).
    copy : bool
        If False, data passed to fit are overwritten and running
        fit(X).transform(X) will not yield the expected results,
        use fit_transform(X) instead.
    iterated_power : int, optional
        Number of iterations for the power method. 3 by default.
    whiten : bool, optional
        When True (False by default) the `components_` vectors are divided
        by the singular values to ensure uncorrelated outputs with unit
        component-wise variances.
        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton.
    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Components with maximum variance.
    explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0.
    mean_ : array, [n_features]
        Per-feature empirical mean, estimated from the training set.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.decomposition import RandomizedPCA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> pca = RandomizedPCA(n_components=2)
    >>> pca.fit(X)                 # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    RandomizedPCA(copy=True, iterated_power=3, n_components=2,
           random_state=None, whiten=False)
    >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
    [ 0.99244...  0.00755...]
    See also
    --------
    PCA
    TruncatedSVD
    References
    ----------
    .. [Halko2009] `Finding structure with randomness: Stochastic algorithms
      for constructing approximate matrix decompositions Halko, et al., 2009
      (arXiv:0909.4061)`
    .. [MRT] `A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
    """
    def __init__(self, n_components=None, copy=True, iterated_power=3,
                 whiten=False, random_state=None):
        self.n_components = n_components
        self.copy = copy
        self.iterated_power = iterated_power
        self.whiten = whiten
        self.random_state = random_state
    def fit(self, X, y=None):
        """Fit the model with X by extracting the first principal components.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._fit(check_array(X))
        return self
    def _fit(self, X):
        """Fit the model to the data X.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        -------
        X : ndarray, shape (n_samples, n_features)
            The input data, copied, centered and whitened when requested.
        """
        random_state = check_random_state(self.random_state)
        X = np.atleast_2d(as_float_array(X, copy=self.copy))
        n_samples = X.shape[0]
        # Center data
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        U, S, V = randomized_svd(X, n_components,
                                 n_iter=self.iterated_power,
                                 random_state=random_state)
        self.explained_variance_ = exp_var = (S ** 2) / n_samples
        full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        if self.whiten:
            self.components_ = V / S[:, np.newaxis] * np.sqrt(n_samples)
        else:
            self.components_ = V
        return X
    def transform(self, X, y=None):
        """Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
        from a training set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'mean_')
        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X = fast_dot(X, self.components_.T)
        return X
    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        X = check_array(X)
        X = self._fit(X)
        return fast_dot(X, self.components_.T)
    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.
        Returns an array X_original whose transform would be X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.
        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)
        Notes
        -----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
        """
        check_is_fitted(self, 'mean_')
        X_original = fast_dot(X, self.components_)
        if self.mean_ is not None:
            X_original = X_original + self.mean_
        return X_original
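# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged example of the fit/transform/inverse_transform round trip
# of the RandomizedPCA class above. The helper below is hypothetical and only
# runs when called explicitly; it assumes the scikit-learn utilities used by
# the class (check_array, check_random_state, as_float_array, fast_dot,
# check_is_fitted) are imported elsewhere in this module.
def _demo_randomized_pca(n_samples=100, n_features=8, n_components=3):
    """Project random data onto a few components and measure reconstruction error."""
    rng = np.random.RandomState(0)
    data = rng.standard_normal((n_samples, n_features))
    pca = RandomizedPCA(n_components=n_components)
    scores = pca.fit_transform(data)        # shape (n_samples, n_components)
    approx = pca.inverse_transform(scores)  # back to (n_samples, n_features)
    # The relative error shrinks as n_components approaches n_features.
    return np.linalg.norm(data - approx) / np.linalg.norm(data)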
 | 
	bsd-3-clause | 
| 
	kbrose/article-tagging | 
	lib/tagnews/utils/load_data.py | 
	1 | 
	18109 | 
	import pandas as pd
import numpy as np
import re
import json
import os
import warnings
import shutil
from pathlib import Path
import codecs
"""
Helper functions to load the article data. The main method to use
is load_data().
"""
# Caution! Modifying this in code will have no effect since the
# default arguments are populated with this reference at creation
# time, so post-hoc modifications will do nothing.
__data_folder = os.path.join(os.path.split(__file__)[0], '..', 'data')
def clean_string(s):
    """
    Clean all the HTML/Unicode nastiness out of a string.
    Replaces newlines with spaces.
    """
    return s.replace('\r', '').replace('\n', ' ').replace('\xa0', ' ').strip()
def load_articles(data_folder=__data_folder, nrows=None):
    """
    Loads the articles CSV. Can optionally only load the first
    `nrows` number of rows.
    """
    column_names = ['id',
                    'feedname',
                    'url',
                    'orig_html',
                    'title',
                    'bodytext',
                    'relevant',
                    'created',
                    'last_modified',
                    'news_source_id',
                    'author']
    return pd.read_csv(os.path.join(data_folder,
                                    'newsarticles_article.csv'),
                       header=None,
                       names=column_names,
                       nrows=nrows,
                       dtype={'orig_html': str, 'author': str})
def load_taggings(data_folder=__data_folder):
    """Loads the type-of-crime human tagging of the articles."""
    uc_column_names = ['id', 'date', 'relevant',
                       'article_id', 'user_id', 'locations']
    uc = pd.read_csv(os.path.join(data_folder,
                                  'newsarticles_usercoding.csv'),
                     header=None,
                     names=uc_column_names)
    uc.set_index('id', drop=True, inplace=True)
    uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
    uc_tags = pd.read_csv(
        os.path.join(data_folder, 'newsarticles_usercoding_categories.csv'),
        header=None,
        names=uc_tags_column_names
    )
    uc_tags.set_index('usercoding_id', drop=True, inplace=True)
    uc_tags['article_id'] = uc.loc[uc_tags.index, 'article_id']
    return uc_tags
def load_model_categories(data_folder=__data_folder):
    tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
    tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
    tcr = pd.read_csv(
        os.path.join(data_folder, 'newsarticles_trainedcategoryrelevance.csv'),
        names=tcr_names
    )
    tc = pd.read_csv(
        os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
        names=tc_names
    ).set_index('id', drop=True)
    tcr['article_id'] = tc.loc[tcr['coding_id']]['article_id'].values
    return tcr
def load_model_locations(data_folder=__data_folder):
    tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id']
    tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
    tl = pd.read_csv(
        os.path.join(data_folder, 'newsarticles_trainedlocation.csv'),
        names=tl_names
    )
    tc = pd.read_csv(
        os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
        names=tc_names
    ).set_index('id', drop=True)
    tl['article_id'] = tc.loc[tl['coding_id']]['article_id'].values
    return tl
def load_locations(data_folder=__data_folder):
    """Load the human-extracted locations from the articles."""
    uc_column_names = ['id', 'date', 'relevant',
                       'article_id', 'user_id', 'locations']
    uc = pd.read_csv(os.path.join(data_folder,
                                  'newsarticles_usercoding.csv'),
                     header=None,
                     names=uc_column_names)
    uc['locations'] = uc['locations'].apply(lambda x: json.loads(x))
    return uc
def load_categories(data_folder=__data_folder):
    """Loads the mapping of id to names/abbrevations of categories"""
    column_names = ['id', 'category_name', 'abbreviation', 'created',
                    'active', 'kind']
    return pd.read_csv(os.path.join(data_folder, 'newsarticles_category.csv'),
                       header=None,
                       names=column_names)
def load_data(data_folder=__data_folder, nrows=None):
    """
    Creates a dataframe of the article information and k-hot encodes the tags
    into one 0/1 column per category abbreviation (model-predicted tags get a
    '_model' suffix). The k-hot encoding is done assuming that the categories
    are 1-indexed and there are as many categories as the maximum value of
    the numerical category_id column.
    Inputs:
        data_folder:
            A folder containing the data files in CSV format.
        nrows:
            Number of articles to load. Defaults to all, which uses about 4
            GB of memory.
    """
    df = load_articles(data_folder=data_folder, nrows=nrows)
    df['relevant'] = df['relevant'] == 't'
    df.rename(columns={'id': 'article_id'}, inplace=True)
    df.set_index('article_id', drop=True, inplace=True)
    # hopefully this will save some memory/space, can add back if needed
    del(df['orig_html'])
    tags_df = load_taggings(data_folder)
    # will help caching
    tags_df.sort_values(by='article_id', inplace=True)
    tags_df = tags_df.loc[tags_df['article_id'].isin(
        df.index.intersection(tags_df['article_id']))]
    locs_df = load_locations(data_folder)
    locs_df.sort_values(by='article_id', inplace=True)
    locs_df = locs_df.loc[locs_df['article_id'].isin(
        df.index.intersection(locs_df['article_id']))]
    model_tags_df = load_model_categories(data_folder)
    # will help caching
    model_tags_df.sort_values(by='article_id', inplace=True)
    model_tags_df = model_tags_df.loc[model_tags_df['article_id'].isin(
        df.index.intersection(model_tags_df['article_id']))]
    # init with empty lists
    df['locations'] = np.empty([df.shape[0], 0]).tolist()
    loc_article_ids = locs_df['article_id'].values
    df.loc[loc_article_ids, 'locations'] = locs_df['locations'].values
    def find_loc_in_string(locs, string):
        """
        The locations are generated from JavaScript, which means there are
        going to be some problems getting things to line up exactly and
        neatly. This function will hopefully perform all the necessary
        transformations to find the given location text within the
        larger string.
        Inputs:
            locs: list of locations as loaded by load_locations
            string: bodytext of article in which to find locs
        Returns:
            updated_locs: list of locations as loaded by
                load_locations, but with a couple
                extra fields ('cleaned text' and 'cleaned span').
        """
        for i, loc in enumerate(locs):
            loc_txt = loc['text']
            loc_txt = clean_string(loc_txt)
            string = clean_string(string)
            loc['cleaned text'] = loc_txt
            spans = [x.span() for x in re.finditer(re.escape(loc_txt), string)]
            if spans:
                # The string may have occurred multiple times, and since the
                # spans don't line up perfectly we can't know which one is the
                # "correct" one. Best we can do is find the python span closest
                # to the expected javascript span.
                closest = np.argmin(np.abs(
                    np.array([x[0] for x in spans]) - loc['start']
                ))
                loc['cleaned span'] = spans[closest]
            locs[i] = loc
        return locs
    df['locations'] = df.apply(
        lambda r: find_loc_in_string(r['locations'], r['bodytext']),
        axis=1
    )
    num_no_match = df['locations'].apply(
        lambda locs: any([('cleaned span' not in loc) for loc in locs])
    ).sum()
    if num_no_match:
        warnings.warn(('{} location strings were not found in'
                       ' the bodytext.').format(num_no_match),
                      RuntimeWarning)
    model_locations_df = load_model_locations(data_folder)
    model_locations_df = model_locations_df.set_index('article_id')
    model_locations_gb = model_locations_df.groupby('article_id')
    model_locations_text = model_locations_gb['text'].apply(list)
    df['model_location_text'] = model_locations_text
    categories_df = load_categories(data_folder)
    categories_df.set_index('id', drop=True, inplace=True)
    # tags_df['category_id'] = tags_df['category_id'].astype(str)
    tags_df['category_abbreviation'] = (categories_df
                                        ['abbreviation']
                                        [tags_df['category_id']]
                                        .values)
    model_tags_df['category_abbreviation'] = (categories_df
                                              ['abbreviation']
                                              [model_tags_df['category_id']]
                                              .values)
    if np.setdiff1d(tags_df['article_id'].values, df.index.values).size:
        warnings.warn('Tags were found for article IDs that do not exist.',
                      RuntimeWarning)
    def update_df_with_categories(article_ids, cat_abbreviations, vals,
                                  is_model):
        # for some reason, some articles that are tagged don't show up
        # in the articles CSV. filter those out.
        existing_ids_filter = np.isin(article_ids, df.index.values)
        article_ids = article_ids[existing_ids_filter]
        cat_abbreviations = cat_abbreviations[existing_ids_filter]
        vals = vals[existing_ids_filter]
        for i in range(categories_df.shape[0]):
            cat_name = categories_df.loc[i+1, 'abbreviation']
            if is_model:
                cat_name += '_model'
            df[cat_name] = 0
            if not is_model:
                df[cat_name] = df[cat_name].astype('int8')
            matches = cat_abbreviations == cat_name
            if not matches.sum():
                continue
            df.loc[article_ids[matches], cat_name] = vals[matches]
    update_df_with_categories(
        model_tags_df['article_id'].values,
        model_tags_df['category_abbreviation'].values + '_model',
        model_tags_df['relevance'].values,
        is_model=True
    )
    update_df_with_categories(
        tags_df['article_id'].values,
        tags_df['category_abbreviation'].values,
        np.ones((tags_df['article_id'].values.shape), dtype='int8'),
        is_model=False
    )
    df.loc[df['bodytext'].isnull(), 'bodytext'] = ''
    return df
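# --- Illustration (not part of the original module) -------------------------
# A small, self-contained sketch of the k-hot encoding performed inside
# load_data() above: one 0/1 column per category abbreviation. The
# abbreviations 'HOMI' and 'GUNV' below are hypothetical stand-ins for the
# real ones loaded from newsarticles_category.csv.
def _demo_k_hot_encoding():
    articles = pd.DataFrame({'bodytext': ['a', 'b', 'c']}, index=[10, 11, 12])
    tags = pd.DataFrame({'article_id': [10, 10, 12],
                         'category_abbreviation': ['HOMI', 'GUNV', 'HOMI']})
    for cat in ['HOMI', 'GUNV']:
        articles[cat] = 0
        matches = tags['category_abbreviation'] == cat
        articles.loc[tags.loc[matches, 'article_id'], cat] = 1
    return articles  # article 10 -> HOMI=1, GUNV=1; article 12 -> HOMI=1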
def subsample_and_resave(out_folder, n=5, input_folder=__data_folder,
                         random_seed=5):
    """
    Subsamples the CSV data files so that we have at least
    `n` articles from each type-of-crime tag as determined
    by the human coding. Saves the subsampled CSV data
    into `out_folder`. If there are fewer than `n` articles
    tagged with a type-of-crime, then we will use all of
    the articles with that tag.
    Inputs
    ------
    out_folder : str
        Path to folder where data should be saved. Should already exist.
    n : int
        How many examples from each category should we have?
    input_folder : str
        Path to where the full CSV files are saved.
    random_seed : None or int
        np.random.RandomState() will be seeded with this value
        in order to perform the random subsampling.
    """
    out_folder = str(Path(out_folder).expanduser().absolute())
    input_folder = str(Path(input_folder).expanduser().absolute())
    if out_folder == input_folder:
        raise RuntimeError('out_folder cannot match input_folder.')
    random_state = np.random.RandomState(random_seed)
    df = load_data(input_folder)
    chosen_indexes = []
    for crime_type in df.loc[:, 'OEMC':].columns:
        is_type = df[crime_type].astype(bool)
        n_samps = min(n, is_type.sum())
        chosen_indexes += (df.loc[is_type, :]
                           .sample(n_samps, random_state=random_state)
                           .index
                           .tolist())
    del df
    chosen_indexes = sorted(list(set(chosen_indexes)))
    # newsarticles_article.csv
    articles_df = load_articles(input_folder)
    sample = (articles_df
              .reset_index()
              .set_index('id')
              .loc[chosen_indexes, 'index'])
    articles_df = articles_df.loc[sample, :]
    # lightly obfuscate the article text with ROT-13 before saving
    articles_df['bodytext'] = articles_df['bodytext'].astype(str).apply(
        lambda x: codecs.encode(x, 'rot-13')
    )
    articles_df.to_csv(os.path.join(out_folder, 'newsarticles_article.csv'),
                       header=None, index=False)
    del articles_df
    # newsarticles_category.csv
    shutil.copyfile(os.path.join(input_folder, 'newsarticles_category.csv'),
                    os.path.join(out_folder, 'newsarticles_category.csv'))
    # newsarticles_usercoding.csv
    uc_column_names = ['id', 'date', 'relevant',
                       'article_id', 'user_id', 'locations']
    uc_df = pd.read_csv(os.path.join(input_folder,
                                     'newsarticles_usercoding.csv'),
                        header=None,
                        names=uc_column_names)
    sample = np.where(uc_df['article_id'].isin(chosen_indexes))[0]
    uc_df.loc[sample, :].to_csv(
        os.path.join(out_folder, 'newsarticles_usercoding.csv'),
        header=None, index=False
    )
    uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
    # newsarticles_usercoding_categories.csv
    uc_tags_df = pd.read_csv(
        os.path.join(input_folder,
                     'newsarticles_usercoding_categories.csv'),
        header=None,
        names=uc_tags_column_names,
        dtype={'id': int, 'usercoding_id': int, 'category_id': int}
    )
    sample = np.where(uc_df
                      .set_index('id')
                      .loc[uc_tags_df['usercoding_id'], 'article_id']
                      .isin(chosen_indexes)
                      )[0]
    uc_tags_df = uc_tags_df.loc[sample, :]
    uc_tags_df.to_csv(
        os.path.join(out_folder, 'newsarticles_usercoding_categories.csv'),
        header=None, index=False
    )
    # newsarticles_trainedcoding
    tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
    tc = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedcoding.csv'),
        names=tc_names
    )
    tc = tc.loc[tc['article_id'].isin(chosen_indexes)]
    tc.to_csv(
        os.path.join(out_folder, 'newsarticles_trainedcoding.csv'),
        header=False, index=False
    )
    # newsarticles_trainedcategoryrelevance
    tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
    tcr = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedcategoryrelevance.csv'),
        names=tcr_names
    )
    tcr = tcr.loc[tcr['coding_id'].isin(tc['id'])]
    tcr.to_csv(
        os.path.join(out_folder, 'newsarticles_trainedcategoryrelevance.csv'),
        header=False, index=False
    )
    # newsarticles_trainedlocation
    tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id']
    tl = pd.read_csv(
        os.path.join(input_folder, 'newsarticles_trainedlocation.csv'),
        names=tl_names
    )
    tl = tl.loc[tl['coding_id'].isin(tc['id'])]
    tl.to_csv(
        os.path.join(out_folder, 'newsarticles_trainedlocation.csv'),
        header=False, index=False
    )
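# --- Usage sketch (not part of the original module) -------------------------
# How subsample_and_resave() might be used to build a small fixture dataset
# for tests. The output path is hypothetical, and the call assumes the full
# CSV files are present in the default data folder.
def _demo_subsample(out_folder='/tmp/tagnews-sample'):
    if not os.path.isdir(out_folder):
        os.makedirs(out_folder)
    subsample_and_resave(out_folder, n=5, input_folder=__data_folder)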
def load_crime_data(data_folder=__data_folder):
    crimes = pd.read_csv(os.path.join(data_folder, 'Crimes.csv'))
    crimes = crimes[crimes['Year'] > 2010]
    crime_string = pd.Series('', crimes.index)
    # ['ID', 'Case Number', 'Date', 'Block', 'IUCR', 'Primary Type',
    #  'Description', 'Location Description', 'Arrest', 'Domestic', 'Beat',
    #  'District', 'Ward', 'Community Area', 'FBI Code', 'X Coordinate',
    #  'Y Coordinate', 'Year', 'Updated On', 'Latitude', 'Longitude',
    #  'Location']
    # TODO: synonyms on this for month name, weekday name,
    # time of day (e.g. afternoon), etc.
    crime_string += crimes['Date'] + ' '
    # TODO: synonyms?
    crime_string += crimes['Primary Type'] + ' '
    # TODO: synonyms?
    crime_string += crimes['Description'] + ' '
    # TODO: synonyms?
    crime_string += crimes['Location Description'] + ' '
    # TODO: synonyms?
    iucr = pd.read_csv(os.path.join(data_folder, 'IUCR.csv'))
    iucr.set_index('IUCR', drop=True, inplace=True)
    idx = iucr.index
    idx_values = idx.values
    idx_values[idx.str.len() == 3] = '0' + idx_values[idx.str.len() == 3]
    crime_string += (iucr.loc[crimes['IUCR'], 'PRIMARY DESCRIPTION']
                     .fillna('')
                     .values
                     + ' ')
    crime_string += (iucr.loc[crimes['IUCR'], 'SECONDARY DESCRIPTION']
                     .fillna('')
                     .values
                     + ' ')
    community_areas = pd.read_csv(os.path.join(data_folder, 'CommAreas.csv'))
    community_areas.set_index('AREA_NUM_1', inplace=True, drop=True)
    crime_string += (community_areas.loc[crimes['Community Area'], 'COMMUNITY']
                     .fillna('')
                     .values
                     + ' ')
    return crimes, crime_string
def load_ner_data(data_folder=__data_folder):
    """
    Loads ner.csv from the specified data folder.
    The column 'stag' is a binary value indicating whether or not
    the row corresponds to the entity "geo". Typically, you will
    want to use column 'word' to predict the column 'stag'.
    """
    df = pd.read_csv(os.path.join(data_folder, 'ner.csv'),
                     encoding="ISO-8859-1",
                     error_bad_lines=False,
                     index_col=0)
    df.dropna(subset=['word', 'tag'], inplace=True)
    df.reset_index(inplace=True, drop=True)
    df['stag'] = (df['tag'] == 'B-geo') | (df['tag'] == 'I-geo')
    df['all_tags'] = df['tag']
    df['tag'] = df['stag']
    df = df[['word', 'all_tags', 'tag']]
    return df
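# --- Usage sketch (not part of the original module) -------------------------
# Example of turning the output of load_ner_data() into (word, label) arrays
# for training a geotagging model, as suggested by the docstring above. The
# 80/20 split is an arbitrary illustrative choice.
def _demo_ner_split(data_folder=__data_folder):
    ner = load_ner_data(data_folder)
    words = ner['word'].values
    labels = ner['tag'].astype(int).values  # 1 where the token is a "geo" entity
    cut = int(0.8 * len(words))
    return (words[:cut], labels[:cut]), (words[cut:], labels[cut:])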
 | 
	mit | 
| 
	victorbergelin/scikit-learn | 
	examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 
	227 | 
	5170 | 
	"""
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries with
a Locality Sensitive Hashing Forest varies as the number of candidates and
the number of estimators (trees) change.
In the first plot, accuracy is plotted against the number of candidates. Here,
the term "number of candidates" refers to the maximum number of distinct
points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. The number of estimators
is held at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
                  n_features=n_features, centers=10,
                  random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
                        metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
                            n_candidates_values.shape[0]),
                           dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
                         n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
    for i, n_candidates in enumerate(n_candidates_values):
        accuracy_c = []
        for seed in range(n_iter):
            lshf = LSHForest(n_estimators=value,
                             n_candidates=n_candidates, n_neighbors=1,
                             random_state=seed)
            # Build the LSH Forest index
            lshf.fit(X_index)
            # Get neighbors
            neighbors_approx = lshf.kneighbors(X_query,
                                               return_distance=False)
            accuracy_c.append(np.sum(np.equal(neighbors_approx,
                                              neighbors_exact)) /
                              n_queries)
        stds_accuracies[j, i] = np.std(accuracy_c)
        accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
    lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
    # Build the LSH Forest index
    lshf.fit(X_index)
    # Get neighbors
    neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
    accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
                                          neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
    label = 'n_estimators = %d ' % n_estimators
    plt.plot(n_candidates_values, accuracies_c[i, :],
             'o-', c=colors[i], label=label)
    plt.errorbar(n_candidates_values, accuracies_c[i, :],
                 stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
 | 
	bsd-3-clause | 
| 
	wdxtub/Patriots | 
	static/code/sentiment_lstm.py | 
	1 | 
	10671 | 
	# -*- coding: utf-8 -*-
from __future__ import absolute_import # import Python 3.x features
from __future__ import print_function
import yaml
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import pandas as pd # import Pandas
import numpy as np # import Numpy
import jieba # import the jieba Chinese word segmenter
import h5py, pickle, os, datetime
from keras.models import model_from_json, save_model
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential, model_from_yaml
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
sys.setrecursionlimit(1000000)
# save model http://www.linuxdiyf.com/linux/22940.html
# http://www.linuxdiyf.com/linux/22937.html
#
# http://spaces.ac.cn/archives/3414/
# https://github.com/BUPTLdy/Sentiment-Analysis
# http://blog.sina.com.cn/s/blog_735f29100102wjwu.html
# http://blog.csdn.net/weixin_36541072/article/details/53786020
# https://keras-cn.readthedocs.io/en/latest/getting_started/concepts/
# https://keras-cn.readthedocs.io/en/latest/getting_started/keras_linux/
# Set parameters
maxlen = 50
lstm_batch_size = 16
lstm_epochs = 15
datadir = ''
modeldir = '../model/lstm_didi'
testdir = ''
# Load the training files
def loadfile():
  print("Reading corpus data")
  neg=pd.read_excel(datadir + '/neg.xls',header=None,index=None)
  mid=pd.read_excel(datadir + '/pos.xls',header=None,index=None)
  print("Finished reading the training corpus")
  print("Labelling the training corpus")
  mid['mark']=1
  neg['mark']=0
  print("Merging the corpora")
  pn=pd.concat([mid,neg],ignore_index=True)
  neglen=len(neg)
  midlen=len(mid) # count the corpus sizes
  print('neg count:' + str(neglen))
  print('pos count:' + str(midlen))
  return pn
def tokenizer(text):
  cw = lambda x: list(jieba.cut(x)) # define the tokenisation function
  text['words'] = text[0].apply(cw)
  return text
def generatedict(text):
  # Build the dictionary and save it
  d2v_train = pd.concat([text['words']], ignore_index = True)
  w = [] # gather all the words together
  for i in d2v_train:
    w.extend(i)
  dict = pd.DataFrame(pd.Series(w).value_counts()) # count word occurrences
  del w,d2v_train
  dict['id'] = list(range(1,len(dict)+1))
  # this dict needs to be saved for later use
  outputFile = modeldir + '/dict.data'
  fw = open(outputFile, 'w')
  pickle.dump(dict,fw)
  fw.close()
  return dict
def word2index(text, dict):
  get_sent = lambda x: list(dict['id'][x])
  text['sent'] = text['words'].apply(get_sent)
  print("Pad sequences (samples x time)")
  text['sent'] = list(sequence.pad_sequences(text['sent'], maxlen=maxlen))
  return text
def getdata(text):
  x = np.array(list(text['sent']))[::2] # training set
  y = np.array(list(text['mark']))[::2]
  xt = np.array(list(text['sent']))[1::2] # test set
  yt = np.array(list(text['mark']))[1::2]
  xa = np.array(list(text['sent'])) # full set
  ya = np.array(list(text['mark']))
  return x,y,xt,yt,xa,ya
def train_lstm(dict,x,y,xt,yt):
  model = Sequential()
  model.add(Embedding(len(dict)+1, 256, input_length=maxlen))
  model.add(LSTM(output_dim=128, activation='sigmoid', inner_activation='hard_sigmoid'))
  model.add(Dropout(0.5))
  model.add(Dense(1))
  # model.add(Dense(input_dim = 32, output_dim = 1))
  model.add(Activation('sigmoid'))
  print ('Model built')
  #model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
  print ("模型编译完成")
  model.fit(x, y, batch_size=lstm_batch_size, epochs=lstm_epochs, verbose=0)
  print ("模型训练完成")
  print ("保存模型")
  yaml_string = model.to_yaml()
  with open(modeldir + '/lstm.yml', 'w') as outfile:
    outfile.write( yaml.dump(yaml_string, default_flow_style=True) )
  model.save_weights(modeldir + '/lstm.h5')
  print ("测试集评估")
  score = model.evaluate(xt, yt, verbose=0)
  print ("准确率:",score[1])
  return model
def saveresult(model, xt, text):
  classes = model.predict_classes(xt, verbose=1) 
  proba = model.predict_proba(xt, verbose=1)
  print ("\n输出结果")
  filename = 'result.txt'
  f = open('result.txt', 'w')
  i = 1
  j = 0
  for c in classes:
    f.write(str(c))
    f.write(",")
    f.write(str(proba[j]))
    f.write(",")
    line = "".join(text['words'][i])
    f.write(line.encode('utf-8'))
    f.write("\n")
    i = i + 2
    j = j + 1
  f.close()
  print ("\n排序结果")
  num = 1
  result = []
  with open(filename, 'r') as f:
    while True:
      line = f.readline()
      if not line:
        break
      print("processing line #" + str(num))
      num = num + 1
      arr = line.split(',')
      item = (int(arr[0][1:-1]), float(arr[1][2:-1]), "".join(arr[2:]))
      result.append(item)
    result.sort(key=lambda tup:tup[1])
    print(len(result))
    f = open('sorted.txt', 'w')
    for item in result:
      f.write(str(item[0]))
      f.write(",")
      f.write(str(item[1]))
      f.write(",")
      f.write(item[2])
  print("done")
def loaddict():
  fr = open(modeldir + '/dict.data')
  dict = pickle.load(fr)
  return dict
# Train the model and save it
def train():
  print('Loading Data...')
  pn = loadfile()
  print('Tokenising...')
  pn = tokenizer(pn)
  print('Generating Dict...')
  dict = generatedict(pn)
  print('Word to Index...')
  pn = word2index(pn, dict)
  print('Preparing data...')
  x,y,xt,yt,xa,ya = getdata(pn)
  print('Model Stage...')
  # train the full-data model here
  model = train_lstm(dict, xa, ya, xt, yt)
  #print('Save Test Result...')
  #saveresult(model, xt, pn)
  print("Done")
  
def batchtest(filepath):
  dict = loaddict()
  
  with open(modeldir + '/lstm.yml', 'r') as f:
    yaml_string = yaml.load(f)
  model = model_from_yaml(yaml_string)
  model.load_weights(modeldir + '/lstm.h5')
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
  
  # Read the test file
  # loop over its lines, keeping the counters outside the loop
  test_count = 0
  correct_count = 0
  if os.path.exists(filepath):
    f = open(filepath, 'r')
    try:
      lines = f.readlines()
      for line in lines:
        if len(line) <= 0:
          continue
        else:
          arr = line.split(',')
          label = arr[0]
          test_count += 1
          text = ",".join(arr[1:])
          textarr = list(jieba.cut(text))
          textvec = []
          add = 1
          for item in textarr:
            # if the word is not in the dictionary, drop it (it appears too rarely to matter)
            if item in dict['id']:
              textvec.append(dict['id'][item])
          textvec = pd.Series(textvec)  
          textvec = sequence.pad_sequences([textvec], maxlen=maxlen)
          # prediction probability
          proba = model.predict_proba(textvec, verbose=0)
          # check whether the prediction is correct
          for s in proba:
            if s[0] > 0.5 and label == '1' or s[0] <= 0.5 and label == '0':
              correct_count += 1
              print('[' + str(test_count) + ']: ' + label + ' ' + str(s[0]) + ' ' + text[:-1])
            else:
              print('[' + str(test_count) + ']:[x] ' + label + ' ' + str(s[0]) + ' ' + text[:-1])
    finally:
      f.close() # make sure the file is closed
  return correct_count, test_count
# Batch prediction to reduce memory usage; takes an array of strings
def predict_arr(arr):
  dict = loaddict()
  
  probas = []
  with open(modeldir + '/lstm.yml', 'r') as f:
    yaml_string = yaml.load(f)
  model = model_from_yaml(yaml_string)
  model.load_weights(modeldir + '/lstm.h5')
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
  for s in arr:
    textarr = list(jieba.cut(s))
    textvec = []
    add = 1
    for item in textarr:
      # if the word is not in the dictionary, drop it (it appears too rarely to matter)
      if item in dict['id']:
        textvec.append(dict['id'][item])
    textvec = pd.Series(textvec)  
    textvec = sequence.pad_sequences([textvec], maxlen=maxlen)
    
    proba = model.predict_proba(textvec, verbose=0)
    probas.append(proba[0][0])
  return probas
def predict(text):
  print('Loading Dict Data..')
  dict = loaddict()
  # Convert each word to its index in the dictionary (see the format above)
  textarr = list(jieba.cut(text))
  
  textvec = []
  add = 1
  for item in textarr:
    # if the word is not in the dictionary, drop it (it appears too rarely to matter)
    if item in dict['id']:
      textvec.append(dict['id'][item])
  textvec = pd.Series(textvec)  
  textvec = sequence.pad_sequences([textvec], maxlen=maxlen)
  
  # ---- 
  print('loading model......')
  with open(modeldir + '/lstm.yml', 'r') as f:
    yaml_string = yaml.load(f)
  model = model_from_yaml(yaml_string)
  print('loading weights......')
  model.load_weights(modeldir + '/lstm.h5')
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
  # the model is now fully loaded and ready for prediction
  #classes = model.predict_classes(textvec, verbose=1)
  proba = model.predict_proba(textvec, verbose=0)
  # the input format is changed here temporarily for the knowledge graph
  #for s in proba:
  #  if s[0] > 0.5:
  #    print('positive ' + str(s[0]) + ' ' + text)
  #  else:
  #    print('negative ' + str(s[0]) + ' ' + text)
  return proba[0][0]
if __name__=='__main__':
    argvs_length = len(sys.argv)
    if argvs_length >= 4:
        argvs = sys.argv
        action = argvs[1]
        if action == 'train': # training
            datadir = argvs[2]
            modeldir = argvs[3]
            begin = datetime.datetime.now()
            train()
            end = datetime.datetime.now()
            # Record the training duration and model size in result.txt
            with open(modeldir + '/result.txt', "w") as f:
                f.write('Training duration: ' + str(end-begin))
        elif action == 'predict':
            modeldir = argvs[2]
            sentence = " ".join(argvs[3:])
            predict(sentence)
        elif action == 'test':
            datadir = argvs[2]
            modeldir = argvs[3]
            testdir = argvs[4]
            begin = datetime.datetime.now()
            result = batchtest(datadir+'/test.txt')
            end = datetime.datetime.now()
            # Record the test duration and accuracy in result.txt
            with open(testdir + '/result.txt', "w") as f:
                f.write('Test duration: ' + str(end-begin) + '\n')
                f.write('Accuracy: ' + str(float(result[0])/float(result[1])) + ' (' + str(result[0]) + '/' + str(result[1]) + ')\n')
 | 
	gpl-3.0 | 
| 
	ztultrebor/BARKEVIOUS | 
	BARKEVIOUS.py | 
	1 | 
	1924 | 
	# coding: utf-8
#read in libraries
import cPickle as pickle
from webcrawler import coredump
from dataloader import get_trawled_data, introduce_weighting
from ratings import PowerRater
from history import historical, model_the_model
from predict import predict
from oddsmaker import read_odds
from betting import wager
csv_file = 'NBA_data_2015' # whence the data come
weight_factor = 60 # number of days over which to decrease the weight by 40%
# weight factor needs justification
HCA = 1.8
sigma = 13.5
# ask user for guidence on reading in data from web and write to csv file
user_input = raw_input("Do you want to trawl the web for the latest data? ")
if user_input in ['y', 'Y', 'yes', 'Yes', 'YES']:
    website = 'http://www.basketball-reference.com/leagues/NBA_2016_games.html'
    coredump(website, csv_file)
#load data from csv as a pandas DataFrame
data = get_trawled_data(csv_file, ['Date', 'Away', 'Away Score', 'Home', 'Home Score'])
# compile a list of historical predictions and actual outcomes
history_file = 'Predictive_outcomes_2015'
past_predictions = historical(data, weight_factor, history_file, HCA, sigma)
# get the fit parameters needed to correct errors in the historical model
beta_correct = model_the_model(past_predictions)
print 'Checking on the model parameters: %s' % beta_correct
# make predictions
todays_schedule = pickle.load(open('Today', 'rb'))
data = introduce_weighting(data, weight_factor,home_court_advantage=HCA) # add weights column
PwrRt = PowerRater(data).power_ratings # generate latest ratings
print PwrRt.sort_values(by='rating', ascending=False)
prob = []
for i in xrange(todays_schedule.shape[0]):
    prob.append(predict(todays_schedule.iloc[i], PwrRt, HCA, sigma))
todays_schedule['Prob'] = prob
# pull in odds
odds =  read_odds('Odds.csv', todays_schedule)
# determine optimal betting strategy
print wager(odds)
# pull in 538 predictions
# model the 538 model
 | 
	mit | 
| 
	pratapvardhan/pandas | 
	pandas/core/tools/numeric.py | 
	1 | 
	6034 | 
	import numpy as np
import pandas as pd
from pandas.core.dtypes.common import (
    is_scalar,
    is_numeric_dtype,
    is_decimal,
    is_datetime_or_timedelta_dtype,
    is_number,
    _ensure_object)
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas._libs import lib
def to_numeric(arg, errors='raise', downcast=None):
    """
    Convert argument to a numeric type.
    The default return dtype is `float64` or `int64`
    depending on the data supplied. Use the `downcast` parameter
    to obtain other dtypes.
    Parameters
    ----------
    arg : list, tuple, 1-d array, or Series
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaN
        - If 'ignore', then invalid parsing will return the input
    downcast : {'integer', 'signed', 'unsigned', 'float'} , default None
        If not None, and if the data has been successfully cast to a
        numerical dtype (or if the data was numeric to begin with),
        downcast that resulting data to the smallest numerical dtype
        possible according to the following rules:
        - 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
        - 'unsigned': smallest unsigned int dtype (min.: np.uint8)
        - 'float': smallest float dtype (min.: np.float32)
        As this behaviour is separate from the core conversion to
        numeric values, any errors raised during the downcasting
        will be surfaced regardless of the value of the 'errors' input.
        In addition, downcasting will only occur if the size
        of the resulting data's dtype is strictly larger than
        the dtype it is to be cast to, so if none of the dtypes
        checked satisfy that specification, no downcasting will be
        performed on the data.
        .. versionadded:: 0.19.0
    Returns
    -------
    ret : numeric if parsing succeeded.
        Return type depends on input.  Series if Series, otherwise ndarray
    Examples
    --------
    Take separate series and convert to numeric, coercing when told to
    >>> s = pd.Series(['1.0', '2', -3])
    >>> pd.to_numeric(s)
    0    1.0
    1    2.0
    2   -3.0
    dtype: float64
    >>> pd.to_numeric(s, downcast='float')
    0    1.0
    1    2.0
    2   -3.0
    dtype: float32
    >>> pd.to_numeric(s, downcast='signed')
    0    1
    1    2
    2   -3
    dtype: int8
    >>> s = pd.Series(['apple', '1.0', '2', -3])
    >>> pd.to_numeric(s, errors='ignore')
    0    apple
    1      1.0
    2        2
    3       -3
    dtype: object
    >>> pd.to_numeric(s, errors='coerce')
    0    NaN
    1    1.0
    2    2.0
    3   -3.0
    dtype: float64
    See also
    --------
    pandas.DataFrame.astype : Cast argument to a specified dtype.
    pandas.to_datetime : Convert argument to datetime.
    pandas.to_timedelta : Convert argument to timedelta.
    numpy.ndarray.astype : Cast a numpy array to a specified type.
    """
    if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'):
        raise ValueError('invalid downcasting method provided')
    is_series = False
    is_index = False
    is_scalars = False
    if isinstance(arg, ABCSeries):
        is_series = True
        values = arg.values
    elif isinstance(arg, ABCIndexClass):
        is_index = True
        values = arg.asi8
        if values is None:
            values = arg.values
    elif isinstance(arg, (list, tuple)):
        values = np.array(arg, dtype='O')
    elif is_scalar(arg):
        if is_decimal(arg):
            return float(arg)
        if is_number(arg):
            return arg
        is_scalars = True
        values = np.array([arg], dtype='O')
    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a list, tuple, 1-d array, or Series')
    else:
        values = arg
    try:
        if is_numeric_dtype(values):
            pass
        elif is_datetime_or_timedelta_dtype(values):
            values = values.astype(np.int64)
        else:
            values = _ensure_object(values)
            coerce_numeric = False if errors in ('ignore', 'raise') else True
            values = lib.maybe_convert_numeric(values, set(),
                                               coerce_numeric=coerce_numeric)
    except Exception:
        if errors == 'raise':
            raise
    # attempt downcast only if the data has been successfully converted
    # to a numerical dtype and if a downcast method has been specified
    if downcast is not None and is_numeric_dtype(values):
        typecodes = None
        if downcast in ('integer', 'signed'):
            typecodes = np.typecodes['Integer']
        elif downcast == 'unsigned' and np.min(values) >= 0:
            typecodes = np.typecodes['UnsignedInteger']
        elif downcast == 'float':
            typecodes = np.typecodes['Float']
            # pandas support goes only to np.float32,
            # as float dtypes smaller than that are
            # extremely rare and not well supported
            float_32_char = np.dtype(np.float32).char
            float_32_ind = typecodes.index(float_32_char)
            typecodes = typecodes[float_32_ind:]
        if typecodes is not None:
            # from smallest to largest
            for dtype in typecodes:
                if np.dtype(dtype).itemsize <= values.dtype.itemsize:
                    values = maybe_downcast_to_dtype(values, dtype)
                    # successful conversion
                    if values.dtype == dtype:
                        break
    if is_series:
        return pd.Series(values, index=arg.index, name=arg.name)
    elif is_index:
        # because we want to coerce to numeric if possible,
        # do not use _shallow_copy_with_infer
        return pd.Index(values, name=arg.name)
    elif is_scalars:
        return values[0]
    else:
        return values
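# --- Illustration (not part of pandas itself) --------------------------------
# A hedged sketch of the downcast rules documented above: the typecodes are
# walked from smallest to largest, the first dtype that can hold the data is
# kept, and downcasting never widens a dtype. The helper name below is
# hypothetical and is not pandas API.
def _demo_downcast_rules():
    as_int8 = to_numeric([1, 2, 3], downcast='integer')      # -> int8
    as_uint8 = to_numeric([1, 2, 3], downcast='unsigned')    # -> uint8
    as_float32 = to_numeric([1.0, 2.5], downcast='float')    # float64 -> float32
    return as_int8.dtype, as_uint8.dtype, as_float32.dtype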
 | 
	bsd-3-clause | 
| 
	CoolProp/CoolProp | 
	wrappers/Python/CoolProp/Plots/PsychScript.py | 
	2 | 
	2020 | 
	
# This file was auto-generated by the PsychChart.py script in wrappers/Python/CoolProp/Plots
if __name__ == '__main__':
    import numpy, matplotlib
    from CoolProp.HumidAirProp import HAPropsSI
    from CoolProp.Plots.Plots import InlineLabel
    p = 101325
    Tdb = numpy.linspace(-10, 60, 100) + 273.15
    # Make the figure and the axes
    fig = matplotlib.pyplot.figure(figsize=(10, 8))
    ax = fig.add_axes((0.1, 0.1, 0.85, 0.85))
    # Saturation line
    w = [HAPropsSI('W', 'T', T, 'P', p, 'R', 1.0) for T in Tdb]
    ax.plot(Tdb - 273.15, w, lw=2)
    # Humidity lines
    RHValues = [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    for RH in RHValues:
        w = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
        ax.plot(Tdb - 273.15, w, 'r', lw=1)
    # Enthalpy lines
    for H in [-20000, -10000, 0, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000]:
        # Line goes from saturation to zero humidity ratio for this enthalpy
        T1 = HAPropsSI('T', 'H', H, 'P', p, 'R', 1.0) - 273.15
        T0 = HAPropsSI('T', 'H', H, 'P', p, 'R', 0.0) - 273.15
        w1 = HAPropsSI('W', 'H', H, 'P', p, 'R', 1.0)
        w0 = HAPropsSI('W', 'H', H, 'P', p, 'R', 0.0)
        ax.plot(numpy.r_[T1, T0], numpy.r_[w1, w0], 'r', lw=1)
    ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
    ax.set_ylim(0, 0.03)
    ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
    ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
    xv = Tdb  # [K]
    for RH in [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
        yv = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
        y = HAPropsSI('W', 'P', p, 'H', 65000.000000, 'R', RH)
        T_K, w, rot = InlineLabel(xv, yv, y=y, axis=ax)
        string = r'$\phi$=' + '{s:0.0f}'.format(s=RH * 100) + '%'
        bbox_opts = dict(boxstyle='square,pad=0.0', fc='white', ec='None', alpha=0.5)
        ax.text(T_K - 273.15, w, string, rotation=rot, ha='center', va='center', bbox=bbox_opts)
    matplotlib.pyplot.show()
 | 
	mit | 
| 
	convexopt/gpkit | 
	gpkit/tests/t_examples.py | 
	1 | 
	6270 | 
	"""Unit testing of tests in docs/source/examples"""
import unittest
import os
import numpy as np
from gpkit import settings
from gpkit.tests.helpers import generate_example_tests
from gpkit.small_scripts import mag
from gpkit.small_classes import Quantity
def assert_logtol(first, second, logtol=1e-6):
    "Asserts that the logs of two arrays have a given abstol"
    np.testing.assert_allclose(np.log(mag(first)), np.log(mag(second)),
                               atol=logtol, rtol=0)
# pylint: disable=too-many-public-methods
class TestExamples(unittest.TestCase):
    """
    To test a new example, add a function called `test_$EXAMPLENAME`, where
    $EXAMPLENAME is the name of your example in docs/source/examples without
    the file extension.
    This function should accept two arguments (e.g. 'self' and 'example').
    The imported example script will be passed to the second: anything that
    was a global variable (e.g, "sol") in the original script is available
    as an attribute (e.g., "example.sol")
    If you don't want to perform any checks on the example besides making
    sure it runs, just put "pass" as the function's body, e.g.:
          def test_dummy_example(self, example):
              pass
    But it's good practice to check the example's solution as well, e.g.:
          def test_dummy_example(self, example):
              self.assertAlmostEqual(example.sol["cost"], 3.121)
    """
    # TODO: allow enabling plotting examples, make plots in correct folder...
    # def test_plot_sweep1d(self, _):
    #     import matplotlib.pyplot as plt
    #     plt.close("all")
    def test_autosweep(self, example):
        from gpkit import ureg
        bst1, tol1 = example.bst1, example.tol1
        bst2, tol2 = example.bst2, example.tol2
        l_ = np.linspace(1, 10, 100)
        for bst in [bst1, example.bst1_loaded]:
            sol1 = bst.sample_at(l_)
            assert_logtol(sol1("l"), l_)
            assert_logtol(sol1("A"), l_**2 + 1, tol1)
            assert_logtol(sol1["cost"], (l_**2 + 1)**2, tol1)
            if hasattr(sol1["cost"], "units"):  # loaded costs are unitless
                self.assertEqual(Quantity(1.0, sol1["cost"].units),
                                 Quantity(1.0, ureg.m)**4)
            self.assertEqual(Quantity(1.0, sol1("A").units),
                             Quantity(1.0, ureg.m)**2)
        ndig = -int(np.log10(tol2))
        self.assertAlmostEqual(bst2.cost_at("cost", 3), 1.0, ndig)
        # before corner
        A_bc = np.linspace(1, 3, 50)
        sol_bc = bst2.sample_at(A_bc)
        assert_logtol(sol_bc("A"), (A_bc/3)**0.5, tol2)
        assert_logtol(sol_bc["cost"], A_bc/3, tol2)
        # after corner
        A_ac = np.linspace(3, 10, 50)
        sol_ac = bst2.sample_at(A_ac)
        assert_logtol(sol_ac("A"), (A_ac/3)**2, tol2)
        assert_logtol(sol_ac["cost"], (A_ac/3)**4, tol2)
    def test_model_var_access(self, example):
        model = example.PS
        _ = model["E"]
        with self.assertRaises(ValueError):
            _ = model["m"]  # multiple variables called m
    def test_performance_modeling(self, example):
        pass
    def test_sp_to_gp_sweep(self, example):
        pass
    def test_boundschecking(self, example):
        pass
    def test_vectorize(self, example):
        pass
    def test_primal_infeasible_ex1(self, example):
        with self.assertRaises(RuntimeWarning) as cm:
            example.m.solve(verbosity=0)
        err = cm.exception
        if "mosek" in err.message:
            self.assertIn("PRIM_INFEAS_CER", err.message)
        elif "cvxopt" in err.message:
            self.assertIn("unknown", err.message)
    def test_primal_infeasible_ex2(self, example):
        with self.assertRaises(RuntimeWarning):
            example.m.solve(verbosity=0)
    def test_docstringparsing(self, example):
        pass
    def test_debug(self, example):
        pass
    def test_simple_sp(self, example):
        pass
    def test_simple_box(self, example):
        pass
    def test_x_greaterthan_1(self, example):
        pass
    def test_beam(self, example):
        self.assertFalse(np.isnan(example.sol("w")).any())
    def test_water_tank(self, example):
        pass
    def test_sin_approx_example(self, example):
        pass
    def test_external_sp(self, example):
        pass
    def test_external_sp2(self, example):
        pass
    def test_simpleflight(self, example):
        self.assertTrue(example.sol.almost_equal(example.sol_loaded))
        for sol in [example.sol, example.sol_loaded]:
            freevarcheck = {
                "A": 8.46,
                "C_D": 0.0206,
                "C_f": 0.0036,
                "C_L": 0.499,
                "Re": 3.68e+06,
                "S": 16.4,
                "W": 7.34e+03,
                "V": 38.2,
                "W_w": 2.40e+03
            }
            # sensitivity values from p. 34 of W. Hoburg's thesis
            senscheck = {
                r"(\frac{S}{S_{wet}})": 0.4300,
                "e": -0.4785,
                "V_{min}": -0.3691,
                "k": 0.4300,
                r"\mu": 0.0860,
                "(CDA0)": 0.0915,
                "C_{L,max}": -0.1845,
                r"\tau": -0.2903,
                "N_{ult}": 0.2903,
                "W_0": 1.0107,
                r"\rho": -0.2275
            }
            for key in freevarcheck:
                sol_rat = mag(sol["variables"][key])/freevarcheck[key]
                self.assertTrue(abs(1-sol_rat) < 1e-2)
            for key in senscheck:
                sol_rat = sol["sensitivities"]["constants"][key]/senscheck[key]
                self.assertTrue(abs(1-sol_rat) < 1e-2)
    def test_relaxation(self, example):
        pass
    def test_unbounded(self, example):
        pass
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
EXAMPLE_DIR = os.path.abspath(FILE_DIR + '../../../docs/source/examples')
SOLVERS = settings["installed_solvers"]
if os.path.isdir(EXAMPLE_DIR):
    TESTS = generate_example_tests(EXAMPLE_DIR, [TestExamples], SOLVERS)
else:
    TESTS = []
if __name__ == "__main__":
    # pylint:disable=wrong-import-position
    from gpkit.tests.helpers import run_tests
    run_tests(TESTS)
 | 
	mit | 
| 
	liffiton/ATLeS | 
	src/analysis/plot.py | 
	1 | 
	11295 | 
	import math
import re
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import collections, lines, patches
from analysis import heatmaps
import config
# Source: https://gist.github.com/jasonmc/1160951
def _set_foregroundcolor(ax, color):
    '''For the specified axes, sets the color of the frame, major ticks,
    tick labels, axis labels, title and legend
    '''
    for tl in ax.get_xticklines() + ax.get_yticklines():
        tl.set_color(color)
    for spine in ax.spines:
        ax.spines[spine].set_edgecolor(color)
    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_color(color)
    for tick in ax.yaxis.get_major_ticks():
        tick.label1.set_color(color)
    ax.axes.xaxis.label.set_color(color)
    ax.axes.yaxis.label.set_color(color)
    ax.axes.xaxis.get_offset_text().set_color(color)
    ax.axes.yaxis.get_offset_text().set_color(color)
    ax.axes.title.set_color(color)
    lh = ax.get_legend()
    if lh is not None:
        lh.get_title().set_color(color)
        lh.legendPatch.set_edgecolor('none')
        labels = lh.get_texts()
        for lab in labels:
            lab.set_color(color)
    for tl in ax.get_xticklabels():
        tl.set_color(color)
    for tl in ax.get_yticklabels():
        tl.set_color(color)
# Source: https://gist.github.com/jasonmc/1160951
def _set_backgroundcolor(ax, color):
    '''Sets the background color of the current axes (and legend).
    Use 'None' (with quotes) for transparent. To get transparent
    background on saved figures, use:
    pp.savefig("fig1.svg", transparent=True)
    '''
    ax.patch.set_facecolor(color)
    lh = ax.get_legend()
    if lh is not None:
        lh.legendPatch.set_facecolor(color)
def format_axis(ax):
    _set_foregroundcolor(ax, '0.5')
    _set_backgroundcolor(ax, '0.08')
    # drop plot borders
    for spine in ax.spines:
        ax.spines[spine].set_visible(False)
def _format_figure(fig):
    fig.patch.set_facecolor('0.12')
    plt.tight_layout()
def show():
    ''' Shows the current figure (on screen, if using a GUI backend).
        Create a plot first using a TrackPlotter object. '''
    fig = plt.gcf()
    _format_figure(fig)
    plt.show()
    plt.close('all')
def savefig(outfile, format=None):
    ''' Saves the current figure to the given filename or file-like object.
        Format is inferred from the file extension if a name is given,
        otherwise specify it manually with the format parameter.
        Large (tall) figures are broken into multiple images (vertical tiles)
        if outfile is a string (filename).
        Create a plot first using a TrackPlotter object or other code that
        creates a pyplot figure.
    '''
    fig = plt.gcf()
    _format_figure(fig)
    # A bit of an ugly hack to split giant images into multiple parts
    # Only used if outfile is given as a string (filename)
    max_height = 100 if isinstance(outfile, str) else float('inf')
    # plot height in inches
    height = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted()).height
    if height > max_height:
        numparts = int(height / max_height) + 1
        for i in range(numparts):
            filename = re.sub(r"(\.[^\.]+)$", r"%02d\1" % (numparts-i), outfile)
            bbox = matplotlib.transforms.Bbox.from_extents([0,i*max_height,12,min(height,(i+1)*max_height)])
            plt.savefig(filename, facecolor=fig.get_facecolor(), edgecolor='none', bbox_inches=bbox, format=format)
    else:
        plt.savefig(outfile, facecolor=fig.get_facecolor(), edgecolor='none', format=format)
    plt.close('all')
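# --- Usage sketch (not part of the original module) --------------------------
# Minimal illustration of the helpers above: build any pyplot figure, style its
# axes with format_axis(), then hand it to savefig(), which formats the figure
# and, for very tall figures saved to a filename, splits the output into
# vertically tiled images. The output path below is hypothetical.
def _demo_savefig(outfile='/tmp/atles_demo.png'):
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([0, 1, 2], [0, 1, 0])
    format_axis(ax)
    savefig(outfile)  # operates on plt.gcf(), i.e. the figure just created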
class TrackPlotter(object):
    def __init__(self, track_processor, dbgframes=None):
        self._track = track_processor
        self._dbgframes = dbgframes
    @staticmethod
    def _speed2color(speed):
        # setup ranges, where 0 maps to first number, 1.0 maps to second
        color_ranges = {
            'r': (0.8, 1.0),
            'g': (0.8, 0.0),
            'b': (0.8, 0.0)
        }
        def scale(inval, color):
            range = color_ranges[color]
            scaled = range[0] + (range[1] - range[0]) * inval
            return min(1, max(0, scaled))  # constrain to 0-1
        r = scale(speed, 'r')
        g = scale(speed, 'g')
        b = scale(speed, 'b')
        return (r,g,b, 0.5)
    def plot_trace(self):
        # one minute per subplot
        numplots = self._track.len_minutes
        fig = plt.figure(figsize=(12,2*(numplots+1)))
        # Draw the legend at the top
        self.draw_legend(plt.subplot(numplots+1, 1, 1))
        for i in range(numplots):
            ax = plt.subplot(numplots+1, 1, i+2)
            self._plot_trace_portion(ax, start_min=i, end_min=i+1)
        return fig
    def draw_legend(self, legend_ax):
        # Make a legend with proxy artists
        xpos_artist = lines.Line2D([],[], color='orange')
        ypos_artist = lines.Line2D([],[], color='limegreen')
        numpts_artist = lines.Line2D([],[], color='purple', linewidth=1)
        frozen_artist = patches.Rectangle((0,0), 1, 1, fc='lightblue', ec='None')
        missing_artist = patches.Rectangle((0,0), 1, 1, fc='yellow', ec='None')
        lost_artist = patches.Rectangle((0,0), 1, 1, fc='red', ec='None')
        # Place it in center of top "subplot" area
        legend_ax.legend(
            [xpos_artist, ypos_artist, numpts_artist,
                frozen_artist, missing_artist, lost_artist],
            ['x-pos', 'y-pos', '# Detection pts',
                'Frozen', 'Missing', 'Lost'],
            loc='center',
            fontsize=12,
            ncol=4,
        )
        legend_ax.axis('off')
        format_axis(legend_ax)
    def plot_invalidheatmap(self):
        title = "Map of shame (loc of invalid data)"
        plt.figure(figsize=(4, 4))
        ax = plt.gca()
        ax.set_title(title)
        format_axis(ax)
        nbins = 50
        badpoints = (self._track.df.valid != True)  # noqa: E712
        heatmaps.plot_heatmap(ax, self._track.df.x[badpoints], self._track.df.y[badpoints], nbins=nbins)
    def plot_heatmap(self, plot_type='overall'):
        assert plot_type in ('per-minute', 'per-phase', 'overall')
        if plot_type == 'per-minute':
            numplots = self._track.len_minutes
        elif plot_type == 'per-phase':
            numplots = self._track.num_phases()
            phase_starts = self._track.phase_starts()
            phase_ends = phase_starts[1:] + [2**30]
        elif plot_type == 'overall':
            numplots = 1
        numrows = int(math.ceil(numplots / 10.0))
        if plot_type == 'overall':
            plt.figure(figsize=(4, 4))
        else:
            plt.figure(figsize=(2*min(numplots, 10), 2*numrows))
        for i in range(numplots):
            if plot_type == 'per-minute':
                start_min = i
                end_min = i+1
                title = "{}:00-{}:00".format(start_min, end_min)
            elif plot_type == 'per-phase':
                start_min = phase_starts[i]
                end_min = phase_ends[i]
                title = "Phase {} ({}:00-{}:00)".format(i+1, start_min, end_min)
            elif plot_type == 'overall':
                start_min = 0
                end_min = 2**30
                title = "Overall heatmap"
            ax = plt.subplot(numrows, min(numplots, 10), i+1)
            if numplots > 1:
                ax.axes.get_xaxis().set_visible(False)
                ax.axes.get_yaxis().set_visible(False)
            format_axis(ax)
            ax.set_title(title)
            nbins = 50
            start_sec = start_min*60
            end_sec = end_min*60
            heatmaps.plot_heatmap(ax, self._track.df.x[start_sec:end_sec], self._track.df.y[start_sec:end_sec], nbins=nbins)
    def _plot_trace_portion(self, ax, start_min, end_min):
        ''' Parameters:
                start_min, end_min:
                    Integer minutes.
                    Plot should be from start:00 to end:00.
        '''
        # shorthand
        df = self._track.df
        start = start_min * 60
        end = end_min * 60
        time = df.index.to_series()[start:end].values
        #theta = self._track.theta[start:end]
        #speed = self._track.speed[start:end]
        #valid = self._track.valid[start:end]
        lost = df.lost[start:end].values
        missing = df.missing[start:end].values
        frozen = df.frozen[start:end].values
        x = df.x[start:end].values
        y = df.y[start:end].values
        numpts = df.numpts[start:end].values
        # Format nicely
        format_axis(ax)
        ax.axes.get_yaxis().set_visible(False)
        # Set the axis limits explicitly (we don't want the y-axis autoscaled for us)
        ax.axis([start, end, -1.0, 1.0])
        # Mark lost/missing sections
        lost_collection = collections.BrokenBarHCollection.span_where(
            time,
            -1.0, -0.9,
            lost,
            edgecolors='none',
            facecolors='red',
        )
        ax.add_collection(lost_collection)
        missing_collection = collections.BrokenBarHCollection.span_where(
            time,
            -1.0, -0.9,
            missing,
            edgecolors='none',
            facecolors='yellow',
        )
        ax.add_collection(missing_collection)
        # Mark frozen sections
        frozen_collection = collections.BrokenBarHCollection.span_where(
            time,
            -0.85, -0.8,
            frozen,
            edgecolors='none',
            facecolors='lightblue',
        )
        ax.add_collection(frozen_collection)
        # Plot horizontal position
        ax.plot(time, x*2-1, color='orange', label='x position')
        # Plot height
        ax.plot(time, y*2-1, color='limegreen', label='y position')
        # Plot numpts (scaled so 0 = -1.0 (plot bottom), 20 = 1.0 (top))
        ax.plot(time, -1.0+(numpts/10.0), color='purple', linewidth=1, label='# detected points')
        # Add stick plot of movement (where valid)
#        ax.quiver(
#            time, [0] * len(time),
#            speed*np.cos(theta), speed*np.sin(theta),
#            color=[self._speed2color(s) for s in speed],
#            scale=1,  # scale all to a speed of 1, which should be close to max (tank is 1.0x1.0)
#            scale_units='y',
#            width=0.01,
#            units='inches',
#            headlength=0, headwidth=0, headaxislength=0     # no arrowheads
#        )
        # Add markers/links to debugframes if given
        # Get [tracking]:start_frame for proper offset of debug frame numbers into track data here
        start_frame = int(self._track.config['tracking']['start_frame'])
        for dbgframe in self._dbgframes:
            nameparts = dbgframe.name.split('_')
            frameindex = max(0, int(nameparts[1]) - start_frame)  # restrict to index 0 at minimum
            frametime = self._track.df.index[frameindex]
            if start <= frametime < end:
                marker = matplotlib.patches.Circle(
                    (frametime, -1.1), radius=0.08,
                    color='#337AB7',
                    clip_on=False,
                    url=str("/data" / dbgframe.relative_to(config.DATADIR))
                )
                ax.add_artist(marker)
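# Hedged illustration (not part of the original class): what the static
# _speed2color mapping above returns at its endpoints. Speed 0.0 maps to a
# light grey (0.8, 0.8, 0.8) and speed 1.0 to pure red (1.0, 0.0, 0.0), both
# at 50% alpha; values in between interpolate linearly and are clamped to [0, 1].
def _demo_speed2color():
    return [TrackPlotter._speed2color(s) for s in (0.0, 0.5, 1.0)]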
 | 
	mit | 
| 
	tsai1993/aisixiang | 
	01.download_1.py | 
	1 | 
	2415 | 
	#!/usr/bin/python3
import os
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas
import time
# Read the article catalogue/metadata produced by 00.get_metadata.R
D0 = pandas.read_csv("all_aisixiang_2017-05-24.csv")
# If the run is interrupted, resume by adjusting the value of j
j = 0
D = D0[j:]
for i in D['ID']:
    Url = "http://www.aisixiang.com/data/" + str(i) + ".html"
    print(Url)
    try:
        html = urlopen(Url)
    except Exception:
        f1 = open("broken-new.txt", 'a')
        Broken = str(i) + '-' + Url + ',' + '\n'
        f1.write(Broken)
        f1.close()
        print(Broken)
        j += 1
        Availability = 3
        f2 = open("Av.txt", 'a')
        f2.write(str(Availability) + '_' + str(i) + ',' + '\n')
        f2.close()
    else:
        Soup = BeautifulSoup(html, "html.parser")
        Article = Soup.find(id = "content2")
        Article_page = ''
        if type(Article) == type(None):
            Availability = 0
        else:
            Availability = 1
            Page = Soup.find(class_ = "list_page")
            if type(Page) == type(None):
                Article_page = Article_page + Article.get_text()
            else:
                Page_number = Page.find_all("a")
                N = int(Page_number[-2].get_text())
                for k in range(1, N+1):
                    Url2 = Url[:(len(Url)-5)] + '-' + str(k) + '.html'
                    print(Url2)
                    try:
                        html2 = urlopen(Url2)
                    except Exception:
                        ft2 = open("broken2.txt", 'a')
                        Broken2 = str(i) + '-' + Url2 + ',' + '\n'
                        ft2.write(Broken2)
                        ft2.close()
                        print(Broken2)
                        print(Broken2)
                    else:
                        Soup2 = BeautifulSoup(html2, "html.parser")
                        Article = Soup2.find(id = "content2")
                        Article_page = Article_page + Article.get_text()
                        time.sleep(1)
        Name = str(Availability) + '-' + str(i) + '-' + D0.iloc[j,0] + '.txt'
        Name = Name.replace('/','')
        f = open(Name, 'w')
        f.write(Article_page)
        f.close()
        print(Name + '\n')
        j += 1
        time.sleep(1)
        f2 = open("Av.txt", 'a')
        f2.write(str(Availability) + '_' + str(i) + ',' + '\n')
        f2.close()
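# Hedged illustration (not part of the original script) of how the paginated
# article URLs are built above: strip the trailing ".html" and append
# "-<page>.html". The article id and page count below are made up.
def _demo_page_urls(article_id=12345, pages=3):
    base = "http://www.aisixiang.com/data/" + str(article_id) + ".html"
    return [base[:-5] + '-' + str(k) + '.html' for k in range(1, pages + 1)]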
 | 
	mpl-2.0 | 
| 
	ambikeshwar1991/sandhi-2 | 
	module/gr36/gr-filter/examples/interpolate.py | 
	13 | 
	8584 | 
	#!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
    import scipy
    from scipy import fftpack
except ImportError:
    print "Error: Program requires scipy (see: www.scipy.org)."
    sys.exit(1)
try:
    import pylab
    from pylab import mlab
except ImportError:
    print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
    sys.exit(1)
class pfb_top_block(gr.top_block):
    def __init__(self):
        gr.top_block.__init__(self)
        self._N = 100000        # number of samples to use
        self._fs = 2000         # initial sampling rate
        self._interp = 5        # Interpolation rate for PFB interpolator
        self._ainterp = 5.5       # Resampling rate for the PFB arbitrary resampler
        # Frequencies of the signals we construct
        freq1 = 100
        freq2 = 200
        # Create a set of taps for the PFB interpolator
        # This is based on the post-interpolation sample rate
        self._taps = filter.firdes.low_pass_2(self._interp,
                                              self._interp*self._fs,
                                              freq2+50, 50,
                                              attenuation_dB=120,
                                              window=filter.firdes.WIN_BLACKMAN_hARRIS)
        # Create a set of taps for the PFB arbitrary resampler
        # The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
        # and larger numbers will reduce these even further
        # The taps in this filter are based on a sampling rate of the filter size since it acts
        # internally as an interpolator.
        flt_size = 32
        self._taps2 = filter.firdes.low_pass_2(flt_size,
                                               flt_size*self._fs,
                                               freq2+50, 150,
                                               attenuation_dB=120,
                                               window=filter.firdes.WIN_BLACKMAN_hARRIS)
        # Calculate the number of taps per channel for our own information
        tpc = scipy.ceil(float(len(self._taps)) /  float(self._interp))
        print "Number of taps:     ", len(self._taps)
        print "Number of filters:  ", self._interp
        print "Taps per channel:   ", tpc
        # Create a couple of signals at different frequencies
        self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
        self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
        self.signal = gr.add_cc()
        self.head = gr.head(gr.sizeof_gr_complex, self._N)
        # Construct the PFB interpolator filter
        self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
        # Construct the PFB arbitrary resampler filter
        self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
        self.snk_i = gr.vector_sink_c()
        #self.pfb_ar.pfb.print_taps()
        #self.pfb.pfb.print_taps()
        # Connect the blocks
        self.connect(self.signal1, self.head, (self.signal,0))
        self.connect(self.signal2, (self.signal,1))
        self.connect(self.signal, self.pfb)
        self.connect(self.signal, self.pfb_ar)
        self.connect(self.signal, self.snk_i)
        # Create the sink for the interpolated signals
        self.snk1 = gr.vector_sink_c()
        self.snk2 = gr.vector_sink_c()
        self.connect(self.pfb, self.snk1)
        self.connect(self.pfb_ar, self.snk2)
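# Hedged note (not part of the original example): the PFB interpolator above
# raises the sample rate by the integer factor self._interp, while the
# arbitrary resampler applies the fractional factor self._ainterp. With the
# hard-coded values (fs=2000, interp=5, ainterp=5.5) that is 10000 and 11000
# samples/s, matching the fs_int and fs_aint values computed in main().
def _expected_output_rates(fs=2000, interp=5, ainterp=5.5):
    return fs * interp, fs * ainterp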
def main():
    tb = pfb_top_block()
    tstart = time.time()
    tb.run()
    tend = time.time()
    print "Run time: %f" % (tend - tstart)
    if 1:
        fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
        fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
        fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
        Ns = 10000
        Ne = 10000
        fftlen = 8192
        winfunc = scipy.blackman
        # Plot input signal
        fs = tb._fs
        d = tb.snk_i.data()[Ns:Ns+Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in)+1])
        sp1_f.set_ylim([-200.0, 50.0])
        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")
        Ts = 1.0/fs
        Tmax = len(d)*Ts
        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
        #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
        sp1_t.set_ylim([-2.5, 2.5])
        sp1_t.set_title("Input Signal", weight="bold")
        sp1_t.set_xlabel("Time (s)")
        sp1_t.set_ylabel("Amplitude")
        # Plot output of PFB interpolator
        fs_int = tb._fs*tb._interp
        sp2_f = fig2.add_subplot(2, 1, 1)
        d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
        p2_f = sp2_f.plot(f_o, X_o, "b")
        sp2_f.set_xlim([min(f_o), max(f_o)+1])
        sp2_f.set_ylim([-200.0, 50.0])
        sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
        sp2_f.set_xlabel("Frequency (Hz)")
        sp2_f.set_ylabel("Power (dBW)")
        Ts_int = 1.0/fs_int
        Tmax = len(d)*Ts_int
        t_o = scipy.arange(0, Tmax, Ts_int)
        x_o1 = scipy.array(d)
        sp2_t = fig2.add_subplot(2, 1, 2)
        p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
        #p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
        sp2_t.set_ylim([-2.5, 2.5])
        sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
        sp2_t.set_xlabel("Time (s)")
        sp2_t.set_ylabel("Amplitude")
        # Plot output of PFB arbitrary resampler
        fs_aint = tb._fs * tb._ainterp
        sp3_f = fig3.add_subplot(2, 1, 1)
        d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
        p3_f = sp3_f.plot(f_o, X_o, "b")
        sp3_f.set_xlim([min(f_o), max(f_o)+1])
        sp3_f.set_ylim([-200.0, 50.0])
        sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
        sp3_f.set_xlabel("Frequency (Hz)")
        sp3_f.set_ylabel("Power (dBW)")
        Ts_aint = 1.0/fs_aint
        Tmax = len(d)*Ts_aint
        t_o = scipy.arange(0, Tmax, Ts_aint)
        x_o2 = scipy.array(d)
        sp3_f = fig3.add_subplot(2, 1, 2)
        p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
        p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
        #p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
        sp3_f.set_ylim([-2.5, 2.5])
        sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
        sp3_f.set_xlabel("Time (s)")
        sp3_f.set_ylabel("Amplitude")
        pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
 | 
	gpl-3.0 | 
| 
	lgarren/spack | 
	var/spack/repos/builtin/packages/py-iminuit/package.py | 
	3 | 
	1800 | 
	##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyIminuit(PythonPackage):
    """Interactive IPython-Friendly Minimizer based on SEAL Minuit2."""
    homepage = "https://pypi.python.org/pypi/iminuit"
    url      = "https://pypi.io/packages/source/i/iminuit/iminuit-1.2.tar.gz"
    version('1.2', '4701ec472cae42015e26251703e6e984')
    # Required dependencies
    depends_on('py-setuptools', type='build')
    # Optional dependencies
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-cython', type='build')
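    # Hedged usage note (comment only, not part of the original recipe): with
    # this package file available to a Spack instance, installation would
    # typically be
    #   spack install py-iminuit
    # optionally pinned to the version listed above with the spec
    # py-iminuit@1.2; the exact load/activation workflow depends on the Spack
    # version in use.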
 | 
	lgpl-2.1 | 
| 
	marcusrehm/serenata-de-amor | 
	rosie/rosie/chamber_of_deputies/classifiers/monthly_subquota_limit_classifier.py | 
	2 | 
	6711 | 
	import numpy as np
import pandas as pd
from datetime import datetime
from sklearn.base import TransformerMixin
class MonthlySubquotaLimitClassifier(TransformerMixin):
    """
    Monthly Subquota Limit classifier.
    Dataset
    -------
    issue_date : datetime column
        Date when the expense was made.
    month : int column
        The quota month matching the expense request.
    net_value : float column
        The value of the expense.
    subquota_number : category column
        A number to classify a category of expenses.
    year : int column
        The quota year matching the expense request.
    """
    KEYS = ['applicant_id', 'month', 'year']
    COLS = ['applicant_id',
            'issue_date',
            'month',
            'net_value',
            'subquota_number',
            'year']
    def fit(self, X):
        self.X = X
        self._X = self.X[self.COLS].copy()
        self.__create_columns()
        return self
    def transform(self, X=None):
        self.limits = [
            {
                # Automotive vehicle renting or charter (From 12/2013 to 03/2015)
                'data': self._X.query('(subquota_number == "120") & '
                                      '(reimbursement_month >= datetime(2013, 12, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 1000000,
            },
            {
                # Automotive vehicle renting or charter (From 04/2015 to 04/2017)
                'data': self._X.query('(subquota_number == "120") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1)) & '
                                      '(reimbursement_month <= datetime(2017, 4, 1))'),
                'monthly_limit': 1090000,
            },
            {
                # Automotive vehicle renting or charter (From 05/2017)
                'data': self._X.query('(subquota_number == "120") & '
                                      '(reimbursement_month >= datetime(2017, 5, 1))'),
                'monthly_limit': 1271300,
            },
            {
                # Taxi, toll and parking (From 12/2013 to 03/2015)
                'data': self._X.query('(subquota_number == "122") & '
                                      '(reimbursement_month >= datetime(2013, 12, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 250000,
            },
            {
                # Taxi, toll and parking (From 04/2015)
                'data': self._X.query('(subquota_number == "122") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1))'),
                'monthly_limit': 270000,
            },
            {
                # Fuels and lubricants (From 07/2009 to 03/2015)
                'data': self._X.query('(subquota_number == "3") & '
                                      '(reimbursement_month >= datetime(2009, 7, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 450000,
            },
            {
                # Fuels and lubricants (From 04/2015 to 08/2015)
                'data': self._X.query('(subquota_number == "3") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 8, 1))'),
                'monthly_limit': 490000,
            },
            {
                # Fuels and lubricants (From 9/2015)
                'data': self._X.query('(subquota_number == "3") & '
                                      '(reimbursement_month >= datetime(2015, 9, 1))'),
                'monthly_limit': 600000,
            },
            {
                # Security service provided by specialized company (From 07/2009 to 4/2014)
                'data': self._X.query('(subquota_number == "8") & '
                                      '(reimbursement_month >= datetime(2009, 7, 1)) & '
                                      '(reimbursement_month <= datetime(2014, 4, 1))'),
                'monthly_limit': 450000,
            },
            {
                # Security service provided by specialized company (From 05/2014 to 3/2015)
                'data': self._X.query('(subquota_number == "8") & '
                                      '(reimbursement_month >= datetime(2014, 5, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 800000,
            },
            {
                # Security service provided by specialized company (From 04/2015)
                'data': self._X.query('(subquota_number == "8") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1))'),
                'monthly_limit': 870000,
            },
            {
                # Participation in course, talk or similar event (From 10/2015)
                'data': self._X.query('(subquota_number == "137") & '
                                      '(reimbursement_month >= datetime(2015, 10, 1))'),
                'monthly_limit': 769716,
            },
        ]
        return self
    def predict(self, X=None):
        self._X['is_over_monthly_subquota_limit'] = False
        for metadata in self.limits:
            data, monthly_limit = metadata['data'], metadata['monthly_limit']
            if len(data):
                surplus_reimbursements = self.__find_surplus_reimbursements(data, monthly_limit)
                self._X.loc[surplus_reimbursements.index,
                            'is_over_monthly_subquota_limit'] = True
        results = self._X.loc[self.X.index, 'is_over_monthly_subquota_limit']
        return np.r_[results]
    def predict_proba(self, X=None):
        return 1.
    def __create_columns(self):
        self._X['net_value_int'] = (self._X['net_value'] * 100).apply(int)
        self._X['coerced_issue_date'] = \
            pd.to_datetime(self._X['issue_date'], errors='coerce')
        self._X.sort_values('coerced_issue_date', kind='mergesort', inplace=True)
        reimbursement_month = self._X[['year', 'month']].copy()
        reimbursement_month['day'] = 1
        self._X['reimbursement_month'] = pd.to_datetime(reimbursement_month)
    def __find_surplus_reimbursements(self, data, monthly_limit):
        grouped = data.groupby(self.KEYS).apply(self.__create_cumsum_cols)
        return grouped[grouped['cumsum_net_value'] > monthly_limit]
    def __create_cumsum_cols(self, subset):
        subset['cumsum_net_value'] = subset['net_value_int'].cumsum()
        return subset
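# Hedged usage sketch (not part of the original module): the classifier follows
# the fit/transform/predict protocol used above. The two-row frame below is
# fabricated purely to show the expected column layout; with subquota 137
# (monthly limit of 769716 cents from 10/2015) the second expense pushes the
# month's cumulative total over the limit, so predict() returns [False, True]
# with the pandas version this module was written against.
def _demo_monthly_subquota_limit():
    X = pd.DataFrame([
        {'applicant_id': 1, 'issue_date': '2015-10-05', 'month': 10,
         'net_value': 7000.0, 'subquota_number': '137', 'year': 2015},
        {'applicant_id': 1, 'issue_date': '2015-10-20', 'month': 10,
         'net_value': 1000.0, 'subquota_number': '137', 'year': 2015},
    ])
    model = MonthlySubquotaLimitClassifier()
    model.fit(X).transform()
    return model.predict()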
 | 
	mit | 
| 
	MarineLasbleis/GrowYourIC | 
	notebooks/Yoshida.py | 
	1 | 
	4212 | 
	# -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt #for figures
#from mpl_toolkits.basemap import Basemap #to render maps
import math
from GrowYourIC import tracers, positions, geodyn, geodyn_trg, geodyn_static, plot_data, data, geodyn_analytical_flows
#plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures
cm = plt.cm.get_cmap('viridis_r')
#V = 0.2 # translation velocity
#S2 = 1/5.
#Yoshida = geodyn_analytical_flows.Yoshida96(V, S=S2)
#file = "Fig/Yoshida_{}_S2_{}".format(V, S2)
#print(file)
V = [0.2, 0.4]
S2 = [1/5., 4/5., 2.]
    
for vitesse in V:
    for value_S in S2: 
        Yoshida = geodyn_analytical_flows.Yoshida96(vitesse, S=value_S)
        file = "Fig/Yoshida_{}_S2_{}".format(vitesse, value_S)
        print(file)
        npoints = 50 #number of points in the x direction for the data set. 
        data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
        data_set.method = "bt_point"
        # Age plot with velocity field
        proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
        data_set.plot_c_vec(Yoshida, proxy=proxy, nameproxy="age")
        plt.savefig(file+"_age.pdf")
# accumulated deformation
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
#plt.savefig(file+"_vM_acc.pdf")
#data_set.plot_c_vec(Yoshida, proxy=np.log10(proxy), cm=cm, nameproxy="log_vMises_acc")
#plt.savefig(file+"_log_vM_acc.pdf")
# tracers with age
#tracers.Swarm(5, Yoshida, Yoshida.tau_ic/400, "ici", plane="meridional")
#data_set = data.PerfectSamplingCut(20, rICB = 1.)
#data_set.method = "bt_point"
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, nameproxy="age")
#plt.show()
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
#Karato = geodyn_analytical_flows.Model_LorentzForce()
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#Karato.P = 1e-4
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="age")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="age")
#plt.savefig(file+"_tage.pdf")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#plt.savefig(file+"_t_vM.pdf")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_cart", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_cart")
#plt.savefig("Yoshida_vM.pdf")
#Karato.P = 1e4
#proxy_1 = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy_1, cm=cm, nameproxy="vMises_tau_ic")
#npoints = 50 #number of points in the x direction for the data set. 
#data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
#data_set.method = "bt_point"
#proxy_2 = geodyn.evaluate_proxy(data_set, Karato, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy_2, cm=cm, nameproxy="age")
#npoints = 100 #number of points in the x direction for the data set. 
#data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
#data_set.method = "bt_point"
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
plt.show() | 
	mit | 
| 
	xinfang/face-recognize | 
	tests/openface_neural_net_training_tests.py | 
	5 | 
	3071 | 
	# OpenFace training tests.
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import tempfile
from subprocess import Popen, PIPE
openfaceDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
modelDir = os.path.join(openfaceDir, 'models')
exampleImages = os.path.join(openfaceDir, 'images', 'examples')
lfwSubset = os.path.join(openfaceDir, 'data', 'lfw-subset')
def test_dnn_training():
    assert os.path.isdir(
        lfwSubset), "Get lfw-subset by running ./data/download-lfw-subset.sh"
    imgWorkDir = tempfile.mkdtemp(prefix='OpenFaceTrainingTest-Img-')
    cmd = [sys.executable, os.path.join(openfaceDir, 'util', 'align-dlib.py'),
           os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
           os.path.join(imgWorkDir, 'aligned')]
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    (out, err) = p.communicate()
    print(out)
    print(err)
    assert p.returncode == 0
    cmd = [sys.executable, os.path.join(openfaceDir, 'util', 'align-dlib.py'),
           os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
           os.path.join(imgWorkDir, 'aligned')]
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    (out, err) = p.communicate()
    print(out)
    print(err)
    assert p.returncode == 0
    netWorkDir = tempfile.mkdtemp(prefix='OpenFaceTrainingTest-Net-')
    saveDir = os.path.join(netWorkDir, '1')
    cmd = ['th', './main.lua',
           '-data', os.path.join(imgWorkDir, 'aligned'),
           '-modelDef', '../models/openface/nn4.def.lua',
           '-peoplePerBatch', '3',
           '-imagesPerPerson', '10',
           '-nEpochs', '10',
           '-epochSize', '1',
           '-cache', netWorkDir,
           '-save', saveDir,
           '-cuda', '-cudnn', '-testing',
           '-nDonkeys', '-1']
    p = Popen(cmd, stdout=PIPE, stderr=PIPE,
              cwd=os.path.join(openfaceDir, 'training'), universal_newlines=True)
    (out, err) = p.communicate()
    print(out)
    print(err)
    assert p.returncode == 0
    # Training won't make much progress on lfw-subset, but as a sanity check,
    # make sure the training code runs and the mean loss stays below 0.3.
    trainLoss = pd.read_csv(os.path.join(saveDir, 'train.log'),
                            sep='\t').as_matrix()[:, 0]
    assert np.mean(trainLoss) < 0.3
    shutil.rmtree(imgWorkDir)
    shutil.rmtree(netWorkDir)
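# Hedged helper sketch (not part of the original test): the three subprocess
# blocks above repeat the same run-print-assert pattern; a refactor could
# funnel them through one helper like this. The arguments are whatever the
# caller passes in; nothing here is OpenFace-specific.
def _run_and_check(cmd, cwd=None):
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=cwd, universal_newlines=True)
    out, err = p.communicate()
    print(out)
    print(err)
    assert p.returncode == 0, "command failed: {}".format(' '.join(cmd))
    return out, err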
 | 
	apache-2.0 | 
| 
	hsuantien/scikit-learn | 
	sklearn/mixture/tests/test_gmm.py | 
	200 | 
	17427 | 
	import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                           assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
    # Test sample generation from mixture.sample_gaussian where covariance
    # is diagonal, spherical and full
    n_features, n_samples = 2, 300
    axis = 1
    mu = rng.randint(10) * rng.rand(n_features)
    cv = (rng.rand(n_features) + 1.0) ** 2
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='diag', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
    assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
    # the same for spherical covariances
    cv = (rng.rand() + 1.0) ** 2
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='spherical', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
    assert_true(np.allclose(
        samples.var(axis), np.repeat(cv, n_features), atol=1.5))
    # and for full covariances
    A = rng.randn(n_features, n_features)
    cv = np.dot(A.T, A) + np.eye(n_features)
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='full', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
    assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
    # Numerical stability check: in SciPy 0.12.0 at least, eigh may return
    # tiny negative values in its second return value.
    from sklearn.mixture import sample_gaussian
    x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
                        covariance_type='full', random_state=42)
    print(x)
    assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
    # slow and naive implementation of lmvnpdf
    ref = np.empty((len(X), len(mu)))
    stds = np.sqrt(cv)
    for i, (m, std) in enumerate(zip(mu, stds)):
        ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
    return ref
def test_lmvnpdf_diag():
    # test a slow and naive implementation of lmvnpdf and
    # compare it to the vectorized version (mixture.lmvnpdf) to test
    # for correctness
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    cv = (rng.rand(n_components, n_features) + 1.0) ** 2
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    ref = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
    assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    spherecv = rng.rand(n_components, 1) ** 2 + 1
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    cv = np.tile(spherecv, (n_features, 1))
    reference = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
                                                  'spherical')
    assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    cv = (rng.rand(n_components, n_features) + 1.0) ** 2
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    fullcv = np.array([np.diag(x) for x in cv])
    reference = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
    assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
    n_features, n_samples = 2, 10
    rng = np.random.RandomState(0)
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    mu = np.mean(X, 0)
    cv = np.array([[[-1, 0], [0, 1]]])
    expected_message = "'covars' must be symmetric, positive-definite"
    assert_raise_message(ValueError, expected_message,
                         mixture.log_multivariate_normal_density,
                         X, mu, cv, 'full')
def test_GMM_attributes():
    n_components, n_features = 10, 4
    covariance_type = 'diag'
    g = mixture.GMM(n_components, covariance_type, random_state=rng)
    weights = rng.rand(n_components)
    weights = weights / weights.sum()
    means = rng.randint(-20, 20, (n_components, n_features))
    assert_true(g.n_components == n_components)
    assert_true(g.covariance_type == covariance_type)
    g.weights_ = weights
    assert_array_almost_equal(g.weights_, weights)
    g.means_ = means
    assert_array_almost_equal(g.means_, means)
    covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
    g.covars_ = covars
    assert_array_almost_equal(g.covars_, covars)
    assert_raises(ValueError, g._set_covars, [])
    assert_raises(ValueError, g._set_covars,
                  np.zeros((n_components - 2, n_features)))
    assert_raises(ValueError, mixture.GMM, n_components=20,
                  covariance_type='badcovariance_type')
class GMMTester():
    do_test_eval = True
    def _setUp(self):
        self.n_components = 10
        self.n_features = 4
        self.weights = rng.rand(self.n_components)
        self.weights = self.weights / self.weights.sum()
        self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
        self.threshold = -0.5
        self.I = np.eye(self.n_features)
        self.covars = {
            'spherical': (0.1 + 2 * rng.rand(self.n_components,
                                             self.n_features)) ** 2,
            'tied': (make_spd_matrix(self.n_features, random_state=0)
                     + 5 * self.I),
            'diag': (0.1 + 2 * rng.rand(self.n_components,
                                        self.n_features)) ** 2,
            'full': np.array([make_spd_matrix(self.n_features, random_state=0)
                              + 5 * self.I for x in range(self.n_components)])}
    def test_eval(self):
        if not self.do_test_eval:
            return  # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
        # due to the variational parameters being more expressive than
        # covariance matrices
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = self.covars[self.covariance_type]
        g.weights_ = self.weights
        gaussidx = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(gaussidx)
        X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
        ll, responsibilities = g.score_samples(X)
        self.assertEqual(len(ll), n_samples)
        self.assertEqual(responsibilities.shape,
                         (n_samples, self.n_components))
        assert_array_almost_equal(responsibilities.sum(axis=1),
                                  np.ones(n_samples))
        assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
    def test_sample(self, n=100):
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        g.weights_ = self.weights
        samples = g.sample(n)
        self.assertEqual(samples.shape, (n, self.n_features))
    def test_train(self, params='wmc'):
        g = mixture.GMM(n_components=self.n_components,
                        covariance_type=self.covariance_type)
        g.weights_ = self.weights
        g.means_ = self.means
        g.covars_ = 20 * self.covars[self.covariance_type]
        # Create a training set by sampling from the predefined distribution.
        X = g.sample(n_samples=100)
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-1,
                       n_iter=1, init_params=params)
        g.fit(X)
        # Do one training iteration at a time so we can keep track of
        # the log likelihood to make sure that it increases after each
        # iteration.
        trainll = []
        for _ in range(5):
            g.params = params
            g.init_params = ''
            g.fit(X)
            trainll.append(self.score(g, X))
        g.n_iter = 10
        g.init_params = ''
        g.params = params
        g.fit(X)  # finish fitting
        # Note that the log likelihood will sometimes decrease by a
        # very small amount after it has more or less converged due to
        # the addition of min_covar to the covariance (to prevent
        # underflow).  This is why the threshold is set to -0.5
        # instead of 0.
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > self.threshold,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f, for model %s. The likelihoods are %s."
            % (delta_min, self.threshold, self.covariance_type, trainll))
    def test_train_degenerate(self, params='wmc'):
        # Train on degenerate data with 0 in some dimensions
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, self.n_features)
        X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-3, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
    def test_train_1d(self, params='wmc'):
        # Train on 1-D data
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, 1)
        # X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-7, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        if isinstance(g, mixture.DPGMM):
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
        else:
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
    def score(self, g, X):
        return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
    covariance_type = 'spherical'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
    covariance_type = 'diag'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
    covariance_type = 'tied'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
    covariance_type = 'full'
    model = mixture.GMM
    setUp = GMMTester._setUp
def test_multiple_init():
    # Test that using multiple inits does not do much worse than a single one
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, covariance_type='spherical',
                    random_state=rng, min_covar=1e-7, n_iter=5)
    train1 = g.fit(X).score(X).sum()
    g.n_init = 5
    train2 = g.fit(X).score(X).sum()
    assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
    # Test that the right number of parameters is estimated
    n_samples, n_dim, n_components = 7, 5, 2
    X = rng.randn(n_samples, n_dim)
    n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7, n_iter=1)
        g.fit(X)
        assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
    # Test all of the covariance_types return the same BIC score for
    # 1-dimensional, 1 component fits.
    n_samples, n_dim, n_components = 100, 1, 1
    X = rng.randn(n_samples, n_dim)
    g_full = mixture.GMM(n_components=n_components, covariance_type='full',
                         random_state=rng, min_covar=1e-7, n_iter=1)
    g_full.fit(X)
    g_full_bic = g_full.bic(X)
    for cv_type in ['tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7, n_iter=1)
        g.fit(X)
        assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
    model2 = copy.deepcopy(model)
    predictions_1 = model.fit(X).predict(X)
    predictions_2 = model2.fit_predict(X)
    assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
    """
    test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
    """
    lrng = np.random.RandomState(101)
    n_samples, n_dim, n_comps = 100, 2, 2
    mu = np.array([[8, 8]])
    component_0 = lrng.randn(n_samples, n_dim)
    component_1 = lrng.randn(n_samples, n_dim) + mu
    X = np.vstack((component_0, component_1))
    for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
        model = m_constructor(n_components=n_comps, covariance_type='full',
                              min_covar=1e-7, n_iter=5,
                              random_state=np.random.RandomState(0))
        assert_fit_predict_correct(model, X)
    model = mixture.GMM(n_components=n_comps, n_iter=0)
    z = model.fit_predict(X)
    assert np.all(z == 0), "Quick Initialization Failed!"
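# Hedged illustration (not one of the original tests): the minimal GMM
# workflow these tests exercise, collected in one place. It uses the
# module-level rng defined above; the shapes and component count are
# arbitrary, and mixture.GMM is the legacy API this file targets.
def _demo_basic_gmm_usage():
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
    g = mixture.GMM(n_components=2, covariance_type='full',
                    random_state=rng, n_iter=20)
    labels = g.fit(X).predict(X)
    per_sample_log_likelihood = g.score(X)
    return labels, per_sample_log_likelihood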
def test_aic():
    # Test the aic and bic criteria
    n_samples, n_dim, n_components = 50, 3, 2
    X = rng.randn(n_samples, n_dim)
    SGH = 0.5 * (X.var() + np.log(2 * np.pi))  # standard gaussian entropy
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7)
        g.fit(X)
        aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
        bic = (2 * n_samples * SGH * n_dim +
               np.log(n_samples) * g._n_parameters())
        bound = n_dim * 3. / np.sqrt(n_samples)
        assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
        assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
    r"""Test that covariance matrices do not become non positive definite
    Due to the accumulation of round-off errors, the computation of the
    covariance  matrices during the learning phase could lead to non-positive
    definite covariance matrices. Namely the use of the formula:
    .. math:: C = (\sum_i w_i  x_i x_i^T) - \mu \mu^T
    instead of:
    .. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed mean.
    This function ensures that some later optimization will not introduce the
    problem again.
    """
    rng = np.random.RandomState(1)
    # we build a dataset with 2 2d component. The components are unbalanced
    # (respective weights 0.9 and 0.1)
    X = rng.randn(100, 2)
    X[-10:] += (3, 3)  # Shift the 10 last points
    gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
                      min_covar=1e-3)
    # This is a non-regression test for issue #2640. The following call used
    # to trigger:
    # numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
    gmm.fit(X)
    if covariance_type == "diag" or covariance_type == "spherical":
        assert_greater(gmm.covars_.min(), 0)
    else:
        if covariance_type == "tied":
            covs = [gmm.covars_]
        else:
            covs = gmm.covars_
        for c in covs:
            assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
    # Check positive definiteness for all covariance types
    for covariance_type in ["full", "tied", "diag", "spherical"]:
        yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
    # Create sample data
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, n_init=2, verbose=1)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        g.fit(X)
    finally:
        sys.stdout = old_stdout
def test_verbose_second_level():
    # Create sample data
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, n_init=2, verbose=2)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        g.fit(X)
    finally:
        sys.stdout = old_stdout
 | 
	bsd-3-clause | 
| 
	PatrickOReilly/scikit-learn | 
	sklearn/gaussian_process/gpr.py | 
	7 | 
	18711 | 
	"""Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
    """Gaussian process regression (GPR).
    The implementation is based on Algorithm 2.1 of Gaussian Processes
    for Machine Learning (GPML) by Rasmussen and Williams.
    In addition to standard scikit-learn estimator API,
    GaussianProcessRegressor:
       * allows prediction without prior fitting (based on the GP prior)
       * provides an additional method sample_y(X), which evaluates samples
         drawn from the GPR (prior or posterior) at given inputs
       * exposes a method log_marginal_likelihood(theta), which can be used
         externally for other ways of selecting hyperparameters, e.g., via
         Markov chain Monte Carlo.
    Read more in the :ref:`User Guide <gaussian_process>`.
    Parameters
    ----------
    kernel : kernel object
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
        the kernel's hyperparameters are optimized during fitting.
    alpha : float or array-like, optional (default: 1e-10)
        Value added to the diagonal of the kernel matrix during fitting.
        Larger values correspond to increased noise level in the observations
        and reduce potential numerical issues during fitting. If an array is
        passed, it must have the same number of entries as the data used for
        fitting and is used as a datapoint-dependent noise level. Note that
        this is equivalent to adding a WhiteKernel with c=alpha. Allowing the
        noise level to be specified directly as a parameter is mainly for convenience and
        for consistency with Ridge.
    optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::
            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be maximized, which
                #   takes the hyperparameters theta as parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min
        Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are::
            'fmin_l_bfgs_b'
    n_restarts_optimizer: int, optional (default: 0)
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that n_restarts_optimizer == 0 implies that one
        run is performed.
    normalize_y: boolean, optional (default: False)
        Whether the target values y are normalized, i.e., the mean of the
        observed target values becomes zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
        zero. When enabled, the normalization effectively modifies the GP's
        prior based on the data, which contradicts the likelihood principle;
        normalization is thus disabled per default.
    copy_X_train : bool, optional (default: True)
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.
    random_state : integer or numpy.RandomState, optional
        The generator used to draw the initial hyperparameter values for the
        optimizer restarts. If an integer is given, it fixes the seed.
        Defaults to the global numpy random number generator.
    Attributes
    ----------
    X_train_ : array-like, shape = (n_samples, n_features)
        Feature values in training data (also required for prediction)
    y_train_ : array-like, shape = (n_samples, [n_output_dims])
        Target values in training data (also required for prediction)
    kernel_ : kernel object
        The kernel used for prediction. The structure of the kernel is the
        same as the one passed as parameter but with optimized hyperparameters
    L_ : array-like, shape = (n_samples, n_samples)
        Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
    alpha_ : array-like, shape = (n_samples,)
        Dual coefficients of training data points in kernel space
    log_marginal_likelihood_value_ : float
        The log-marginal-likelihood of ``self.kernel_.theta``
    """
    def __init__(self, kernel=None, alpha=1e-10,
                 optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
                 normalize_y=False, copy_X_train=True, random_state=None):
        self.kernel = kernel
        self.alpha = alpha
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.normalize_y = normalize_y
        self.copy_X_train = copy_X_train
        self.random_state = random_state
    def fit(self, X, y):
        """Fit Gaussian process regression model
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data
        y : array-like, shape = (n_samples, [n_output_dims])
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            self.kernel_ = C(1.0, constant_value_bounds="fixed") \
                * RBF(1.0, length_scale_bounds="fixed")
        else:
            self.kernel_ = clone(self.kernel)
        self.rng = check_random_state(self.random_state)
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        # Normalize target value
        if self.normalize_y:
            self.y_train_mean = np.mean(y, axis=0)
            # demean y
            y = y - self.y_train_mean
        else:
            self.y_train_mean = np.zeros(1)
        if np.iterable(self.alpha) \
           and self.alpha.shape[0] != y.shape[0]:
            if self.alpha.shape[0] == 1:
                self.alpha = self.alpha[0]
            else:
                raise ValueError("alpha must be a scalar or an array"
                                 " with same number of entries as y.(%d != %d)"
                                 % (self.alpha.shape[0], y.shape[0]))
        self.X_train_ = np.copy(X) if self.copy_X_train else X
        self.y_train_ = np.copy(y) if self.copy_X_train else y
        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def obj_func(theta, eval_gradient=True):
                if eval_gradient:
                    lml, grad = self.log_marginal_likelihood(
                        theta, eval_gradient=True)
                    return -lml, -grad
                else:
                    return -self.log_marginal_likelihood(theta)
            # First optimize starting from theta specified in kernel
            optima = [(self._constrained_optimization(obj_func,
                                                      self.kernel_.theta,
                                                      self.kernel_.bounds))]
            # Additional runs are performed from log-uniform chosen initial
            # theta
            if self.n_restarts_optimizer > 0:
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite.")
                bounds = self.kernel_.bounds
                for iteration in range(self.n_restarts_optimizer):
                    theta_initial = \
                        self.rng.uniform(bounds[:, 0], bounds[:, 1])
                    optima.append(
                        self._constrained_optimization(obj_func, theta_initial,
                                                       bounds))
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = \
                self.log_marginal_likelihood(self.kernel_.theta)
        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_(self.X_train_)
        K[np.diag_indices_from(K)] += self.alpha
        self.L_ = cholesky(K, lower=True)  # Line 2
        self.alpha_ = cho_solve((self.L_, True), self.y_train_)  # Line 3
        return self
    def predict(self, X, return_std=False, return_cov=False):
        """Predict using the Gaussian process regression model
        We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its
        standard deviation (return_std=True) or covariance (return_cov=True)
        can also be returned. Note that at most one of the two can be requested.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Query points where the GP is evaluated
        return_std : bool, default: False
            If True, the standard-deviation of the predictive distribution at
            the query points is returned along with the mean.
        return_cov : bool, default: False
            If True, the covariance of the joint predictive distribution at
            the query points is returned along with the mean
        Returns
        -------
        y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of the predictive distribution at query points
        y_std : array, shape = (n_samples,), optional
            Standard deviation of predictive distribution at query points.
            Only returned when return_std is True.
        y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of the joint predictive distribution at query points.
            Only returned when return_cov is True.
        """
        if return_std and return_cov:
            raise RuntimeError(
                "Not returning standard deviation of predictions when "
                "returning full covariance.")
        X = check_array(X)
        if not hasattr(self, "X_train_"):  # Unfitted;predict based on GP prior
            y_mean = np.zeros(X.shape[0])
            if return_cov:
                y_cov = self.kernel(X)
                return y_mean, y_cov
            elif return_std:
                y_var = self.kernel.diag(X)
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
        else:  # Predict based on GP posterior
            K_trans = self.kernel_(X, self.X_train_)
            y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
            y_mean = self.y_train_mean + y_mean  # undo normalization
            if return_cov:
                v = cho_solve((self.L_, True), K_trans.T)  # Line 5
                y_cov = self.kernel_(X) - K_trans.dot(v)  # Line 6
                return y_mean, y_cov
            elif return_std:
                # compute inverse K_inv of K based on its Cholesky
                # decomposition L and its inverse L_inv
                L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
                K_inv = L_inv.dot(L_inv.T)
                # Compute variance of predictive distribution
                y_var = self.kernel_.diag(X)
                y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
                # Check if any of the variances is negative because of
                # numerical issues. If yes: set the variance to 0.
                y_var_negative = y_var < 0
                if np.any(y_var_negative):
                    warnings.warn("Predicted variances smaller than 0. "
                                  "Setting those variances to 0.")
                    y_var[y_var_negative] = 0.0
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
    def sample_y(self, X, n_samples=1, random_state=0):
        """Draw samples from Gaussian process and evaluate at X.
        Parameters
        ----------
        X : array-like, shape = (n_samples_X, n_features)
            Query points where the GP samples are evaluated
        n_samples : int, default: 1
            The number of samples drawn from the Gaussian process
        random_state: RandomState or an int seed (0 by default)
            A random number generator instance
        Returns
        -------
        y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
            Values of n_samples samples drawn from Gaussian process and
            evaluated at query points.
        """
        rng = check_random_state(random_state)
        y_mean, y_cov = self.predict(X, return_cov=True)
        if y_mean.ndim == 1:
            y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
        else:
            y_samples = \
                [rng.multivariate_normal(y_mean[:, i], y_cov,
                                         n_samples).T[:, np.newaxis]
                 for i in range(y_mean.shape[1])]
            y_samples = np.hstack(y_samples)
        return y_samples
    def log_marginal_likelihood(self, theta=None, eval_gradient=False):
        """Returns log-marginal likelihood of theta for training data.
        Parameters
        ----------
        theta : array-like, shape = (n_kernel_params,) or None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.
        eval_gradient : bool, default: False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.
        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.
        log_likelihood_gradient : array, shape = (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when eval_gradient is True.
        """
        if theta is None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_
        kernel = self.kernel_.clone_with_theta(theta)
        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)
        K[np.diag_indices_from(K)] += self.alpha
        try:
            L = cholesky(K, lower=True)  # Line 2
        except np.linalg.LinAlgError:
            return (-np.inf, np.zeros_like(theta)) \
                if eval_gradient else -np.inf
        # Support multi-dimensional output of self.y_train_
        y_train = self.y_train_
        if y_train.ndim == 1:
            y_train = y_train[:, np.newaxis]
        alpha = cho_solve((L, True), y_train)  # Line 3
        # Compute log-likelihood (compare line 7)
        log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
        log_likelihood_dims -= np.log(np.diag(L)).sum()
        log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
        log_likelihood = log_likelihood_dims.sum(-1)  # sum over dimensions
        if eval_gradient:  # compare Equation 5.9 from GPML
            tmp = np.einsum("ik,jk->ijk", alpha, alpha)  # k: output-dimension
            tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
            # Compute "0.5 * trace(tmp.dot(K_gradient))" without
            # constructing the full matrix tmp.dot(K_gradient) since only
            # its diagonal is required
            log_likelihood_gradient_dims = \
                0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
            log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
        if eval_gradient:
            return log_likelihood, log_likelihood_gradient
        else:
            return log_likelihood
    def _constrained_optimization(self, obj_func, initial_theta, bounds):
        if self.optimizer == "fmin_l_bfgs_b":
            theta_opt, func_min, convergence_dict = \
                fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
            if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              " state: %s" % convergence_dict)
        elif callable(self.optimizer):
            theta_opt, func_min = \
                self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError("Unknown optimizer %s." % self.optimizer)
        return theta_opt, func_min
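# --- Editor's illustrative usage sketch (not part of the original module) ---
# A minimal example of fitting the regressor defined above, assuming it is
# scikit-learn's GaussianProcessRegressor and that RBF can be imported from
# sklearn.gaussian_process.kernels. Shown only to illustrate the fit/predict API.
if __name__ == "__main__":
    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF
    demo_rng = np.random.RandomState(0)
    X_demo = demo_rng.uniform(0.0, 5.0, size=(20, 1))            # training inputs
    y_demo = np.sin(X_demo).ravel() + 0.1 * demo_rng.randn(20)   # noisy targets
    gpr = GaussianProcessRegressor(kernel=1.0 * RBF(length_scale=1.0), alpha=0.1)
    gpr.fit(X_demo, y_demo)
    y_mean, y_std = gpr.predict(X_demo, return_std=True)         # posterior mean and std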
 | 
	bsd-3-clause | 
| 
	lsiemens/lsiemens.github.io | 
	theory/fractional_calculus/code/old/FCC2.py | 
	1 | 
	1663 | 
	"""
Ideas about fractional calculus defined on C^2
J^b f(x, a) = f(x, a + b)
"""
import numpy
from matplotlib import pyplot
from scipy import special
def monomial(x, a, x_0, a_0):
    return (x - x_0)**(a - a_0)/special.gamma(a - a_0 + 1)
def exp(x, a, b):
    return b**(-a)*numpy.exp(b*x)
def projx(f, x, a):
    n = numpy.searchsorted(numpy.real(a), 0.0)
    pyplot.plot(x, f[-n, :])
    pyplot.show()
def proja(f, x, a):
    n = numpy.searchsorted(numpy.real(x), 0.0)
    pyplot.plot(a, f[:, -n])
    pyplot.show()
def plotR(f, vmin=-10, vmax=10):
    _plot_C3(numpy.real(f), vmin=vmin, vmax=vmax)
def plotI(f, vmin=-10, vmax=10):
    _plot_C3(numpy.imag(f), vmin=vmin, vmax=vmax)
def plotM(f, vmax=10):
    _plot_C3(numpy.abs(f), vmax=vmax)
def plotMl(f):
    _plot_C3(numpy.log(numpy.abs(f)))
def _plot_C3(f, vmin=None, vmax=None):
    pyplot.imshow(f, extent = [x_0, x_1, a_0, a_1], vmin=vmin, vmax=vmax)
    pyplot.show()
x_0, x_1, Nx = -5, 5, 1000
a_0, a_1, Na = -5, 5,  1000
X = numpy.linspace(x_0, x_1, Nx, dtype=complex)  # numpy.complex is deprecated; the builtin complex is equivalent
dx = (x_1 - x_0)/(Nx - 1)
da = (a_1 - a_0)/(Na - 1)
A = numpy.linspace(a_0, a_1, Na, dtype=complex)
domain_x, domain_a = numpy.meshgrid(X, A[::-1])
F = monomial(domain_x, domain_a, 0, -1)
G = monomial(domain_x, domain_a, 1, -1) + monomial(domain_x, domain_a, 1, 0)
G = -monomial(domain_x, domain_a, 1, -1) + 0.5*monomial(domain_x, domain_a, 0, -3)
G = (exp(domain_x, domain_a, 1.0j) + exp(domain_x, domain_a, -1.0j))/2.0
#G = (exp(domain_x, domain_a, 2.0j) - exp(domain_x, domain_a, -2.0j))/2.0
#G = F
Gp = numpy.gradient(G)
#G = Gp[1]
projx(G, X, A)
proja(G, X, A)
plotR(G)
plotI(G)
plotM(G)
plotMl(G)
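# Editor's note (illustrative, not part of the original script): with x_0 = a_0 = 0,
# monomial(x, a, 0, 0) reduces to x**a / Gamma(a + 1), so monomial(x, 1, 0, 0) == x
# and monomial(x, 0, 0, 0) == 1, matching the ordinary antiderivative relation
# J^1 1 = x implied by the rule J^b f(x, a) = f(x, a + b) stated in the docstring.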
 | 
	mit | 
| 
	DistributedSystemsGroup/YELP-DS | 
	Blending.py | 
	2 | 
	2128 | 
	#!/usr/bin/env python
# encoding: utf-8
"""
This code implements review text classification using Support Vector Machine, Support Vector Regression,
Decision Tree and Random Forest; the evaluation function has been implemented as well.
"""
from time import gmtime, strftime
from sklearn import ensemble, svm
import Scikit_Classification as sc
features = []
labels = []
   
def main():
    starttime = strftime("%Y-%m-%d %H:%M:%S",gmtime())
    
    config = {}
    execfile("params.conf", config)
    inputfile = config["histogram_dataset"]    
    trainingSamples = config["trainingSamples"]
    testingSamples = config["testingSamples"]
    numberOfSamples = trainingSamples + testingSamples
    rf_selectedFeatures = "all"
    svm_selectedFeatures = [20, 21, 22, 23, 24]
    
    rf_features, rf_labels = sc.Data_Preparation(inputfile, rf_selectedFeatures)
    svm_features, svm_labels = sc.Data_Preparation(inputfile, svm_selectedFeatures)
    Scikit_RandomForest_Model = ensemble.RandomForestClassifier(n_estimators=510, criterion='gini', max_depth=7,
                                                                 min_samples_split=2, min_samples_leaf=1, max_features='sqrt',
                                                                 bootstrap=True, oob_score=False, n_jobs=-1, random_state=None, verbose=0,
                                                                 min_density=None, compute_importances=None)
    Scikit_SVM_Model = svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, random_state=None)
    
    accuracy, testing_Labels, predict_Labels =  sc.Classification_Blending(Scikit_RandomForest_Model, rf_features, rf_labels, Scikit_SVM_Model, svm_features, svm_labels, trainingSamples, testingSamples)
    
    sc.Result_Evaluation('data/evaluation_result/evaluation_Blending.txt', accuracy, testing_Labels, predict_Labels)
    endtime = strftime("%Y-%m-%d %H:%M:%S",gmtime())
    print(starttime)
    print(endtime)
if __name__  == "__main__":
    main()
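# --- Editor's illustrative sketch (not part of the original script) ---
# main() loads its settings from "params.conf" via execfile, so that file must contain
# valid Python assignments. The keys below are the ones read above; the values are
# hypothetical placeholders. Example params.conf:
#     histogram_dataset = "data/histogram_dataset.csv"   # hypothetical path
#     trainingSamples = 8000
#     testingSamples = 2000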
 | 
	apache-2.0 | 
| 
	mayavanand/RMMAFinalProject | 
	azimuth/model_comparison.py | 
	1 | 
	31399 | 
	import predict as pd
import copy
import os
import numpy as np
import util
import shutil
import pickle
import pylab as plt
import pandas
import local_multiprocessing
import load_data
import features.featurization as feat
def check_feature_set_dims(feature_sets):
    F2 = None
    for set in feature_sets.keys():
        F = feature_sets[set].shape[0]
        if F2 is None: F2 = F
        assert F == F2, "not same # individuals for feature %s" % set
    assert feature_sets !={}, "features are empty, check learn_options"
def set_target(learn_options, classification):
    assert 'target_name' not in learn_options.keys() or learn_options['target_name'] is not None, "changed it to be automatically set here"
    if not classification:
        learn_options["target_name"] = learn_options['rank-transformed target name']
        learn_options["training_metric"] = 'spearmanr'
        learn_options['ground_truth_label'] = learn_options['target_name']
    else:
        learn_options["target_name"] = learn_options['binary target name']
        learn_options["training_metric"] = 'AUC'
        learn_options['ground_truth_label'] = learn_options['binary target name']
    if learn_options["V"]==3:
        assert learn_options['target_name']=='score_drug_gene_rank' or learn_options['target_name']=='score_drug_gene_threshold', "cannot use raw scores when merging data"
        assert learn_options["ground_truth_label"]=='score_drug_gene_rank' or learn_options["ground_truth_label"]=='score_drug_gene_threshold', "cannot use raw scores when merging data"
    return learn_options
def GP_setup(learn_options, likelihood='gaussian', degree=3, set_target_fn=set_target):
    learn_options["method"] = "GPy"
    learn_options['kernel degree'] = degree
    if likelihood == 'warped':
        learn_options['warpedGP'] = True
    else:
        learn_options['warpedGP'] = False
    learn_options = set_target_fn(learn_options, classification=False)
    return learn_options
def SVC_setup(learn_options, likelihood='gaussian', degree=3,  set_target_fn=set_target):
    learn_options["method"] = "SVC"
    learn_options = set_target_fn(learn_options, classification=True)
    return learn_options
def L1_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options["method"] = "linreg"
    learn_options["penalty"] = "L1"
    learn_options["feature_select"] = False
    if "alpha" not in learn_options.keys():
        learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
    learn_options["loss"] = "squared"
    return learn_options
def L2_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options["method"] = "linreg"
    learn_options["penalty"] = "L2"
    learn_options["feature_select"] = False
    if "alpha" not in learn_options.keys():
        learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
    learn_options["loss"] = "squared"
    return learn_options
def mean_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options['method'] = 'mean'
    return learn_options
def random_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options['method'] = 'random'
    return learn_options
def elasticnet_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options["method"] = "linreg"
    learn_options["penalty"] = "EN"
    learn_options["feature_select"] = False
    learn_options["loss"] = "squared"
    if "alpha" not in learn_options.keys():
        learn_options["alpha"] = np.array([1e-5*pow(2,x) for x in range(0,30)])
    return learn_options
def DNN_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options['method'] = 'DNN'
    learn_options['DNN target variable'] = 'score'  # alternative: 'score_drug_gene_quantized'
    # learn_options['DNN architecture'] = (119, 10, 10, 10, 2)
    return learn_options
def RF_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options['method'] = 'RandomForestRegressor'
    return learn_options
def doench_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=True)
    learn_options['method'] = 'doench'
    return learn_options
def sgrna_from_doench_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options['method'] = 'sgrna_from_doench'
    return learn_options
def linreg_setup(learn_options, set_target_fn=set_target):
    learn_options["method"] = "linreg"
    learn_options["penalty"] = "L1"
    learn_options["feature_select"] = False
    if "alpha" not in learn_options.keys():
        learn_options["alpha"] = np.array([0.0])
    learn_options["loss"] = "squared"
    learn_options = set_target_fn(learn_options, classification=False)
    return learn_options
def logregL1_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=True)
    learn_options["method"] = "logregL1"
    learn_options["penalty"] = "L1"
    learn_options["feature_select"] = False
    if "alpha" not in learn_options.keys():
        learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
    return learn_options
def LASSOs_ensemble_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options["method"] = "lasso_ensemble"
    learn_options["penalty"] = "L1"
    learn_options["feature_select"] = False
    if "alpha" not in learn_options.keys():
        learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
    learn_options["loss"] = "squared"
    return learn_options
def xu_et_al_setup(learn_options, set_target_fn=set_target):
    learn_options = set_target_fn(learn_options, classification=True)
    learn_options["method"] = "xu_et_al"
    return learn_options
def adaboost_setup(learn_options, num_estimators=100, max_depth=3, learning_rate=0.1, set_target_fn=set_target, model="AdaBoost"):
    """
    """
    learn_options = set_target_fn(learn_options, classification=False)
    if model=="AdaBoost":
        learn_options['method'] = "AdaBoostRegressor"
    elif model=="AdaBoostClassifier":
        learn_options['method'] = "AdaBoostClassifier"
    else:
        raise Exception("model must be either AdaBoost or AdaBoost Classifier")
    learn_options['adaboost_version'] = 'python' # "R" or "python"
    if 'adaboost_loss' not in learn_options.keys() and model=="AdaBoostRegressor":
        learn_options['adaboost_loss'] = 'ls' # alternatives: "lad", "huber", "quantile", see scikit docs for details
    if 'adaboost_alpha' not in learn_options.keys():
        learn_options['adaboost_alpha'] = 0.5 # this parameter is only used by the huber and quantile loss functions.
    if not learn_options['adaboost_CV']:
        learn_options['adaboost_learning_rate'] = learning_rate
        learn_options['adaboost_n_estimators'] = num_estimators
        learn_options['adaboost_max_depth'] = max_depth
    else:
        learn_options['adaboost_n_estimators'] = num_estimators
    return learn_options
def shared_setup(learn_options, order, test):
    if 'num_proc' not in learn_options.keys():
        learn_options['num_proc'] = None
    if 'num_thread_per_proc' not in learn_options.keys():
        learn_options['num_thread_per_proc'] = None
    num_proc = local_multiprocessing.configure(TEST=test, num_proc=learn_options["num_proc"],
                                                num_thread_per_proc=learn_options["num_thread_per_proc"])
    learn_options["num_proc"] = num_proc
    learn_options["order"] = order  # gets used many places in code, not just here
    if "cv" not in learn_options.keys():
        # if no CV preference is specified, use leave-one-gene-out
        learn_options["cv"] = "gene"
    if "normalize_features" not in learn_options.keys():
        # if no preference is specified, normalize features by default
        learn_options["normalize_features"] = True
    if "weighted" not in learn_options.keys():
        learn_options['weighted'] = None
    if "all pairs" not in learn_options.keys():
        learn_options["all pairs"] = False
    if "include_known_pairs" not in learn_options.keys():
        learn_options["include_known_pairs"] = False
    if "include_gene_guide_feature" not in learn_options.keys():
        learn_options["include_gene_guide_feature"] = 0 #used as window size, so 0 is none
    #these should default to true to match experiments before they were options:
    if "gc_features" not in learn_options.keys():
        learn_options["gc_features"] = True
    if "nuc_features" not in learn_options.keys():
        learn_options["nuc_features"] = True
    if 'train_genes' not in learn_options.keys():
        learn_options["train_genes"] = None
    if 'test_genes' not in learn_options.keys():
        learn_options["test_genes"] = None
    if "num_proc" not in learn_options:
        learn_options["num_proc"] = None
    if "num_thread_per_proc" not in learn_options:
        learn_options["num_thread_per_proc"] = None
    if 'seed' not in learn_options:
        learn_options['seed'] = 1
    if "flipV1target" not in learn_options:
        learn_options["flipV1target"] = False
    if 'num_genes_remove_train' not in learn_options:
        learn_options['num_genes_remove_train'] = None
    if "include_microhomology" not in learn_options:
        learn_options["include_microhomology"] = False
    if "algorithm_hyperparam_search" not in learn_options:
        learn_options["algorithm_hyperparam_search"] = "grid" # other options is bo for bayesian optimization
    return num_proc
def setup(test=False, order=1, learn_options=None, data_file=None, pam_audit=True, length_audit=True):
    num_proc = shared_setup(learn_options, order, test)
    assert "testing_non_binary_target_name" in learn_options.keys(), "need this in order to get metrics, though used to be not needed, so you may newly see this error"
    if learn_options["testing_non_binary_target_name"] not in ['ranks', 'raw', 'thrs']:
        raise Exception('learn_options["testing_non_binary_target_name"] must be in ["ranks", "raw", "thrs"]')
    Xdf, Y, gene_position, target_genes = load_data.from_file(data_file, learn_options)
    learn_options['all_genes'] = target_genes
    if test:
        learn_options["order"] = 1
    if 'convert_30mer_to_31mer' in learn_options and learn_options['convert_30mer_to_31mer'] is True:
        print "WARNING!!! converting 30 mer to 31 mer (and then cutting off first nucleotide to go back to 30mer with a right shift)"
        for i in range(Xdf.shape[0]):
            Xdf['30mer'].iloc[i] = util.convert_to_thirty_one(Xdf.iloc[i]["30mer"], Xdf.index.values[i][1], Xdf.iloc[i]["Strand"])
        # to_keep = Xdf['30mer'].isnull() == False
        # Xdf = Xdf[to_keep]
        # gene_position = gene_position[to_keep]
        # Y = Y[to_keep]
        Xdf["30mer"] = Xdf["30mer"].apply(lambda x: x[1:]) # chop the first nucleotide
    if learn_options.has_key('left_right_guide_ind') and learn_options['left_right_guide_ind'] is not None:
        seq_start, seq_end, expected_length = learn_options['left_right_guide_ind']
        Xdf['30mer'] = Xdf['30mer'].apply(lambda seq: seq[seq_start:seq_end])
    feature_sets = feat.featurize_data(Xdf, learn_options, Y, gene_position, pam_audit=pam_audit, length_audit=length_audit)
    np.random.seed(learn_options['seed'])
    return Y, feature_sets, target_genes, learn_options, num_proc
def run_models(models, orders, GP_likelihoods=['gaussian', 'warped'], WD_kernel_degrees=[3],
               adaboost_learning_rates=[0.1], adaboost_num_estimators=[100], adaboost_max_depths=[3],
               learn_options_set=None, test=False, CV=True, setup_function=setup, set_target_fn=set_target, pam_audit=True, length_audit=True):
    '''
    CV is set to False if you want to train a final model rather than cross-validate, although the call
    still goes through what looks like CV code
    '''
    results = {}
    assert learn_options_set is not None, "need to specify learn_options_set"
    all_learn_options = {}
    # shortened model names so they are easier to display on graphs
    feat_models_short = {'L1':"L1", 'L2':"L2", 'elasticnet':"EN", 'linreg':"LR",
                         'RandomForest': "RF",
                         'AdaBoost':"AB", 'AdaBoostClassifier':"ABClass", 'doench': 'doench',
                         "logregL1": "logregL1", "sgrna_from_doench":"sgrna_from_doench", 'SVC': 'SVC', 'xu_et_al': 'xu_et_al'}
    if not CV:
        print "Received option CV=False, so I'm training using all of the data"
        assert len(learn_options_set.keys()) == 1, "when CV is False, only 1 set of learn options is allowed"
        assert len(models) == 1, "when CV is False, only 1 model is allowed"
    for learn_options_str in learn_options_set.keys():
        # these options get augmented in setup
        partial_learn_opt = learn_options_set[learn_options_str]
        # if the model requires encoded features
        for model in models:
            # models requiring explicit featurization
            if model in feat_models_short.keys():
                for order in orders:
                    print "running %s, order %d for %s" % (model, order, learn_options_str)
                    Y, feature_sets, target_genes, learn_options, num_proc = setup_function(test=test, order=order, learn_options=partial_learn_opt, pam_audit=pam_audit, length_audit=length_audit) # TODO precompute features for all orders, as this is repeated for each model
                    
                    if model == 'L1':
                        learn_options_model = L1_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'L2':
                        learn_options_model = L2_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'elasticnet':
                        learn_options_model = elasticnet_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'linreg':
                        learn_options_model = linreg_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == "logregL1":
                        learn_options_model = logregL1_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'RandomForest':
                        learn_options_model = RF_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'SVC':
                        learn_options_model = SVC_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'doench':
                        learn_options_model = doench_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'sgrna_from_doench':
                        learn_options_model = sgrna_from_doench_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'xu_et_al':
                        learn_options_model = xu_et_al_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'AdaBoost' or model == 'AdaBoostClassifier':
                        for learning_rate in adaboost_learning_rates:
                            for num_estimators in adaboost_num_estimators:
                                for max_depth in adaboost_max_depths:
                                    learn_options_model = adaboost_setup(copy.deepcopy(learn_options), learning_rate=learning_rate, num_estimators=num_estimators, max_depth=max_depth, set_target_fn=set_target_fn, model=model)
                        model_string = feat_models_short[model] + '_or%d_md%d_lr%.2f_n%d_%s' % (learn_options_set[learn_options_str]["order"], max_depth, learning_rate, num_estimators, learn_options_str)
                    if model != 'AdaBoost':
                        model_string = feat_models_short[model] + '_ord%d_%s' % (learn_options_set[learn_options_str]["order"], learn_options_str)
                    results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model, TEST=test, CV=CV)
                    all_learn_options[model_string] = learn_options_model
            # if the model doesn't require explicit featurization
            else:
                assert setup_function==setup, "not yet modified to handle this"
                print "running %s for %s" % (model, learn_options_str)
                Y, feature_sets, target_genes, learn_options, num_proc = setup(test=test, order=1, learn_options=partial_learn_opt, pam_audit=pam_audit, length_audit=length_audit)
                if model == 'mean':
                    learn_options_model = mean_setup(copy.deepcopy(learn_options))
                elif model == 'random':
                    learn_options_model = random_setup(copy.deepcopy(learn_options))
                elif model == 'DNN':
                    learn_options_model = DNN_setup(copy.deepcopy(learn_options))
                elif model == 'GP':
                    for likelihood in GP_likelihoods:
                        for degree in WD_kernel_degrees:
                            learn_options_model = GP_setup(copy.deepcopy(learn_options), likelihood=likelihood, degree=degree)
                            model_string = '%s_%s_degree%d_%s' % (model, likelihood, degree, learn_options_str)
                            results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model,TEST=test, CV=CV)
                else:
                    raise NotImplementedError("model %s not supported" % model)
                # "GP" already calls pd.cross_validate() and has its own model_string, so skip this.
                if model != "GP":
                    model_string = model + '_%s' % learn_options_str
                    results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model, TEST=test, CV=CV)
            all_learn_options[model_string] = learn_options_model
    return results, all_learn_options
def pickle_runner_results(exp_name, results, all_learn_options, relpath="/../" + "results"):
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath) + relpath
    if not os.path.exists(dname):
        os.makedirs(dname)
        print "Created directory: %s" % str(dname)
    if exp_name is None:
        exp_name = results.keys()[0]
    myfile = dname+'/'+ exp_name + '.pickle'
    with open(myfile, 'wb') as f:
        print "writing results to %s" % myfile
        pickle.dump((results, all_learn_options), f, -1)
def runner(models, learn_options, GP_likelihoods=None, orders=None, WD_kernel_degrees=None, where='local', cluster_user='fusi', cluster='RR1-N13-09-H44', test=False, exp_name = None, **kwargs):
    if where == 'local':
        results, all_learn_options = run_models(models, orders=orders, GP_likelihoods=GP_likelihoods, learn_options_set=learn_options, WD_kernel_degrees=WD_kernel_degrees, test=test, **kwargs)
        all_metrics, gene_names = util.get_all_metrics(results, learn_options)
        util.plot_all_metrics(all_metrics, gene_names, all_learn_options, save=True)
        # for non-local (i.e. cluster), the comparable code is in cli_run_model.py
        pickle_runner_results(exp_name, results, all_learn_options)
        return results, all_learn_options, all_metrics, gene_names
    elif where == 'cluster':
        import cluster_job
        # create random cluster directory, dump learn options, and create cluster file
        tempdir, user, clust_filename = cluster_job.create(cluster_user, models, orders, WD_kernel_degrees, GP_likelihoods, exp_name=exp_name, learn_options=learn_options, **kwargs)
        # raw_input("Submit job to HPC and press any key when it's finished: ")
        # util.plot_cluster_results(directory=tempdir)
        #stdout = tempdir + r"/stdout"
        #stderr = tempdir + r"/stderr"
        #if not os.path.exists(stdout): os.makedirs(stdout)
        #if not os.path.exists(stderr): os.makedirs(stderr)
        return tempdir, clust_filename, user#, stdout, stderr
def save_final_model_V3(filename=None, include_position=True, learn_options=None, short_name='final', pam_audit=True, length_audit=True):
    '''
    run_models(produce_final_model=True) is what saves the model
    '''
    test = False
    assert filename is not None, "need to provide filename to save final model"
    if learn_options is None:
        if include_position:
            learn_options = {"V": 3,
                        'train_genes': load_data.get_V3_genes(),
                        'test_genes': load_data.get_V3_genes(),
                        "testing_non_binary_target_name": 'ranks',
                        'include_pi_nuc_feat': True,
                        "gc_features": True,
                        "pam_features": True,
                        "repeat_features": None,
                        "nuc_features": True,
                        "include_gene_position": True,
                        "include_NGGX_interaction": True,
                        "include_NGGXX_interaction": None,
                        "include_Tm": True,
                        "include_strand": False,
                        "include_gene_feature": False,
                        "include_gene_guide_feature": 0,
                        "extra pairs": False,
                        "weighted": None,
                        "training_metric": 'spearmanr',
                        "NDGC_k": 10,
                        "cv": "gene",
                        "include_gene_effect": False,
                        "include_drug": False,
                        "include_sgRNAscore": False,
                        'adaboost_loss' : 'ls', # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
                        'adaboost_alpha': 0.5, # this parameter is only used by the huber and quantile loss functions.
                        'normalize_features': False,
                        'adaboost_CV' : False
                        }
        else:
            learn_options = {"V": 3,
                'train_genes': load_data.get_V3_genes(),
                'test_genes': load_data.get_V3_genes(),
                "testing_non_binary_target_name": 'ranks',
                'include_pi_nuc_feat': True,
                "gc_features": True,
                "pam_features": True,
                "repeat_features": None,
                "nuc_features": True,
                "include_gene_position": False,
                "include_NGGX_interaction": True,
                "include_NGGXX_interaction": None,
                "include_Tm": True,
                "include_strand": False,
                "include_gene_feature": False,
                "include_gene_guide_feature": 0,
                "extra pairs": False,
                "weighted": None,
                "training_metric": 'spearmanr',
                "NDGC_k": 10,
                "cv": "gene",
                "include_gene_effect": False,
                "include_drug": False,
                "include_sgRNAscore": False,
                'adaboost_loss' : 'ls', # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
                'adaboost_alpha': 0.5, # this parameter is only used by the huber and quantile loss functions.
                'normalize_features': False,
                 'adaboost_CV' : False
                }
    learn_options_set = {short_name: learn_options}
    results, all_learn_options = run_models(["AdaBoost"], orders=[2], adaboost_learning_rates=[0.1],
                                            adaboost_max_depths=[3], adaboost_num_estimators=[100],
                                            learn_options_set=learn_options_set,
                                            test=test, CV=False, pam_audit=pam_audit, length_audit=length_audit)
    model = results.values()[0][3][0]
    with open(filename, 'wb') as f:
        pickle.dump((model, learn_options), f, -1)
    return model
def predict(seq, aa_cut=-1, percent_peptide=-1, model=None, model_file=None, pam_audit=True, length_audit=False, learn_options_override=None):
    """
    if pam_audit==False, then it will not check for GG in the expected position
    this is useful if predicting on PAM mismatches, such as with off-target
    """
    print "predict function running"
    # assert not (model is None and model_file is None), "you have to specify either a model or a model_file"
    assert isinstance(seq, (np.ndarray)), "Please ensure seq is a numpy array"
    assert len(seq[0]) > 0, "Make sure that seq is not empty"
    assert isinstance(seq[0], str), "Please ensure input sequences are in string format, i.e. 'AGAG' rather than ['A' 'G' 'A' 'G'] or alternate representations"
    if aa_cut is not None:
        assert len(aa_cut) > 0, "Make sure that aa_cut is not empty"
        assert isinstance(aa_cut, (np.ndarray)), "Please ensure aa_cut is a numpy array"
        assert np.all(np.isreal(aa_cut)), "amino-acid cut position needs to be a real number"
    if percent_peptide is not None:
        assert len(percent_peptide) > 0, "Make sure that percent_peptide is not empty"
        assert isinstance(percent_peptide, (np.ndarray)), "Please ensure percent_peptide is a numpy array"
        assert np.all(np.isreal(percent_peptide)), "percent_peptide needs to be a real number"
    if model_file is None:
        azimuth_saved_model_dir = os.path.join(os.path.dirname(__file__), 'saved_models')
        if np.any(percent_peptide == -1) or (percent_peptide is None and aa_cut is None):
            print("No model file specified, using V3_model_nopos")
            model_name = 'V3_model_nopos.pickle'
        else:
            print("No model file specified, using V3_model_full")
            model_name = 'V3_model_full.pickle'
        model_file = os.path.join(azimuth_saved_model_dir, model_name)
    if model is None:
        with open(model_file, 'rb') as f:
            model, learn_options = pickle.load(f)
        print model_file
        print learn_options
    else:
        model, learn_options = model
        
    learn_options["V"] = 2
    learn_options = override_learn_options(learn_options_override, learn_options)
    # Y, feature_sets, target_genes, learn_options, num_proc = setup(test=False, order=2, learn_options=learn_options, data_file=test_filename)
    # inputs, dim, dimsum, feature_names = pd.concatenate_feature_sets(feature_sets)
    Xdf = pandas.DataFrame(columns=[u'30mer', u'Strand'], data=zip(seq, ['NA' for x in range(len(seq))]))
    if np.all(percent_peptide != -1) and (percent_peptide is not None and aa_cut is not None):
        gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'], data=zip(percent_peptide, aa_cut))
    else:
        gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'], data=zip(np.ones(seq.shape[0])*-1, np.ones(seq.shape[0])*-1))
    feature_sets = feat.featurize_data(Xdf, learn_options, pandas.DataFrame(), gene_position, pam_audit=pam_audit, length_audit=length_audit)
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)
    # call to scikit-learn, returns a vector of predicted values
    preds = model.predict(inputs)
    # also check that predictions are not 0/1 from a classifier.predict() (instead of predict_proba() or decision_function())
    unique_preds = np.unique(preds)
    ok = False
    for pr in preds:
        if pr not in [0,1]:
            ok = True
    assert ok, "model returned only 0s and 1s"
    return preds
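# --- Editor's illustrative call sketch (not part of the original module) ---
# Typical usage passes numpy arrays of 30-mer guide sequences plus optional position
# information; -1 entries mean "unknown" and select the no-position model. The values
# below are hypothetical placeholders.
#     import numpy as np
#     seqs = np.array(['ACAGCTGATCTCCAGATATGACCATGGGTT'])
#     aa_cuts = np.array([-1])
#     peptide_pcts = np.array([-1])
#     scores = predict(seqs, aa_cuts, peptide_pcts)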
def override_learn_options(learn_options_override, learn_options):
    """
    override all keys seen in learn_options_override to alter learn_options
    """
    if learn_options_override is not None:
        for k in learn_options_override.keys():
            learn_options[k] = learn_options_override[k]
    return learn_options
def fill_learn_options(learn_options_fill, learn_options):
    """
    only fill in keys that are missing from learn_options, using learn_options_fill
    """
    if learn_options_fill is not None:
        for k in learn_options_fill.keys():
            if not learn_options.has_key(k):
                learn_options[k] = learn_options_fill[k]
    return learn_options
def write_results(predictions, file_to_predict):
    newfile = file_to_predict.replace(".csv", ".pred.csv")
    data = pandas.read_csv(file_to_predict)
    data['predictions'] = predictions
    data.to_csv(newfile)
    print "wrote results to %s" % newfile
    return data, newfile
if __name__ == '__main__':
    #save_final_model_V3(filename='azimuth/azure_models/V3_model_full.pickle', include_position=True)
    save_final_model_V3(filename='saved_models/model_8_nopos.pickle', include_position=False)
    save_final_model_V3(filename='saved_models/model_8.pickle', include_position=True)
    # predict('GGGCCGCTGTTGCAGGTGGCGGGTAGGATC', 'sense', 1200, 30.3, model_file='../saved_models/final_model_nicolo.pickle')
    learn_options = {"V": 3,
                "train_genes": load_data.get_V3_genes(),
                "test_genes": load_data.get_V3_genes(),
                "target_name": 'score_drug_gene_rank',
                "testing_non_binary_target_name": 'ranks',
                'include_pi_nuc_feat': True,
                "gc_features": True,
                "pam_features": True,
                "repeat_features": True,
                "nuc_features": True,
                "include_gene_position": True,
                "include_NGGX_interaction": None,
                "include_NGGXX_interaction": True,
                "include_Tm": True,
                "include_strand": False,
                "include_gene_feature": False,
                "include_gene_guide_feature": 0,
                "extra pairs": False,
                "weighted": None,
                "training_metric": 'spearmanr',
                "NDGC_k": 10,
                "cv": "gene",
                "adaboost_loss" : 'ls',
                "include_gene_effect": False,
                "include_drug": False,
                "include_sgRNAscore": False,
                'adaboost_loss' : 'ls', # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
                'adaboost_alpha': 0.5, # this parameter is only used by the huber and quantile loss functions.
                'adaboost_CV' : False
                }
    learn_options_set = {"post bug fix":learn_options}
    #runner(['AdaBoost'], learn_options_set, orders=[2], where='local', adaboost_learning_rates=[0.1],  adaboost_max_depths=[3], adaboost_num_estimators=[100], exp_name='post-index-fix')
# #util.feature_importances(results)
 | 
	bsd-3-clause | 
| 
	phoebe-project/phoebe2-docs | 
	2.1/tutorials/saving_and_loading.py | 
	1 | 
	2914 | 
	#!/usr/bin/env python
# coding: utf-8
# Saving and Loading
# ============================
# 
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle.  See [Building a System](building_a_system.ipynb) for more details.
# In[1]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger(clevel='INFO')
b = phoebe.default_binary()
# Saving a Bundle
# -----------------------
# 
# 
# In[2]:
b['incl@orbit'] = 56.789
# To save the Bundle to a file, we can call the [save](../api/phoebe.parameters.ParameterSet.save.md) method of the Bundle and pass a filename.
# In[3]:
print b.save('test.phoebe')
# We can now inspect the contents of the created file.
# 
# This file is in the JSON-format and is simply a list of dictionaries - where each dictionary represents the attributes of a single Parameter.
# 
# You could edit this file in a text-editor - but do be careful if changing any of the tags.  For example: if you want to change the component tag of one of your stars, make sure to change ALL instances of the component tag to match (as well as the hierarchy Parameter).
# In[4]:
get_ipython().system('head -n 30 test.phoebe')
# Loading a Bundle
# ----------------------
# To open an existing Bundle from the file we just created, call [Bundle.open](../api/phoebe.frontend.bundle.Bundle.open.md) and pass the filename.
# In[5]:
b2 = phoebe.Bundle.open('test.phoebe')
# Just to prove this worked, we can check to make sure we retained the changed value of inclination.
# In[6]:
print b2.get_value('incl@orbit')
# Support for Other Codes
# ------------------------------
# 
# ### Legacy
# 
# Importing from a PHOEBE Legacy file is as simple as passing the filename to [from_legacy](../api/phoebe.frontend.bundle.Bundle.from_legacy.md):
# In[7]:
b = phoebe.Bundle.from_legacy('legacy.phoebe')
# Exporting to a PHOEBE Legacy file is also possible (although note that some parameters don't translate exactly or are not supported in PHOEBE Legacy), via [b.export_legacy](../api/phoebe.frontend.bundle.Bundle.export_legacy.md).
# In[8]:
b.export_legacy('legacy_export.phoebe')
# For the parameters that could not be directly translated, you should see a warning message (if you have warning messages enabled in your logger).
# 
# We can now look at the beginning of the saved file and see that it matches the PHOEBE Legacy file-format.
# In[9]:
get_ipython().system('head -n 30 legacy_export.phoebe')
# Next
# ---------
# 
# Next up: let's learn all about [constraints](constraints.ipynb)
 | 
	gpl-3.0 | 
| 
	idlead/scikit-learn | 
	examples/linear_model/plot_lasso_lars.py | 
	363 | 
	1080 | 
	#!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
#         Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
 | 
	bsd-3-clause | 
| 
	sarahgrogan/scikit-learn | 
	sklearn/utils/multiclass.py | 
	83 | 
	12343 | 
	
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
    if hasattr(y, '__array__'):
        return np.unique(np.asarray(y))
    else:
        return set(y)
def _unique_indicator(y):
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels
    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else
          (because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels
    At the moment, we also don't allow "multiclass-multioutput" input type.
    Parameters
    ----------
    *ys : array-likes,
    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.
    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
    return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.
    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.
    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        return (len(y.data) == 0 or np.ptp(y.data) == 0 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def type_of_target(y):
    """Determine the type of data indicated by target `y`
    Parameters
    ----------
    y : array-like
    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.
    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic
    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.
    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.
    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            if not np.all(clf.classes_ == unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
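# Minimal usage sketch for _check_partial_fit_first_call (illustrative only;
# not part of the original module). It assumes any estimator that exposes a
# ``classes_`` attribute, e.g. sklearn.linear_model.SGDClassifier:
#
#     clf = SGDClassifier()                                    # no ``classes_`` yet
#     _check_partial_fit_first_call(clf, classes=[0, 1, 2])    # True, sets clf.classes_
#     _check_partial_fit_first_call(clf, classes=[0, 1, 2])    # False, classes_ unchanged
#     _check_partial_fit_first_call(clf, classes=[0, 1, 3])    # raises ValueError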
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data
    Parameters
    ----------
    y : array-like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.
    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column.
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # If an explicit zero was found, combine its weight with the
            # weight of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
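# Minimal, self-contained sketch of class_distribution (illustrative only; not
# part of the original module), guarded so it does not execute on import.
if __name__ == "__main__":
    y_demo = np.array([[1, 0],
                       [2, 0],
                       [2, 1]])
    demo_classes, demo_n_classes, demo_priors = class_distribution(y_demo)
    # demo_classes   -> [array([1, 2]), array([0, 1])]
    # demo_n_classes -> [2, 2]
    # demo_priors    -> [array([0.333..., 0.666...]), array([0.666..., 0.333...])]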
 | 
	bsd-3-clause | 
| 
	b0noI/AIF2 | 
	src/test/integration/python/threshold_p_for_first_filter_separator_character.py | 
	3 | 
	50964 | 
	# data collected by PropertyBasedSettingsTest.experimentWith_threshold_p_for_first_filter_separator_character
data = [
{"value": 0.000000, "errors": 55},
{"value": 0.000500, "errors": 55},
{"value": 0.001000, "errors": 55},
{"value": 0.001500, "errors": 54},
{"value": 0.002000, "errors": 54},
{"value": 0.002500, "errors": 54},
{"value": 0.003000, "errors": 53},
{"value": 0.003500, "errors": 53},
{"value": 0.004000, "errors": 53},
{"value": 0.004500, "errors": 53},
{"value": 0.005000, "errors": 53},
{"value": 0.005500, "errors": 53},
{"value": 0.006000, "errors": 53},
{"value": 0.006500, "errors": 53},
{"value": 0.007000, "errors": 53},
{"value": 0.007500, "errors": 53},
{"value": 0.008000, "errors": 53},
{"value": 0.008500, "errors": 53},
{"value": 0.009000, "errors": 53},
{"value": 0.009500, "errors": 53},
{"value": 0.010000, "errors": 53},
{"value": 0.010500, "errors": 53},
{"value": 0.011000, "errors": 53},
{"value": 0.011500, "errors": 53},
{"value": 0.012000, "errors": 53},
{"value": 0.012500, "errors": 53},
{"value": 0.013000, "errors": 53},
{"value": 0.013500, "errors": 53},
{"value": 0.014000, "errors": 53},
{"value": 0.014500, "errors": 53},
{"value": 0.015000, "errors": 53},
{"value": 0.015500, "errors": 53},
{"value": 0.016000, "errors": 53},
{"value": 0.016500, "errors": 53},
{"value": 0.017000, "errors": 53},
{"value": 0.017500, "errors": 53},
{"value": 0.018000, "errors": 53},
{"value": 0.018500, "errors": 53},
{"value": 0.019000, "errors": 53},
{"value": 0.019500, "errors": 53},
{"value": 0.020000, "errors": 53},
{"value": 0.020500, "errors": 53},
{"value": 0.021000, "errors": 53},
{"value": 0.021500, "errors": 53},
{"value": 0.022000, "errors": 53},
{"value": 0.022500, "errors": 53},
{"value": 0.023000, "errors": 53},
{"value": 0.023500, "errors": 53},
{"value": 0.024000, "errors": 53},
{"value": 0.024500, "errors": 53},
{"value": 0.025000, "errors": 53},
{"value": 0.025500, "errors": 53},
{"value": 0.026000, "errors": 53},
{"value": 0.026500, "errors": 53},
{"value": 0.027000, "errors": 53},
{"value": 0.027500, "errors": 53},
{"value": 0.028000, "errors": 53},
{"value": 0.028500, "errors": 53},
{"value": 0.029000, "errors": 53},
{"value": 0.029500, "errors": 53},
{"value": 0.030000, "errors": 53},
{"value": 0.030500, "errors": 53},
{"value": 0.031000, "errors": 53},
{"value": 0.031500, "errors": 53},
{"value": 0.032000, "errors": 53},
{"value": 0.032500, "errors": 53},
{"value": 0.033000, "errors": 53},
{"value": 0.033500, "errors": 53},
{"value": 0.034000, "errors": 53},
{"value": 0.034500, "errors": 53},
{"value": 0.035000, "errors": 53},
{"value": 0.035500, "errors": 53},
{"value": 0.036000, "errors": 53},
{"value": 0.036500, "errors": 53},
{"value": 0.037000, "errors": 53},
{"value": 0.037500, "errors": 53},
{"value": 0.038000, "errors": 53},
{"value": 0.038500, "errors": 53},
{"value": 0.039000, "errors": 53},
{"value": 0.039500, "errors": 53},
{"value": 0.040000, "errors": 53},
{"value": 0.040500, "errors": 53},
{"value": 0.041000, "errors": 53},
{"value": 0.041500, "errors": 53},
{"value": 0.042000, "errors": 53},
{"value": 0.042500, "errors": 53},
{"value": 0.043000, "errors": 53},
{"value": 0.043500, "errors": 53},
{"value": 0.044000, "errors": 53},
{"value": 0.044500, "errors": 53},
{"value": 0.045000, "errors": 53},
{"value": 0.045500, "errors": 53},
{"value": 0.046000, "errors": 53},
{"value": 0.046500, "errors": 53},
{"value": 0.047000, "errors": 53},
{"value": 0.047500, "errors": 53},
{"value": 0.048000, "errors": 53},
{"value": 0.048500, "errors": 53},
{"value": 0.049000, "errors": 53},
{"value": 0.049500, "errors": 53},
{"value": 0.050000, "errors": 53},
{"value": 0.050500, "errors": 53},
{"value": 0.051000, "errors": 53},
{"value": 0.051500, "errors": 53},
{"value": 0.052000, "errors": 53},
{"value": 0.052500, "errors": 53},
{"value": 0.053000, "errors": 53},
{"value": 0.053500, "errors": 53},
{"value": 0.054000, "errors": 53},
{"value": 0.054500, "errors": 53},
{"value": 0.055000, "errors": 53},
{"value": 0.055500, "errors": 53},
{"value": 0.056000, "errors": 53},
{"value": 0.056500, "errors": 53},
{"value": 0.057000, "errors": 53},
{"value": 0.057500, "errors": 53},
{"value": 0.058000, "errors": 53},
{"value": 0.058500, "errors": 53},
{"value": 0.059000, "errors": 53},
{"value": 0.059500, "errors": 53},
{"value": 0.060000, "errors": 53},
{"value": 0.060500, "errors": 53},
{"value": 0.061000, "errors": 53},
{"value": 0.061500, "errors": 53},
{"value": 0.062000, "errors": 53},
{"value": 0.062500, "errors": 53},
{"value": 0.063000, "errors": 53},
{"value": 0.063500, "errors": 53},
{"value": 0.064000, "errors": 53},
{"value": 0.064500, "errors": 53},
{"value": 0.065000, "errors": 53},
{"value": 0.065500, "errors": 53},
{"value": 0.066000, "errors": 53},
{"value": 0.066500, "errors": 53},
{"value": 0.067000, "errors": 53},
{"value": 0.067500, "errors": 53},
{"value": 0.068000, "errors": 53},
{"value": 0.068500, "errors": 53},
{"value": 0.069000, "errors": 53},
{"value": 0.069500, "errors": 53},
{"value": 0.070000, "errors": 53},
{"value": 0.070500, "errors": 53},
{"value": 0.071000, "errors": 53},
{"value": 0.071500, "errors": 53},
{"value": 0.072000, "errors": 53},
{"value": 0.072500, "errors": 53},
{"value": 0.073000, "errors": 53},
{"value": 0.073500, "errors": 53},
{"value": 0.074000, "errors": 53},
{"value": 0.074500, "errors": 53},
{"value": 0.075000, "errors": 53},
{"value": 0.075500, "errors": 53},
{"value": 0.076000, "errors": 53},
{"value": 0.076500, "errors": 53},
{"value": 0.077000, "errors": 53},
{"value": 0.077500, "errors": 53},
{"value": 0.078000, "errors": 53},
{"value": 0.078500, "errors": 53},
{"value": 0.079000, "errors": 53},
{"value": 0.079500, "errors": 53},
{"value": 0.080000, "errors": 53},
{"value": 0.080500, "errors": 53},
{"value": 0.081000, "errors": 53},
{"value": 0.081500, "errors": 53},
{"value": 0.082000, "errors": 53},
{"value": 0.082500, "errors": 53},
{"value": 0.083000, "errors": 53},
{"value": 0.083500, "errors": 53},
{"value": 0.084000, "errors": 53},
{"value": 0.084500, "errors": 53},
{"value": 0.085000, "errors": 53},
{"value": 0.085500, "errors": 53},
{"value": 0.086000, "errors": 53},
{"value": 0.086500, "errors": 53},
{"value": 0.087000, "errors": 53},
{"value": 0.087500, "errors": 53},
{"value": 0.088000, "errors": 53},
{"value": 0.088500, "errors": 53},
{"value": 0.089000, "errors": 53},
{"value": 0.089500, "errors": 53},
{"value": 0.090000, "errors": 55},
{"value": 0.090500, "errors": 55},
{"value": 0.091000, "errors": 55},
{"value": 0.091500, "errors": 55},
{"value": 0.092000, "errors": 55},
{"value": 0.092500, "errors": 55},
{"value": 0.093000, "errors": 55},
{"value": 0.093500, "errors": 55},
{"value": 0.094000, "errors": 55},
{"value": 0.094500, "errors": 55},
{"value": 0.095000, "errors": 55},
{"value": 0.095500, "errors": 55},
{"value": 0.096000, "errors": 55},
{"value": 0.096500, "errors": 55},
{"value": 0.097000, "errors": 55},
{"value": 0.097500, "errors": 55},
{"value": 0.098000, "errors": 54},
{"value": 0.098500, "errors": 54},
{"value": 0.099000, "errors": 54},
{"value": 0.099500, "errors": 54},
{"value": 0.100000, "errors": 54},
{"value": 0.100500, "errors": 54},
{"value": 0.101000, "errors": 54},
{"value": 0.101500, "errors": 54},
{"value": 0.102000, "errors": 54},
{"value": 0.102500, "errors": 54},
{"value": 0.103000, "errors": 54},
{"value": 0.103500, "errors": 54},
{"value": 0.104000, "errors": 54},
{"value": 0.104500, "errors": 54},
{"value": 0.105000, "errors": 54},
{"value": 0.105500, "errors": 54},
{"value": 0.106000, "errors": 54},
{"value": 0.106500, "errors": 54},
{"value": 0.107000, "errors": 54},
{"value": 0.107500, "errors": 54},
{"value": 0.108000, "errors": 54},
{"value": 0.108500, "errors": 54},
{"value": 0.109000, "errors": 54},
{"value": 0.109500, "errors": 54},
{"value": 0.110000, "errors": 54},
{"value": 0.110500, "errors": 54},
{"value": 0.111000, "errors": 54},
{"value": 0.111500, "errors": 54},
{"value": 0.112000, "errors": 54},
{"value": 0.112500, "errors": 54},
{"value": 0.113000, "errors": 54},
{"value": 0.113500, "errors": 57},
{"value": 0.114000, "errors": 57},
{"value": 0.114500, "errors": 57},
{"value": 0.115000, "errors": 57},
{"value": 0.115500, "errors": 60},
{"value": 0.116000, "errors": 63},
{"value": 0.116500, "errors": 63},
{"value": 0.117000, "errors": 65},
{"value": 0.117500, "errors": 67},
{"value": 0.118000, "errors": 67},
{"value": 0.118500, "errors": 67},
{"value": 0.119000, "errors": 69},
{"value": 0.119500, "errors": 69},
{"value": 0.120000, "errors": 73},
{"value": 0.120500, "errors": 75},
{"value": 0.121000, "errors": 77},
{"value": 0.121500, "errors": 81},
{"value": 0.122000, "errors": 83},
{"value": 0.122500, "errors": 87},
{"value": 0.123000, "errors": 89},
{"value": 0.123500, "errors": 89},
{"value": 0.124000, "errors": 92},
{"value": 0.124500, "errors": 92},
{"value": 0.125000, "errors": 92},
{"value": 0.125500, "errors": 95},
{"value": 0.126000, "errors": 95},
{"value": 0.126500, "errors": 98},
{"value": 0.127000, "errors": 98},
{"value": 0.127500, "errors": 101},
{"value": 0.128000, "errors": 101},
{"value": 0.128500, "errors": 101},
{"value": 0.129000, "errors": 103},
{"value": 0.129500, "errors": 103},
{"value": 0.130000, "errors": 103},
{"value": 0.130500, "errors": 103},
{"value": 0.131000, "errors": 105},
{"value": 0.131500, "errors": 107},
{"value": 0.132000, "errors": 107},
{"value": 0.132500, "errors": 109},
{"value": 0.133000, "errors": 109},
{"value": 0.133500, "errors": 109},
{"value": 0.134000, "errors": 109},
{"value": 0.134500, "errors": 109},
{"value": 0.135000, "errors": 112},
{"value": 0.135500, "errors": 118},
{"value": 0.136000, "errors": 119},
{"value": 0.136500, "errors": 122},
{"value": 0.137000, "errors": 122},
{"value": 0.137500, "errors": 125},
{"value": 0.138000, "errors": 127},
{"value": 0.138500, "errors": 127},
{"value": 0.139000, "errors": 132},
{"value": 0.139500, "errors": 138},
{"value": 0.140000, "errors": 138},
{"value": 0.140500, "errors": 140},
{"value": 0.141000, "errors": 140},
{"value": 0.141500, "errors": 140},
{"value": 0.142000, "errors": 144},
{"value": 0.142500, "errors": 144},
{"value": 0.143000, "errors": 147},
{"value": 0.143500, "errors": 153},
{"value": 0.144000, "errors": 155},
{"value": 0.144500, "errors": 154},
{"value": 0.145000, "errors": 158},
{"value": 0.145500, "errors": 171},
{"value": 0.146000, "errors": 177},
{"value": 0.146500, "errors": 180},
{"value": 0.147000, "errors": 186},
{"value": 0.147500, "errors": 188},
{"value": 0.148000, "errors": 194},
{"value": 0.148500, "errors": 196},
{"value": 0.149000, "errors": 208},
{"value": 0.149500, "errors": 209},
{"value": 0.150000, "errors": 215},
{"value": 0.150500, "errors": 225},
{"value": 0.151000, "errors": 233},
{"value": 0.151500, "errors": 254},
{"value": 0.152000, "errors": 261},
{"value": 0.152500, "errors": 270},
{"value": 0.153000, "errors": 279},
{"value": 0.153500, "errors": 284},
{"value": 0.154000, "errors": 294},
{"value": 0.154500, "errors": 297},
{"value": 0.155000, "errors": 301},
{"value": 0.155500, "errors": 315},
{"value": 0.156000, "errors": 324},
{"value": 0.156500, "errors": 326},
{"value": 0.157000, "errors": 334},
{"value": 0.157500, "errors": 341},
{"value": 0.158000, "errors": 346},
{"value": 0.158500, "errors": 354},
{"value": 0.159000, "errors": 365},
{"value": 0.159500, "errors": 371},
{"value": 0.160000, "errors": 388},
{"value": 0.160500, "errors": 400},
{"value": 0.161000, "errors": 412},
{"value": 0.161500, "errors": 414},
{"value": 0.162000, "errors": 419},
{"value": 0.162500, "errors": 428},
{"value": 0.163000, "errors": 429},
{"value": 0.163500, "errors": 434},
{"value": 0.164000, "errors": 438},
{"value": 0.164500, "errors": 449},
{"value": 0.165000, "errors": 452},
{"value": 0.165500, "errors": 462},
{"value": 0.166000, "errors": 475},
{"value": 0.166500, "errors": 478},
{"value": 0.167000, "errors": 478},
{"value": 0.167500, "errors": 478},
{"value": 0.168000, "errors": 488},
{"value": 0.168500, "errors": 492},
{"value": 0.169000, "errors": 498},
{"value": 0.169500, "errors": 504},
{"value": 0.170000, "errors": 509},
{"value": 0.170500, "errors": 521},
{"value": 0.171000, "errors": 525},
{"value": 0.171500, "errors": 530},
{"value": 0.172000, "errors": 534},
{"value": 0.172500, "errors": 549},
{"value": 0.173000, "errors": 559},
{"value": 0.173500, "errors": 565},
{"value": 0.174000, "errors": 570},
{"value": 0.174500, "errors": 575},
{"value": 0.175000, "errors": 579},
{"value": 0.175500, "errors": 587},
{"value": 0.176000, "errors": 588},
{"value": 0.176500, "errors": 594},
{"value": 0.177000, "errors": 600},
{"value": 0.177500, "errors": 606},
{"value": 0.178000, "errors": 623},
{"value": 0.178500, "errors": 627},
{"value": 0.179000, "errors": 637},
{"value": 0.179500, "errors": 643},
{"value": 0.180000, "errors": 643},
{"value": 0.180500, "errors": 648},
{"value": 0.181000, "errors": 650},
{"value": 0.181500, "errors": 651},
{"value": 0.182000, "errors": 656},
{"value": 0.182500, "errors": 665},
{"value": 0.183000, "errors": 666},
{"value": 0.183500, "errors": 669},
{"value": 0.184000, "errors": 673},
{"value": 0.184500, "errors": 675},
{"value": 0.185000, "errors": 677},
{"value": 0.185500, "errors": 679},
{"value": 0.186000, "errors": 680},
{"value": 0.186500, "errors": 686},
{"value": 0.187000, "errors": 686},
{"value": 0.187500, "errors": 688},
{"value": 0.188000, "errors": 691},
{"value": 0.188500, "errors": 693},
{"value": 0.189000, "errors": 701},
{"value": 0.189500, "errors": 701},
{"value": 0.190000, "errors": 704},
{"value": 0.190500, "errors": 704},
{"value": 0.191000, "errors": 707},
{"value": 0.191500, "errors": 707},
{"value": 0.192000, "errors": 709},
{"value": 0.192500, "errors": 711},
{"value": 0.193000, "errors": 717},
{"value": 0.193500, "errors": 717},
{"value": 0.194000, "errors": 719},
{"value": 0.194500, "errors": 720},
{"value": 0.195000, "errors": 721},
{"value": 0.195500, "errors": 721},
{"value": 0.196000, "errors": 721},
{"value": 0.196500, "errors": 721},
{"value": 0.197000, "errors": 721},
{"value": 0.197500, "errors": 721},
{"value": 0.198000, "errors": 724},
{"value": 0.198500, "errors": 724},
{"value": 0.199000, "errors": 724},
{"value": 0.199500, "errors": 726},
{"value": 0.200000, "errors": 726},
{"value": 0.200500, "errors": 726},
{"value": 0.201000, "errors": 730},
{"value": 0.201500, "errors": 735},
{"value": 0.202000, "errors": 735},
{"value": 0.202500, "errors": 735},
{"value": 0.203000, "errors": 735},
{"value": 0.203500, "errors": 736},
{"value": 0.204000, "errors": 736},
{"value": 0.204500, "errors": 736},
{"value": 0.205000, "errors": 736},
{"value": 0.205500, "errors": 736},
{"value": 0.206000, "errors": 736},
{"value": 0.206500, "errors": 738},
{"value": 0.207000, "errors": 738},
{"value": 0.207500, "errors": 738},
{"value": 0.208000, "errors": 738},
{"value": 0.208500, "errors": 738},
{"value": 0.209000, "errors": 739},
{"value": 0.209500, "errors": 740},
{"value": 0.210000, "errors": 743},
{"value": 0.210500, "errors": 743},
{"value": 0.211000, "errors": 745},
{"value": 0.211500, "errors": 745},
{"value": 0.212000, "errors": 745},
{"value": 0.212500, "errors": 747},
{"value": 0.213000, "errors": 747},
{"value": 0.213500, "errors": 747},
{"value": 0.214000, "errors": 747},
{"value": 0.214500, "errors": 747},
{"value": 0.215000, "errors": 747},
{"value": 0.215500, "errors": 747},
{"value": 0.216000, "errors": 747},
{"value": 0.216500, "errors": 747},
{"value": 0.217000, "errors": 747},
{"value": 0.217500, "errors": 749},
{"value": 0.218000, "errors": 749},
{"value": 0.218500, "errors": 749},
{"value": 0.219000, "errors": 749},
{"value": 0.219500, "errors": 749},
{"value": 0.220000, "errors": 750},
{"value": 0.220500, "errors": 750},
{"value": 0.221000, "errors": 750},
{"value": 0.221500, "errors": 750},
{"value": 0.222000, "errors": 750},
{"value": 0.222500, "errors": 750},
{"value": 0.223000, "errors": 750},
{"value": 0.223500, "errors": 750},
{"value": 0.224000, "errors": 750},
{"value": 0.224500, "errors": 750},
{"value": 0.225000, "errors": 750},
{"value": 0.225500, "errors": 750},
{"value": 0.226000, "errors": 750},
{"value": 0.226500, "errors": 750},
{"value": 0.227000, "errors": 750},
{"value": 0.227500, "errors": 750},
{"value": 0.228000, "errors": 750},
{"value": 0.228500, "errors": 750},
{"value": 0.229000, "errors": 750},
{"value": 0.229500, "errors": 750},
{"value": 0.230000, "errors": 750},
{"value": 0.230500, "errors": 750},
{"value": 0.231000, "errors": 750},
{"value": 0.231500, "errors": 750},
{"value": 0.232000, "errors": 750},
{"value": 0.232500, "errors": 750},
{"value": 0.233000, "errors": 750},
{"value": 0.233500, "errors": 750},
{"value": 0.234000, "errors": 750},
{"value": 0.234500, "errors": 751},
{"value": 0.235000, "errors": 751},
{"value": 0.235500, "errors": 751},
{"value": 0.236000, "errors": 751},
{"value": 0.236500, "errors": 751},
{"value": 0.237000, "errors": 751},
{"value": 0.237500, "errors": 751},
{"value": 0.238000, "errors": 751},
{"value": 0.238500, "errors": 752},
{"value": 0.239000, "errors": 752},
{"value": 0.239500, "errors": 752},
{"value": 0.240000, "errors": 754},
{"value": 0.240500, "errors": 754},
{"value": 0.241000, "errors": 754},
{"value": 0.241500, "errors": 754},
{"value": 0.242000, "errors": 754},
{"value": 0.242500, "errors": 754},
{"value": 0.243000, "errors": 754},
{"value": 0.243500, "errors": 754},
{"value": 0.244000, "errors": 754},
{"value": 0.244500, "errors": 754},
{"value": 0.245000, "errors": 754},
{"value": 0.245500, "errors": 754},
{"value": 0.246000, "errors": 754},
{"value": 0.246500, "errors": 754},
{"value": 0.247000, "errors": 754},
{"value": 0.247500, "errors": 754},
{"value": 0.248000, "errors": 754},
{"value": 0.248500, "errors": 754},
{"value": 0.249000, "errors": 754},
{"value": 0.249500, "errors": 754},
{"value": 0.250000, "errors": 754},
{"value": 0.250500, "errors": 754},
{"value": 0.251000, "errors": 754},
{"value": 0.251500, "errors": 754},
{"value": 0.252000, "errors": 754},
{"value": 0.252500, "errors": 754},
{"value": 0.253000, "errors": 754},
{"value": 0.253500, "errors": 754},
{"value": 0.254000, "errors": 754},
{"value": 0.254500, "errors": 754},
{"value": 0.255000, "errors": 754},
{"value": 0.255500, "errors": 754},
{"value": 0.256000, "errors": 754},
{"value": 0.256500, "errors": 754},
{"value": 0.257000, "errors": 754},
{"value": 0.257500, "errors": 754},
{"value": 0.258000, "errors": 754},
{"value": 0.258500, "errors": 754},
{"value": 0.259000, "errors": 754},
{"value": 0.259500, "errors": 754},
{"value": 0.260000, "errors": 754},
{"value": 0.260500, "errors": 754},
{"value": 0.261000, "errors": 754},
{"value": 0.261500, "errors": 754},
{"value": 0.262000, "errors": 754},
{"value": 0.262500, "errors": 754},
{"value": 0.263000, "errors": 754},
{"value": 0.263500, "errors": 754},
{"value": 0.264000, "errors": 754},
{"value": 0.264500, "errors": 754},
{"value": 0.265000, "errors": 754},
{"value": 0.265500, "errors": 754},
{"value": 0.266000, "errors": 754},
{"value": 0.266500, "errors": 754},
{"value": 0.267000, "errors": 754},
{"value": 0.267500, "errors": 754},
{"value": 0.268000, "errors": 754},
{"value": 0.268500, "errors": 754},
{"value": 0.269000, "errors": 754},
{"value": 0.269500, "errors": 754},
{"value": 0.270000, "errors": 754},
{"value": 0.270500, "errors": 754},
{"value": 0.271000, "errors": 754},
{"value": 0.271500, "errors": 754},
{"value": 0.272000, "errors": 754},
{"value": 0.272500, "errors": 754},
{"value": 0.273000, "errors": 754},
{"value": 0.273500, "errors": 754},
{"value": 0.274000, "errors": 754},
{"value": 0.274500, "errors": 754},
{"value": 0.275000, "errors": 754},
{"value": 0.275500, "errors": 754},
{"value": 0.276000, "errors": 754},
{"value": 0.276500, "errors": 754},
{"value": 0.277000, "errors": 754},
{"value": 0.277500, "errors": 754},
{"value": 0.278000, "errors": 754},
{"value": 0.278500, "errors": 754},
{"value": 0.279000, "errors": 754},
{"value": 0.279500, "errors": 754},
{"value": 0.280000, "errors": 754},
{"value": 0.280500, "errors": 754},
{"value": 0.281000, "errors": 754},
{"value": 0.281500, "errors": 754},
{"value": 0.282000, "errors": 754},
{"value": 0.282500, "errors": 754},
{"value": 0.283000, "errors": 753},
{"value": 0.283500, "errors": 753},
{"value": 0.284000, "errors": 753},
{"value": 0.284500, "errors": 753},
{"value": 0.285000, "errors": 753},
{"value": 0.285500, "errors": 753},
{"value": 0.286000, "errors": 753},
{"value": 0.286500, "errors": 753},
{"value": 0.287000, "errors": 753},
{"value": 0.287500, "errors": 753},
{"value": 0.288000, "errors": 753},
{"value": 0.288500, "errors": 753},
{"value": 0.289000, "errors": 753},
{"value": 0.289500, "errors": 753},
{"value": 0.290000, "errors": 753},
{"value": 0.290500, "errors": 753},
{"value": 0.291000, "errors": 753},
{"value": 0.291500, "errors": 753},
{"value": 0.292000, "errors": 753},
{"value": 0.292500, "errors": 753},
{"value": 0.293000, "errors": 753},
{"value": 0.293500, "errors": 753},
{"value": 0.294000, "errors": 753},
{"value": 0.294500, "errors": 753},
{"value": 0.295000, "errors": 753},
{"value": 0.295500, "errors": 753},
{"value": 0.296000, "errors": 753},
{"value": 0.296500, "errors": 753},
{"value": 0.297000, "errors": 753},
{"value": 0.297500, "errors": 753},
{"value": 0.298000, "errors": 753},
{"value": 0.298500, "errors": 753},
{"value": 0.299000, "errors": 753},
{"value": 0.299500, "errors": 753},
{"value": 0.300000, "errors": 753},
{"value": 0.300500, "errors": 753},
{"value": 0.301000, "errors": 753},
{"value": 0.301500, "errors": 753},
{"value": 0.302000, "errors": 753},
{"value": 0.302500, "errors": 753},
{"value": 0.303000, "errors": 753},
{"value": 0.303500, "errors": 753},
{"value": 0.304000, "errors": 753},
{"value": 0.304500, "errors": 753},
{"value": 0.305000, "errors": 753},
{"value": 0.305500, "errors": 753},
{"value": 0.306000, "errors": 753},
{"value": 0.306500, "errors": 752},
{"value": 0.307000, "errors": 752},
{"value": 0.307500, "errors": 752},
{"value": 0.308000, "errors": 752},
{"value": 0.308500, "errors": 752},
{"value": 0.309000, "errors": 752},
{"value": 0.309500, "errors": 752},
{"value": 0.310000, "errors": 752},
{"value": 0.310500, "errors": 752},
{"value": 0.311000, "errors": 752},
{"value": 0.311500, "errors": 752},
{"value": 0.312000, "errors": 752},
{"value": 0.312500, "errors": 752},
{"value": 0.313000, "errors": 752},
{"value": 0.313500, "errors": 752},
{"value": 0.314000, "errors": 752},
{"value": 0.314500, "errors": 752},
{"value": 0.315000, "errors": 752},
{"value": 0.315500, "errors": 752},
{"value": 0.316000, "errors": 752},
{"value": 0.316500, "errors": 752},
{"value": 0.317000, "errors": 752},
{"value": 0.317500, "errors": 752},
{"value": 0.318000, "errors": 752},
{"value": 0.318500, "errors": 752},
{"value": 0.319000, "errors": 752},
{"value": 0.319500, "errors": 752},
{"value": 0.320000, "errors": 752},
{"value": 0.320500, "errors": 752},
{"value": 0.321000, "errors": 752},
{"value": 0.321500, "errors": 752},
{"value": 0.322000, "errors": 752},
{"value": 0.322500, "errors": 752},
{"value": 0.323000, "errors": 752},
{"value": 0.323500, "errors": 752},
{"value": 0.324000, "errors": 752},
{"value": 0.324500, "errors": 752},
{"value": 0.325000, "errors": 752},
{"value": 0.325500, "errors": 752},
{"value": 0.326000, "errors": 752},
{"value": 0.326500, "errors": 752},
{"value": 0.327000, "errors": 752},
{"value": 0.327500, "errors": 752},
{"value": 0.328000, "errors": 752},
{"value": 0.328500, "errors": 752},
{"value": 0.329000, "errors": 752},
{"value": 0.329500, "errors": 752},
{"value": 0.330000, "errors": 752},
{"value": 0.330500, "errors": 752},
{"value": 0.331000, "errors": 752},
{"value": 0.331500, "errors": 752},
{"value": 0.332000, "errors": 752},
{"value": 0.332500, "errors": 752},
{"value": 0.333000, "errors": 752},
{"value": 0.333500, "errors": 752},
{"value": 0.334000, "errors": 752},
{"value": 0.334500, "errors": 752},
{"value": 0.335000, "errors": 752},
{"value": 0.335500, "errors": 752},
{"value": 0.336000, "errors": 752},
{"value": 0.336500, "errors": 752},
{"value": 0.337000, "errors": 752},
{"value": 0.337500, "errors": 752},
{"value": 0.338000, "errors": 752},
{"value": 0.338500, "errors": 752},
{"value": 0.339000, "errors": 752},
{"value": 0.339500, "errors": 752},
{"value": 0.340000, "errors": 752},
{"value": 0.340500, "errors": 752},
{"value": 0.341000, "errors": 752},
{"value": 0.341500, "errors": 752},
{"value": 0.342000, "errors": 752},
{"value": 0.342500, "errors": 752},
{"value": 0.343000, "errors": 751},
{"value": 0.343500, "errors": 751},
{"value": 0.344000, "errors": 751},
{"value": 0.344500, "errors": 751},
{"value": 0.345000, "errors": 751},
{"value": 0.345500, "errors": 751},
{"value": 0.346000, "errors": 751},
{"value": 0.346500, "errors": 751},
{"value": 0.347000, "errors": 751},
{"value": 0.347500, "errors": 751},
{"value": 0.348000, "errors": 751},
{"value": 0.348500, "errors": 751},
{"value": 0.349000, "errors": 751},
{"value": 0.349500, "errors": 751},
{"value": 0.350000, "errors": 751},
{"value": 0.350500, "errors": 751},
{"value": 0.351000, "errors": 751},
{"value": 0.351500, "errors": 751},
{"value": 0.352000, "errors": 751},
{"value": 0.352500, "errors": 751},
{"value": 0.353000, "errors": 751},
{"value": 0.353500, "errors": 751},
{"value": 0.354000, "errors": 751},
{"value": 0.354500, "errors": 751},
{"value": 0.355000, "errors": 751},
{"value": 0.355500, "errors": 751},
{"value": 0.356000, "errors": 751},
{"value": 0.356500, "errors": 751},
{"value": 0.357000, "errors": 751},
{"value": 0.357500, "errors": 751},
{"value": 0.358000, "errors": 751},
{"value": 0.358500, "errors": 751},
{"value": 0.359000, "errors": 751},
{"value": 0.359500, "errors": 751},
{"value": 0.360000, "errors": 751},
{"value": 0.360500, "errors": 751},
{"value": 0.361000, "errors": 751},
{"value": 0.361500, "errors": 751},
{"value": 0.362000, "errors": 751},
{"value": 0.362500, "errors": 751},
{"value": 0.363000, "errors": 751},
{"value": 0.363500, "errors": 751},
{"value": 0.364000, "errors": 751},
{"value": 0.364500, "errors": 751},
{"value": 0.365000, "errors": 751},
{"value": 0.365500, "errors": 751},
{"value": 0.366000, "errors": 751},
{"value": 0.366500, "errors": 751},
{"value": 0.367000, "errors": 751},
{"value": 0.367500, "errors": 751},
{"value": 0.368000, "errors": 751},
{"value": 0.368500, "errors": 751},
{"value": 0.369000, "errors": 751},
{"value": 0.369500, "errors": 751},
{"value": 0.370000, "errors": 751},
{"value": 0.370500, "errors": 751},
{"value": 0.371000, "errors": 751},
{"value": 0.371500, "errors": 751},
{"value": 0.372000, "errors": 751},
{"value": 0.372500, "errors": 751},
{"value": 0.373000, "errors": 751},
{"value": 0.373500, "errors": 751},
{"value": 0.374000, "errors": 751},
{"value": 0.374500, "errors": 751},
{"value": 0.375000, "errors": 751},
{"value": 0.375500, "errors": 751},
{"value": 0.376000, "errors": 751},
{"value": 0.376500, "errors": 751},
{"value": 0.377000, "errors": 751},
{"value": 0.377500, "errors": 751},
{"value": 0.378000, "errors": 751},
{"value": 0.378500, "errors": 751},
{"value": 0.379000, "errors": 751},
{"value": 0.379500, "errors": 751},
{"value": 0.380000, "errors": 751},
{"value": 0.380500, "errors": 751},
{"value": 0.381000, "errors": 751},
{"value": 0.381500, "errors": 751},
{"value": 0.382000, "errors": 751},
{"value": 0.382500, "errors": 751},
{"value": 0.383000, "errors": 751},
{"value": 0.383500, "errors": 751},
{"value": 0.384000, "errors": 751},
{"value": 0.384500, "errors": 751},
{"value": 0.385000, "errors": 751},
{"value": 0.385500, "errors": 751},
{"value": 0.386000, "errors": 751},
{"value": 0.386500, "errors": 751},
{"value": 0.387000, "errors": 751},
{"value": 0.387500, "errors": 751},
{"value": 0.388000, "errors": 751},
{"value": 0.388500, "errors": 751},
{"value": 0.389000, "errors": 751},
{"value": 0.389500, "errors": 751},
{"value": 0.390000, "errors": 751},
{"value": 0.390500, "errors": 751},
{"value": 0.391000, "errors": 751},
{"value": 0.391500, "errors": 751},
{"value": 0.392000, "errors": 751},
{"value": 0.392500, "errors": 751},
{"value": 0.393000, "errors": 751},
{"value": 0.393500, "errors": 751},
{"value": 0.394000, "errors": 751},
{"value": 0.394500, "errors": 751},
{"value": 0.395000, "errors": 751},
{"value": 0.395500, "errors": 751},
{"value": 0.396000, "errors": 751},
{"value": 0.396500, "errors": 751},
{"value": 0.397000, "errors": 751},
{"value": 0.397500, "errors": 751},
{"value": 0.398000, "errors": 751},
{"value": 0.398500, "errors": 751},
{"value": 0.399000, "errors": 751},
{"value": 0.399500, "errors": 751},
{"value": 0.400000, "errors": 751},
{"value": 0.400500, "errors": 751},
{"value": 0.401000, "errors": 751},
{"value": 0.401500, "errors": 751},
{"value": 0.402000, "errors": 751},
{"value": 0.402500, "errors": 751},
{"value": 0.403000, "errors": 751},
{"value": 0.403500, "errors": 751},
{"value": 0.404000, "errors": 751},
{"value": 0.404500, "errors": 751},
{"value": 0.405000, "errors": 751},
{"value": 0.405500, "errors": 751},
{"value": 0.406000, "errors": 751},
{"value": 0.406500, "errors": 751},
{"value": 0.407000, "errors": 751},
{"value": 0.407500, "errors": 751},
{"value": 0.408000, "errors": 751},
{"value": 0.408500, "errors": 751},
{"value": 0.409000, "errors": 751},
{"value": 0.409500, "errors": 751},
{"value": 0.410000, "errors": 751},
{"value": 0.410500, "errors": 751},
{"value": 0.411000, "errors": 751},
{"value": 0.411500, "errors": 751},
{"value": 0.412000, "errors": 751},
{"value": 0.412500, "errors": 751},
{"value": 0.413000, "errors": 751},
{"value": 0.413500, "errors": 751},
{"value": 0.414000, "errors": 751},
{"value": 0.414500, "errors": 751},
{"value": 0.415000, "errors": 751},
{"value": 0.415500, "errors": 751},
{"value": 0.416000, "errors": 751},
{"value": 0.416500, "errors": 751},
{"value": 0.417000, "errors": 751},
{"value": 0.417500, "errors": 751},
{"value": 0.418000, "errors": 751},
{"value": 0.418500, "errors": 751},
{"value": 0.419000, "errors": 751},
{"value": 0.419500, "errors": 751},
{"value": 0.420000, "errors": 751},
{"value": 0.420500, "errors": 751},
{"value": 0.421000, "errors": 751},
{"value": 0.421500, "errors": 751},
{"value": 0.422000, "errors": 751},
{"value": 0.422500, "errors": 751},
{"value": 0.423000, "errors": 751},
{"value": 0.423500, "errors": 751},
{"value": 0.424000, "errors": 751},
{"value": 0.424500, "errors": 751},
{"value": 0.425000, "errors": 751},
{"value": 0.425500, "errors": 751},
{"value": 0.426000, "errors": 751},
{"value": 0.426500, "errors": 751},
{"value": 0.427000, "errors": 751},
{"value": 0.427500, "errors": 751},
{"value": 0.428000, "errors": 751},
{"value": 0.428500, "errors": 751},
{"value": 0.429000, "errors": 751},
{"value": 0.429500, "errors": 751},
{"value": 0.430000, "errors": 751},
{"value": 0.430500, "errors": 751},
{"value": 0.431000, "errors": 751},
{"value": 0.431500, "errors": 751},
{"value": 0.432000, "errors": 751},
{"value": 0.432500, "errors": 751},
{"value": 0.433000, "errors": 751},
{"value": 0.433500, "errors": 751},
{"value": 0.434000, "errors": 751},
{"value": 0.434500, "errors": 751},
{"value": 0.435000, "errors": 751},
{"value": 0.435500, "errors": 751},
{"value": 0.436000, "errors": 751},
{"value": 0.436500, "errors": 751},
{"value": 0.437000, "errors": 751},
{"value": 0.437500, "errors": 751},
{"value": 0.438000, "errors": 751},
{"value": 0.438500, "errors": 751},
{"value": 0.439000, "errors": 751},
{"value": 0.439500, "errors": 751},
{"value": 0.440000, "errors": 751},
{"value": 0.440500, "errors": 751},
{"value": 0.441000, "errors": 751},
{"value": 0.441500, "errors": 751},
{"value": 0.442000, "errors": 751},
{"value": 0.442500, "errors": 751},
{"value": 0.443000, "errors": 751},
{"value": 0.443500, "errors": 751},
{"value": 0.444000, "errors": 751},
{"value": 0.444500, "errors": 751},
{"value": 0.445000, "errors": 751},
{"value": 0.445500, "errors": 751},
{"value": 0.446000, "errors": 751},
{"value": 0.446500, "errors": 751},
{"value": 0.447000, "errors": 751},
{"value": 0.447500, "errors": 751},
{"value": 0.448000, "errors": 751},
{"value": 0.448500, "errors": 751},
{"value": 0.449000, "errors": 751},
{"value": 0.449500, "errors": 751},
{"value": 0.450000, "errors": 751},
{"value": 0.450500, "errors": 751},
{"value": 0.451000, "errors": 751},
{"value": 0.451500, "errors": 751},
{"value": 0.452000, "errors": 751},
{"value": 0.452500, "errors": 751},
{"value": 0.453000, "errors": 751},
{"value": 0.453500, "errors": 751},
{"value": 0.454000, "errors": 751},
{"value": 0.454500, "errors": 751},
{"value": 0.455000, "errors": 751},
{"value": 0.455500, "errors": 751},
{"value": 0.456000, "errors": 751},
{"value": 0.456500, "errors": 751},
{"value": 0.457000, "errors": 751},
{"value": 0.457500, "errors": 751},
{"value": 0.458000, "errors": 751},
{"value": 0.458500, "errors": 751},
{"value": 0.459000, "errors": 750},
{"value": 0.459500, "errors": 750},
{"value": 0.460000, "errors": 750},
{"value": 0.460500, "errors": 750},
{"value": 0.461000, "errors": 750},
{"value": 0.461500, "errors": 750},
{"value": 0.462000, "errors": 750},
{"value": 0.462500, "errors": 750},
{"value": 0.463000, "errors": 750},
{"value": 0.463500, "errors": 750},
{"value": 0.464000, "errors": 750},
{"value": 0.464500, "errors": 750},
{"value": 0.465000, "errors": 750},
{"value": 0.465500, "errors": 750},
{"value": 0.466000, "errors": 750},
{"value": 0.466500, "errors": 750},
{"value": 0.467000, "errors": 750},
{"value": 0.467500, "errors": 750},
{"value": 0.468000, "errors": 750},
{"value": 0.468500, "errors": 750},
{"value": 0.469000, "errors": 750},
{"value": 0.469500, "errors": 750},
{"value": 0.470000, "errors": 750},
{"value": 0.470500, "errors": 750},
{"value": 0.471000, "errors": 750},
{"value": 0.471500, "errors": 750},
{"value": 0.472000, "errors": 750},
{"value": 0.472500, "errors": 750},
{"value": 0.473000, "errors": 750},
{"value": 0.473500, "errors": 750},
{"value": 0.474000, "errors": 750},
{"value": 0.474500, "errors": 750},
{"value": 0.475000, "errors": 750},
{"value": 0.475500, "errors": 750},
{"value": 0.476000, "errors": 750},
{"value": 0.476500, "errors": 750},
{"value": 0.477000, "errors": 750},
{"value": 0.477500, "errors": 750},
{"value": 0.478000, "errors": 750},
{"value": 0.478500, "errors": 750},
{"value": 0.479000, "errors": 750},
{"value": 0.479500, "errors": 749},
{"value": 0.480000, "errors": 749},
{"value": 0.480500, "errors": 749},
{"value": 0.481000, "errors": 749},
{"value": 0.481500, "errors": 749},
{"value": 0.482000, "errors": 749},
{"value": 0.482500, "errors": 749},
{"value": 0.483000, "errors": 749},
{"value": 0.483500, "errors": 749},
{"value": 0.484000, "errors": 749},
{"value": 0.484500, "errors": 749},
{"value": 0.485000, "errors": 749},
{"value": 0.485500, "errors": 749},
{"value": 0.486000, "errors": 749},
{"value": 0.486500, "errors": 749},
{"value": 0.487000, "errors": 749},
{"value": 0.487500, "errors": 749},
{"value": 0.488000, "errors": 749},
{"value": 0.488500, "errors": 749},
{"value": 0.489000, "errors": 749},
{"value": 0.489500, "errors": 749},
{"value": 0.490000, "errors": 749},
{"value": 0.490500, "errors": 749},
{"value": 0.491000, "errors": 749},
{"value": 0.491500, "errors": 749},
{"value": 0.492000, "errors": 749},
{"value": 0.492500, "errors": 749},
{"value": 0.493000, "errors": 749},
{"value": 0.493500, "errors": 749},
{"value": 0.494000, "errors": 749},
{"value": 0.494500, "errors": 749},
{"value": 0.495000, "errors": 749},
{"value": 0.495500, "errors": 749},
{"value": 0.496000, "errors": 749},
{"value": 0.496500, "errors": 749},
{"value": 0.497000, "errors": 749},
{"value": 0.497500, "errors": 749},
{"value": 0.498000, "errors": 749},
{"value": 0.498500, "errors": 749},
{"value": 0.499000, "errors": 749},
{"value": 0.499500, "errors": 749},
{"value": 0.500000, "errors": 749},
{"value": 0.500500, "errors": 749},
{"value": 0.501000, "errors": 749},
{"value": 0.501500, "errors": 749},
{"value": 0.502000, "errors": 749},
{"value": 0.502500, "errors": 749},
{"value": 0.503000, "errors": 749},
{"value": 0.503500, "errors": 749},
{"value": 0.504000, "errors": 749},
{"value": 0.504500, "errors": 749},
{"value": 0.505000, "errors": 749},
{"value": 0.505500, "errors": 749},
{"value": 0.506000, "errors": 749},
{"value": 0.506500, "errors": 749},
{"value": 0.507000, "errors": 749},
{"value": 0.507500, "errors": 749},
{"value": 0.508000, "errors": 749},
{"value": 0.508500, "errors": 749},
{"value": 0.509000, "errors": 749},
{"value": 0.509500, "errors": 749},
{"value": 0.510000, "errors": 749},
{"value": 0.510500, "errors": 749},
{"value": 0.511000, "errors": 749},
{"value": 0.511500, "errors": 749},
{"value": 0.512000, "errors": 749},
{"value": 0.512500, "errors": 749},
{"value": 0.513000, "errors": 749},
{"value": 0.513500, "errors": 749},
{"value": 0.514000, "errors": 749},
{"value": 0.514500, "errors": 749},
{"value": 0.515000, "errors": 749},
{"value": 0.515500, "errors": 749},
{"value": 0.516000, "errors": 749},
{"value": 0.516500, "errors": 749},
{"value": 0.517000, "errors": 749},
{"value": 0.517500, "errors": 749},
{"value": 0.518000, "errors": 749},
{"value": 0.518500, "errors": 749},
{"value": 0.519000, "errors": 749},
{"value": 0.519500, "errors": 749},
{"value": 0.520000, "errors": 749},
{"value": 0.520500, "errors": 749},
{"value": 0.521000, "errors": 749},
{"value": 0.521500, "errors": 749},
{"value": 0.522000, "errors": 749},
{"value": 0.522500, "errors": 749},
{"value": 0.523000, "errors": 749},
{"value": 0.523500, "errors": 749},
{"value": 0.524000, "errors": 749},
{"value": 0.524500, "errors": 749},
{"value": 0.525000, "errors": 749},
{"value": 0.525500, "errors": 749},
{"value": 0.526000, "errors": 749},
{"value": 0.526500, "errors": 749},
{"value": 0.527000, "errors": 749},
{"value": 0.527500, "errors": 749},
{"value": 0.528000, "errors": 749},
{"value": 0.528500, "errors": 749},
{"value": 0.529000, "errors": 749},
{"value": 0.529500, "errors": 749},
{"value": 0.530000, "errors": 749},
{"value": 0.530500, "errors": 749},
{"value": 0.531000, "errors": 749},
{"value": 0.531500, "errors": 749},
{"value": 0.532000, "errors": 749},
{"value": 0.532500, "errors": 749},
{"value": 0.533000, "errors": 749},
{"value": 0.533500, "errors": 749},
{"value": 0.534000, "errors": 749},
{"value": 0.534500, "errors": 749},
{"value": 0.535000, "errors": 749},
{"value": 0.535500, "errors": 749},
{"value": 0.536000, "errors": 749},
{"value": 0.536500, "errors": 749},
{"value": 0.537000, "errors": 749},
{"value": 0.537500, "errors": 749},
{"value": 0.538000, "errors": 748},
{"value": 0.538500, "errors": 748},
{"value": 0.539000, "errors": 748},
{"value": 0.539500, "errors": 748},
{"value": 0.540000, "errors": 748},
{"value": 0.540500, "errors": 748},
{"value": 0.541000, "errors": 748},
{"value": 0.541500, "errors": 748},
{"value": 0.542000, "errors": 748},
{"value": 0.542500, "errors": 748},
{"value": 0.543000, "errors": 748},
{"value": 0.543500, "errors": 748},
{"value": 0.544000, "errors": 748},
{"value": 0.544500, "errors": 748},
{"value": 0.545000, "errors": 748},
{"value": 0.545500, "errors": 748},
{"value": 0.546000, "errors": 748},
{"value": 0.546500, "errors": 748},
{"value": 0.547000, "errors": 748},
{"value": 0.547500, "errors": 748},
{"value": 0.548000, "errors": 748},
{"value": 0.548500, "errors": 748},
{"value": 0.549000, "errors": 748},
{"value": 0.549500, "errors": 748},
{"value": 0.550000, "errors": 748},
{"value": 0.550500, "errors": 748},
{"value": 0.551000, "errors": 748},
{"value": 0.551500, "errors": 748},
{"value": 0.552000, "errors": 748},
{"value": 0.552500, "errors": 748},
{"value": 0.553000, "errors": 748},
{"value": 0.553500, "errors": 748},
{"value": 0.554000, "errors": 748},
{"value": 0.554500, "errors": 748},
{"value": 0.555000, "errors": 748},
{"value": 0.555500, "errors": 748},
{"value": 0.556000, "errors": 748},
{"value": 0.556500, "errors": 748},
{"value": 0.557000, "errors": 748},
{"value": 0.557500, "errors": 748},
{"value": 0.558000, "errors": 748},
{"value": 0.558500, "errors": 748},
{"value": 0.559000, "errors": 748},
{"value": 0.559500, "errors": 748},
{"value": 0.560000, "errors": 748},
{"value": 0.560500, "errors": 748},
{"value": 0.561000, "errors": 748},
{"value": 0.561500, "errors": 748},
{"value": 0.562000, "errors": 748},
{"value": 0.562500, "errors": 748},
{"value": 0.563000, "errors": 748},
{"value": 0.563500, "errors": 748},
{"value": 0.564000, "errors": 748},
{"value": 0.564500, "errors": 748},
{"value": 0.565000, "errors": 748},
{"value": 0.565500, "errors": 748},
{"value": 0.566000, "errors": 748},
{"value": 0.566500, "errors": 748},
{"value": 0.567000, "errors": 748},
{"value": 0.567500, "errors": 748},
{"value": 0.568000, "errors": 748},
{"value": 0.568500, "errors": 748},
{"value": 0.569000, "errors": 748},
{"value": 0.569500, "errors": 748},
{"value": 0.570000, "errors": 748},
{"value": 0.570500, "errors": 748},
{"value": 0.571000, "errors": 748},
{"value": 0.571500, "errors": 748},
{"value": 0.572000, "errors": 747},
{"value": 0.572500, "errors": 747},
{"value": 0.573000, "errors": 747},
{"value": 0.573500, "errors": 747},
{"value": 0.574000, "errors": 747},
{"value": 0.574500, "errors": 747},
{"value": 0.575000, "errors": 747},
{"value": 0.575500, "errors": 747},
{"value": 0.576000, "errors": 747},
{"value": 0.576500, "errors": 747},
{"value": 0.577000, "errors": 747},
{"value": 0.577500, "errors": 747},
{"value": 0.578000, "errors": 747},
{"value": 0.578500, "errors": 747},
{"value": 0.579000, "errors": 747},
{"value": 0.579500, "errors": 747},
{"value": 0.580000, "errors": 747},
{"value": 0.580500, "errors": 747},
{"value": 0.581000, "errors": 747},
{"value": 0.581500, "errors": 747},
{"value": 0.582000, "errors": 747},
{"value": 0.582500, "errors": 747},
{"value": 0.583000, "errors": 747},
{"value": 0.583500, "errors": 747},
{"value": 0.584000, "errors": 747},
{"value": 0.584500, "errors": 747},
{"value": 0.585000, "errors": 747},
{"value": 0.585500, "errors": 747},
{"value": 0.586000, "errors": 747},
{"value": 0.586500, "errors": 747},
{"value": 0.587000, "errors": 747},
{"value": 0.587500, "errors": 747},
{"value": 0.588000, "errors": 747},
{"value": 0.588500, "errors": 747},
{"value": 0.589000, "errors": 747},
{"value": 0.589500, "errors": 747},
{"value": 0.590000, "errors": 747},
{"value": 0.590500, "errors": 747},
{"value": 0.591000, "errors": 747},
{"value": 0.591500, "errors": 747},
{"value": 0.592000, "errors": 747},
{"value": 0.592500, "errors": 747},
{"value": 0.593000, "errors": 747},
{"value": 0.593500, "errors": 747},
{"value": 0.594000, "errors": 747},
{"value": 0.594500, "errors": 747},
{"value": 0.595000, "errors": 747},
{"value": 0.595500, "errors": 747},
{"value": 0.596000, "errors": 747},
{"value": 0.596500, "errors": 747},
{"value": 0.597000, "errors": 747},
{"value": 0.597500, "errors": 747},
{"value": 0.598000, "errors": 747},
{"value": 0.598500, "errors": 747},
{"value": 0.599000, "errors": 747},
{"value": 0.599500, "errors": 747},
{"value": 0.600000, "errors": 747},
{"value": 0.600500, "errors": 747},
{"value": 0.601000, "errors": 747},
{"value": 0.601500, "errors": 747},
{"value": 0.602000, "errors": 747},
{"value": 0.602500, "errors": 747},
{"value": 0.603000, "errors": 747},
{"value": 0.603500, "errors": 747},
{"value": 0.604000, "errors": 747},
{"value": 0.604500, "errors": 747},
{"value": 0.605000, "errors": 747},
{"value": 0.605500, "errors": 747},
{"value": 0.606000, "errors": 747},
{"value": 0.606500, "errors": 747},
{"value": 0.607000, "errors": 747},
{"value": 0.607500, "errors": 747},
{"value": 0.608000, "errors": 747},
{"value": 0.608500, "errors": 747},
{"value": 0.609000, "errors": 747},
{"value": 0.609500, "errors": 747},
{"value": 0.610000, "errors": 747},
{"value": 0.610500, "errors": 747},
{"value": 0.611000, "errors": 747},
{"value": 0.611500, "errors": 747},
{"value": 0.612000, "errors": 747},
{"value": 0.612500, "errors": 747},
{"value": 0.613000, "errors": 747},
{"value": 0.613500, "errors": 747},
{"value": 0.614000, "errors": 747},
{"value": 0.614500, "errors": 747},
{"value": 0.615000, "errors": 747},
{"value": 0.615500, "errors": 747},
{"value": 0.616000, "errors": 747},
{"value": 0.616500, "errors": 747},
{"value": 0.617000, "errors": 747},
{"value": 0.617500, "errors": 747},
{"value": 0.618000, "errors": 747},
{"value": 0.618500, "errors": 747},
{"value": 0.619000, "errors": 747},
{"value": 0.619500, "errors": 747},
{"value": 0.620000, "errors": 747},
{"value": 0.620500, "errors": 747},
{"value": 0.621000, "errors": 747},
{"value": 0.621500, "errors": 747},
{"value": 0.622000, "errors": 747},
{"value": 0.622500, "errors": 747},
{"value": 0.623000, "errors": 747},
{"value": 0.623500, "errors": 747},
{"value": 0.624000, "errors": 747},
{"value": 0.624500, "errors": 747},
{"value": 0.625000, "errors": 747},
{"value": 0.625500, "errors": 747},
{"value": 0.626000, "errors": 747},
{"value": 0.626500, "errors": 747},
{"value": 0.627000, "errors": 747},
{"value": 0.627500, "errors": 747},
{"value": 0.628000, "errors": 747},
{"value": 0.628500, "errors": 747},
{"value": 0.629000, "errors": 747},
{"value": 0.629500, "errors": 747},
{"value": 0.630000, "errors": 747},
{"value": 0.630500, "errors": 747},
{"value": 0.631000, "errors": 747},
{"value": 0.631500, "errors": 747},
{"value": 0.632000, "errors": 747},
{"value": 0.632500, "errors": 747},
{"value": 0.633000, "errors": 747},
{"value": 0.633500, "errors": 747},
{"value": 0.634000, "errors": 747},
{"value": 0.634500, "errors": 747},
{"value": 0.635000, "errors": 747},
{"value": 0.635500, "errors": 747},
{"value": 0.636000, "errors": 747},
{"value": 0.636500, "errors": 747},
{"value": 0.637000, "errors": 747},
{"value": 0.637500, "errors": 747},
{"value": 0.638000, "errors": 747},
{"value": 0.638500, "errors": 747},
{"value": 0.639000, "errors": 747},
{"value": 0.639500, "errors": 747},
{"value": 0.640000, "errors": 747},
{"value": 0.640500, "errors": 747},
{"value": 0.641000, "errors": 747},
{"value": 0.641500, "errors": 747},
{"value": 0.642000, "errors": 747},
{"value": 0.642500, "errors": 747},
{"value": 0.643000, "errors": 747},
{"value": 0.643500, "errors": 747},
{"value": 0.644000, "errors": 747},
{"value": 0.644500, "errors": 747},
{"value": 0.645000, "errors": 747},
{"value": 0.645500, "errors": 747},
{"value": 0.646000, "errors": 747},
{"value": 0.646500, "errors": 747},
{"value": 0.647000, "errors": 747},
{"value": 0.647500, "errors": 747},
{"value": 0.648000, "errors": 747},
{"value": 0.648500, "errors": 747},
{"value": 0.649000, "errors": 747},
{"value": 0.649500, "errors": 747},
{"value": 0.650000, "errors": 747},
{"value": 0.650500, "errors": 747},
{"value": 0.651000, "errors": 747},
{"value": 0.651500, "errors": 747},
{"value": 0.652000, "errors": 747},
{"value": 0.652500, "errors": 747},
{"value": 0.653000, "errors": 747},
{"value": 0.653500, "errors": 747},
{"value": 0.654000, "errors": 747},
{"value": 0.654500, "errors": 747},
{"value": 0.655000, "errors": 747},
{"value": 0.655500, "errors": 747},
{"value": 0.656000, "errors": 747},
{"value": 0.656500, "errors": 747},
{"value": 0.657000, "errors": 747},
{"value": 0.657500, "errors": 747},
{"value": 0.658000, "errors": 747},
{"value": 0.658500, "errors": 747},
{"value": 0.659000, "errors": 747},
{"value": 0.659500, "errors": 747},
{"value": 0.660000, "errors": 747},
{"value": 0.660500, "errors": 747},
{"value": 0.661000, "errors": 747},
{"value": 0.661500, "errors": 747},
{"value": 0.662000, "errors": 747},
{"value": 0.662500, "errors": 747},
{"value": 0.663000, "errors": 747},
{"value": 0.663500, "errors": 747},
{"value": 0.664000, "errors": 747},
{"value": 0.664500, "errors": 747},
{"value": 0.665000, "errors": 747},
{"value": 0.665500, "errors": 747},
{"value": 0.666000, "errors": 747},
{"value": 0.666500, "errors": 747},
{"value": 0.667000, "errors": 747},
{"value": 0.667500, "errors": 747},
{"value": 0.668000, "errors": 747},
{"value": 0.668500, "errors": 747},
{"value": 0.669000, "errors": 747},
{"value": 0.669500, "errors": 747},
{"value": 0.670000, "errors": 747},
{"value": 0.670500, "errors": 747},
{"value": 0.671000, "errors": 747},
{"value": 0.671500, "errors": 747},
{"value": 0.672000, "errors": 747},
{"value": 0.672500, "errors": 747},
{"value": 0.673000, "errors": 747},
{"value": 0.673500, "errors": 747},
{"value": 0.674000, "errors": 747},
{"value": 0.674500, "errors": 747},
{"value": 0.675000, "errors": 747},
{"value": 0.675500, "errors": 747},
{"value": 0.676000, "errors": 747},
{"value": 0.676500, "errors": 747},
{"value": 0.677000, "errors": 747},
{"value": 0.677500, "errors": 747},
{"value": 0.678000, "errors": 747},
{"value": 0.678500, "errors": 747},
{"value": 0.679000, "errors": 747},
{"value": 0.679500, "errors": 747},
{"value": 0.680000, "errors": 747},
{"value": 0.680500, "errors": 747},
{"value": 0.681000, "errors": 747},
{"value": 0.681500, "errors": 747},
{"value": 0.682000, "errors": 747},
{"value": 0.682500, "errors": 747},
{"value": 0.683000, "errors": 747},
{"value": 0.683500, "errors": 747},
{"value": 0.684000, "errors": 747},
{"value": 0.684500, "errors": 747},
{"value": 0.685000, "errors": 747},
{"value": 0.685500, "errors": 747},
{"value": 0.686000, "errors": 747},
{"value": 0.686500, "errors": 747},
{"value": 0.687000, "errors": 747},
{"value": 0.687500, "errors": 747},
{"value": 0.688000, "errors": 747},
{"value": 0.688500, "errors": 747},
{"value": 0.689000, "errors": 747},
{"value": 0.689500, "errors": 747},
{"value": 0.690000, "errors": 747},
{"value": 0.690500, "errors": 747},
{"value": 0.691000, "errors": 747},
{"value": 0.691500, "errors": 747},
{"value": 0.692000, "errors": 747},
{"value": 0.692500, "errors": 747},
{"value": 0.693000, "errors": 747},
{"value": 0.693500, "errors": 747},
{"value": 0.694000, "errors": 747},
{"value": 0.694500, "errors": 747},
{"value": 0.695000, "errors": 747},
{"value": 0.695500, "errors": 747},
{"value": 0.696000, "errors": 747},
{"value": 0.696500, "errors": 747},
{"value": 0.697000, "errors": 747},
{"value": 0.697500, "errors": 747},
{"value": 0.698000, "errors": 747},
{"value": 0.698500, "errors": 747},
{"value": 0.699000, "errors": 747},
{"value": 0.699500, "errors": 747},
{"value": 0.700000, "errors": 747},
]
x = []
y = []
for value in data:
    x.append(value["value"])
    y.append(value["errors"])
from pandas import DataFrame
d = {"x": x, "y": y}
df = DataFrame(d)
import matplotlib.pyplot as plt
plt.plot(x, y, 'ro')
plt.ylabel('errors')
plt.xlabel('threshold_p_for_first_filter_separator_character')
plt.title('threshold_p_for_first_filter_separator_character vs errors count')
polynomial = Polynomial(x, y, 4)
new_x = []
new_y = []
current_x = 0.
while current_x < 0.62:
    new_x.append(current_x)
    new_y.append(polynomial.getval(current_x))
    current_x += 0.00005
plt.plot(new_x, new_y, 'ro')
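# Alternative sketch (assumes numpy is installed): the same degree-4 least-squares fit
# can also be obtained with numpy.polyfit instead of the custom Polynomial helper used above.
import numpy as np
np_coeffs = np.polyfit(x, y, 4)        # highest-degree coefficient first
np_poly = np.poly1d(np_coeffs)         # callable polynomial
plt.plot(new_x, np_poly(new_x), 'b-')  # overlay the numpy fit for comparison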
print (polynomial.getval(0.)) | 
	mit | 
| 
	vivekmishra1991/scikit-learn | 
	sklearn/metrics/classification.py | 
	95 | 
	67713 | 
	"""Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
#          Mathieu Blondel <[email protected]>
#          Olivier Grisel <[email protected]>
#          Arnaud Joly <[email protected]>
#          Jochen Wersdorfer <[email protected]>
#          Lars Buitinck <[email protected]>
#          Joel Nothman <[email protected]>
#          Noel Dawe <[email protected]>
#          Jatin Shah <[email protected]>
#          Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
    """Check that y_true and y_pred belong to the same classification task
    This converts multiclass or binary types to a common shape, and raises a
    ValueError for a mix of multilabel and multiclass targets, a mix of
    multilabel formats, for the presence of continuous-valued or multioutput
    targets, or for targets of different lengths.
    Column vectors are squeezed to 1d, while multilabel formats are returned
    as CSR sparse label indicators.
    Parameters
    ----------
    y_true : array-like
    y_pred : array-like
    Returns
    -------
    type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
        The type of the true target data, as output by
        ``utils.multiclass.type_of_target``
    y_true : array or indicator matrix
    y_pred : array or indicator matrix
    """
    check_consistent_length(y_true, y_pred)
    type_true = type_of_target(y_true)
    type_pred = type_of_target(y_pred)
    y_type = set([type_true, type_pred])
    if y_type == set(["binary", "multiclass"]):
        y_type = set(["multiclass"])
    if len(y_type) > 1:
        raise ValueError("Can't handle mix of {0} and {1}"
                         "".format(type_true, type_pred))
    # We can't have more than one value in y_type => the set is no longer needed
    y_type = y_type.pop()
    # No metrics support "multiclass-multioutput" format
    if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
        raise ValueError("{0} is not supported".format(y_type))
    if y_type in ["binary", "multiclass"]:
        y_true = column_or_1d(y_true)
        y_pred = column_or_1d(y_pred)
    if y_type.startswith('multilabel'):
        y_true = csr_matrix(y_true)
        y_pred = csr_matrix(y_pred)
        y_type = 'multilabel-indicator'
    return y_type, y_true, y_pred
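# A minimal usage sketch of the helper above (arbitrary toy labels):
# >>> _check_targets([0, 1, 1], [0, 1, 0])
# ('binary', array([0, 1, 1]), array([0, 1, 0]))
# Mixing, say, a multiclass vector with a multilabel indicator matrix raises ValueError.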
def _weighted_sum(sample_score, sample_weight, normalize=False):
    if normalize:
        return np.average(sample_score, weights=sample_weight)
    elif sample_weight is not None:
        return np.dot(sample_score, sample_weight)
    else:
        return sample_score.sum()
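# A minimal usage sketch of _weighted_sum (arbitrary toy scores and weights):
# >>> _weighted_sum(np.array([1., 0., 1.]), None, normalize=True)    # mean -> 0.666...
# >>> _weighted_sum(np.array([1., 0., 1.]), np.array([2., 1., 1.]))  # weighted sum -> 3.0
# >>> _weighted_sum(np.array([1., 0., 1.]), None)                    # plain sum -> 2.0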
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
    """Accuracy classification score.
    In multilabel classification, this function computes subset accuracy:
    the set of labels predicted for a sample must *exactly* match the
    corresponding set of labels in y_true.
    Read more in the :ref:`User Guide <accuracy_score>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    normalize : bool, optional (default=True)
        If ``False``, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    score : float
        If ``normalize == True``, return the fraction of correctly classified
        samples (float), else return the number of correctly classified samples
        (int).
        The best performance is 1 with ``normalize == True`` and the number
        of samples with ``normalize == False``.
    See also
    --------
    jaccard_similarity_score, hamming_loss, zero_one_loss
    Notes
    -----
    In binary and multiclass classification, this function is equal
    to the ``jaccard_similarity_score`` function.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import accuracy_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> accuracy_score(y_true, y_pred)
    0.5
    >>> accuracy_score(y_true, y_pred, normalize=False)
    2
    In the multilabel case with binary label indicators:
    >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    # Compute accuracy for each possible representation
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type.startswith('multilabel'):
        differing_labels = count_nonzero(y_true - y_pred, axis=1)
        score = differing_labels == 0
    else:
        score = y_true == y_pred
    return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
    """Compute confusion matrix to evaluate the accuracy of a classification
    By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
    is equal to the number of observations known to be in group :math:`i` but
    predicted to be in group :math:`j`.
    Read more in the :ref:`User Guide <confusion_matrix>`.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.
    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.
    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to reorder
        or select a subset of labels.
        If none is given, those that appear at least once
        in ``y_true`` or ``y_pred`` are used in sorted order.
    Returns
    -------
    C : array, shape = [n_classes, n_classes]
        Confusion matrix
    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <http://en.wikipedia.org/wiki/Confusion_matrix>`_
    Examples
    --------
    >>> from sklearn.metrics import confusion_matrix
    >>> y_true = [2, 0, 2, 2, 0, 1]
    >>> y_pred = [0, 0, 2, 2, 0, 2]
    >>> confusion_matrix(y_true, y_pred)
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])
    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
    n_labels = labels.size
    label_to_ind = dict((y, x) for x, y in enumerate(labels))
    # convert yt, yp into index
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    CM = coo_matrix((np.ones(y_true.shape[0], dtype=int), (y_true, y_pred)),
                    shape=(n_labels, n_labels)
                    ).toarray()
    return CM
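# Implementation note (illustrative): labels absent from ``labels`` are mapped to
# ``n_labels + 1`` above and then filtered out, so samples whose true *or* predicted
# label falls outside ``labels`` are simply ignored, e.g.
# >>> confusion_matrix(["a", "b"], ["a", "c"], labels=["a", "b"])
# array([[1, 0],
#        [0, 0]])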
def cohen_kappa_score(y1, y2, labels=None):
    """Cohen's kappa: a statistic that measures inter-annotator agreement.
    This function computes Cohen's kappa [1], a score that expresses the level
    of agreement between two annotators on a classification problem. It is
    defined as
    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)
    where :math:`p_o` is the empirical probability of agreement on the label
    assigned to any sample (the observed agreement ratio), and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly.
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels [2].
    Parameters
    ----------
    y1 : array, shape = [n_samples]
        Labels assigned by the first annotator.
    y2 : array, shape = [n_samples]
        Labels assigned by the second annotator. The kappa statistic is
        symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to select a
        subset of labels. If None, all labels that appear at least once in
        ``y1`` or ``y2`` are used.
    Returns
    -------
    kappa : float
        The kappa statistic, which is a number between -1 and 1. The maximum
        value means complete agreement; zero or lower means chance agreement.
    References
    ----------
    .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
           Educational and Psychological Measurement 20(1):37-46.
           doi:10.1177/001316446002000104.
    .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
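    Examples
    --------
    A minimal sketch with arbitrary annotator labels; here the observed
    agreement is :math:`p_o = 0.75` and the chance agreement is
    :math:`p_e = 0.5`, giving :math:`\kappa = 0.5`:
    >>> from sklearn.metrics import cohen_kappa_score
    >>> cohen_kappa_score([0, 1, 1, 0], [0, 1, 1, 1])
    0.5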
    """
    confusion = confusion_matrix(y1, y2, labels=labels)
    P = confusion / float(confusion.sum())
    p_observed = np.trace(P)
    p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
    return (p_observed - p_expected) / (1 - p_expected)
def jaccard_similarity_score(y_true, y_pred, normalize=True,
                             sample_weight=None):
    """Jaccard similarity coefficient score
    The Jaccard index [1], or Jaccard similarity coefficient, defined as
    the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
    corresponding set of labels in ``y_true``.
    Read more in the :ref:`User Guide <jaccard_similarity_score>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    normalize : bool, optional (default=True)
        If ``False``, return the sum of the Jaccard similarity coefficient
        over the sample set. Otherwise, return the average of Jaccard
        similarity coefficient.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    score : float
        If ``normalize == True``, return the average Jaccard similarity
        coefficient, else it returns the sum of the Jaccard similarity
        coefficient over the sample set.
        The best performance is 1 with ``normalize == True`` and the number
        of samples with ``normalize == False``.
    See also
    --------
    accuracy_score, hamming_loss, zero_one_loss
    Notes
    -----
    In binary and multiclass classification, this function is equivalent
    to the ``accuracy_score``. It differs in the multilabel classification
    problem.
    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <http://en.wikipedia.org/wiki/Jaccard_index>`_
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import jaccard_similarity_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> jaccard_similarity_score(y_true, y_pred)
    0.5
    >>> jaccard_similarity_score(y_true, y_pred, normalize=False)
    2
    In the multilabel case with binary label indicators:
    >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
        np.ones((2, 2)))
    0.75
    """
    # Compute accuracy for each possible representation
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type.startswith('multilabel'):
        with np.errstate(divide='ignore', invalid='ignore'):
            # oddly, we may get an "invalid" rather than a "divide" error here
            pred_or_true = count_nonzero(y_true + y_pred, axis=1)
            pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
            score = pred_and_true / pred_or_true
            # If there is no label, the division yields NaN instead; we set
            # the Jaccard score to 1: lim_{x->0} x/x = 1
            # Note with py2.6 and np 1.3: we can't check safely for nan.
            score[pred_or_true == 0.0] = 1.0
    else:
        score = y_true == y_pred
    return _weighted_sum(score, sample_weight, normalize)
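# Edge-case sketch (illustrative): when a sample has no true and no predicted labels,
# ``pred_or_true`` is 0 and the per-sample score is defined as 1 above, e.g.
# >>> import numpy as np
# >>> jaccard_similarity_score(np.zeros((1, 2)), np.zeros((1, 2)))
# 1.0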
def matthews_corrcoef(y_true, y_pred):
    """Compute the Matthews correlation coefficient (MCC) for binary classes
    The Matthews correlation coefficient is used in machine learning as a
    measure of the quality of binary (two-class) classifications. It takes into
    account true and false positives and negatives and is generally regarded as
    a balanced measure which can be used even if the classes are of very
    different sizes. The MCC is in essence a correlation coefficient value
    between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
    an average random prediction and -1 an inverse prediction.  The statistic
    is also known as the phi coefficient. [source: Wikipedia]
    Only in the binary case does this relate to information about true and
    false positives and negatives. See references below.
    Read more in the :ref:`User Guide <matthews_corrcoef>`.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.
    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.
    Returns
    -------
    mcc : float
        The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
        prediction).
    References
    ----------
    .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
       accuracy of prediction algorithms for classification: an overview
       <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
    .. [2] `Wikipedia entry for the Matthews Correlation Coefficient
       <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
    Examples
    --------
    >>> from sklearn.metrics import matthews_corrcoef
    >>> y_true = [+1, +1, +1, -1]
    >>> y_pred = [+1, -1, +1, +1]
    >>> matthews_corrcoef(y_true, y_pred)  # doctest: +ELLIPSIS
    -0.33...
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type != "binary":
        raise ValueError("%s is not supported" % y_type)
    lb = LabelEncoder()
    lb.fit(np.hstack([y_true, y_pred]))
    y_true = lb.transform(y_true)
    y_pred = lb.transform(y_pred)
    with np.errstate(invalid='ignore'):
        mcc = np.corrcoef(y_true, y_pred)[0, 1]
    if np.isnan(mcc):
        return 0.
    else:
        return mcc
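# Implementation note (illustrative): after encoding the two binary classes as {0, 1},
# the Pearson correlation computed above coincides with the classical confusion-matrix
# form of the MCC; for the docstring example,
# (tp*tn - fp*fn) / sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)) = -1/3, i.e. -0.33...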
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
    """Zero-one classification loss.
    If normalize is ``True``, return the fraction of misclassifications
    (float), else it returns the number of misclassifications (int). The best
    performance is 0.
    Read more in the :ref:`User Guide <zero_one_loss>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    normalize : bool, optional (default=True)
        If ``False``, return the number of misclassifications.
        Otherwise, return the fraction of misclassifications.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    loss : float or int,
        If ``normalize == True``, return the fraction of misclassifications
        (float), else it returns the number of misclassifications (int).
    Notes
    -----
    In multilabel classification, the zero_one_loss function corresponds to
    the subset zero-one loss: for each sample, the entire set of labels must be
    correctly predicted, otherwise the loss for that sample is equal to one.
    See also
    --------
    accuracy_score, hamming_loss, jaccard_similarity_score
    Examples
    --------
    >>> from sklearn.metrics import zero_one_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> zero_one_loss(y_true, y_pred)
    0.25
    >>> zero_one_loss(y_true, y_pred, normalize=False)
    1
    In the multilabel case with binary label indicators:
    >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    score = accuracy_score(y_true, y_pred,
                           normalize=normalize,
                           sample_weight=sample_weight)
    if normalize:
        return 1 - score
    else:
        if sample_weight is not None:
            n_samples = np.sum(sample_weight)
        else:
            n_samples = _num_samples(y_true)
        return n_samples - score
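# Implementation note (illustrative): with ``normalize=False`` and a ``sample_weight``,
# the "count" returned above is np.sum(sample_weight) minus the weighted number of
# correct predictions, so it may be a float rather than an int.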
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
             sample_weight=None):
    """Compute the F1 score, also known as balanced F-score or F-measure
    The F1 score can be interpreted as a weighted average of the precision and
    recall, where an F1 score reaches its best value at 1 and worst score at 0.
    The relative contribution of precision and recall to the F1 score are
    equal. The formula for the F1 score is::
        F1 = 2 * (precision * recall) / (precision + recall)
    In the multi-class and multi-label case, this is the weighted average of
    the F1 score of each class.
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it is
        necessary to set ``pos_label=None`` if seeking to use another averaging
        method over binary targets.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).
        Note that if ``pos_label`` is given in binary classification with
        `average != 'binary'`, only that positive class is reported. This
        behavior is deprecated and will change in version 0.18.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or weighted
        average of the F1 scores of each class for the multiclass task.
    References
    ----------
    .. [1] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_
    Examples
    --------
    >>> from sklearn.metrics import f1_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> f1_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> f1_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average=None)
    array([ 0.8,  0. ,  0. ])
    """
    return fbeta_score(y_true, y_pred, 1, labels=labels,
                       pos_label=pos_label, average=average,
                       sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
                average='binary', sample_weight=None):
    """Compute the F-beta score
    The F-beta score is the weighted harmonic mean of precision and recall,
    reaching its optimal value at 1 and its worst value at 0.
    The `beta` parameter determines the weight of precision in the combined
    score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
    favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
    only recall).
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    beta : float
        Weight of precision in harmonic mean.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it is
        necessary to set ``pos_label=None`` if seeking to use another averaging
        method over binary targets.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).
        Note that if ``pos_label`` is given in binary classification with
        `average != 'binary'`, only that positive class is reported. This
        behavior is deprecated and will change in version 0.18.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score of the positive class in binary classification or weighted
        average of the F-beta score of each class for the multiclass task.
    References
    ----------
    .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
           Modern Information Retrieval. Addison Wesley, pp. 327-328.
    .. [2] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_
    Examples
    --------
    >>> from sklearn.metrics import fbeta_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.33...
    >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
    ... # doctest: +ELLIPSIS
    array([ 0.71...,  0.        ,  0.        ])
    """
    _, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 beta=beta,
                                                 labels=labels,
                                                 pos_label=pos_label,
                                                 average=average,
                                                 warn_for=('f-score',),
                                                 sample_weight=sample_weight)
    return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
    """Performs division and handles divide-by-zero.
    On zero-division, sets the corresponding result elements to zero
    and raises a warning.
    The metric, modifier and average arguments are used only for determining
    an appropriate warning.
    """
    result = numerator / denominator
    mask = denominator == 0.0
    if not np.any(mask):
        return result
    # remove infs
    result[mask] = 0.0
    # build appropriate warning
    # E.g. "Precision and F-score are ill-defined and being set to 0.0 in
    # labels with no predicted samples"
    axis0 = 'sample'
    axis1 = 'label'
    if average == 'samples':
        axis0, axis1 = axis1, axis0
    if metric in warn_for and 'f-score' in warn_for:
        msg_start = '{0} and F-score are'.format(metric.title())
    elif metric in warn_for:
        msg_start = '{0} is'.format(metric.title())
    elif 'f-score' in warn_for:
        msg_start = 'F-score is'
    else:
        return result
    msg = ('{0} ill-defined and being set to 0.0 {{0}} '
           'no {1} {2}s.'.format(msg_start, modifier, axis0))
    if len(mask) == 1:
        msg = msg.format('due to')
    else:
        msg = msg.format('in {0}s with'.format(axis1))
    warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
    return result
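# Usage note (illustrative): callers wrap this helper in
# ``np.errstate(divide='ignore', invalid='ignore')``; a zero denominator then yields
# 0.0 for the affected entries and at most one UndefinedMetricWarning per call,
# e.g. precision for a label with no predicted samples is reported as 0.0, not NaN.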
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
                                    pos_label=1, average=None,
                                    warn_for=('precision', 'recall',
                                              'f-score'),
                                    sample_weight=None):
    """Compute precision, recall, F-measure and support for each class
    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.
    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.
    The F-beta score can be interpreted as a weighted harmonic mean of
    the precision and recall, where an F-beta score reaches its best
    value at 1 and worst score at 0.
    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally important.
    The support is the number of occurrences of each class in ``y_true``.
    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    beta : float, 1.0 by default
        The strength of recall versus precision in the F-score.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it is
        necessary to set ``pos_label=None`` if seeking to use another averaging
        method over binary targets.
    average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
                       'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).
        Note that if ``pos_label`` is given in binary classification with
        `average != 'binary'`, only that positive class is reported. This
        behavior is deprecated and will change in version 0.18.
    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.
    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <http://en.wikipedia.org/wiki/Precision_and_recall>`_
    .. [2] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_
    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
           Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
    Examples
    --------
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='micro')
    ... # doctest: +ELLIPSIS
    (0.33..., 0.33..., 0.33..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)
    It is possible to compute per-label precisions, recalls, F1-scores and
    supports instead of averaging:
    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
    ... labels=['pig', 'dog', 'cat'])
    ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
    (array([ 0. ,  0. ,  0.66...]),
     array([ 0.,  0.,  1.]),
     array([ 0. ,  0. ,  0.8]),
     array([2, 2, 2]))
    """
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options and average != 'binary':
        raise ValueError('average has to be one of ' +
                         str(average_options))
    if beta <= 0:
        raise ValueError("beta should be >0 in the F-beta score")
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    present_labels = unique_labels(y_true, y_pred)
    if average == 'binary' and (y_type != 'binary' or pos_label is None):
        warnings.warn('The default `weighted` averaging is deprecated, '
                      'and from version 0.18, use of precision, recall or '
                      'F-score with multiclass or multilabel data or '
                      'pos_label=None will result in an exception. '
                      'Please set an explicit value for `average`, one of '
                      '%s. In cross validation use, for instance, '
                      'scoring="f1_weighted" instead of scoring="f1".'
                      % str(average_options), DeprecationWarning, stacklevel=2)
        average = 'weighted'
    if y_type == 'binary' and pos_label is not None and average is not None:
        if average != 'binary':
            warnings.warn('From version 0.18, binary input will not be '
                          'handled specially when using averaged '
                          'precision/recall/F-score. '
                          'Please use average=\'binary\' to report only the '
                          'positive class performance.', DeprecationWarning)
        if labels is None or len(labels) <= 2:
            if pos_label not in present_labels:
                if len(present_labels) < 2:
                    # Only negative labels
                    return (0., 0., 0., 0)
                else:
                    raise ValueError("pos_label=%r is not a valid label: %r" %
                                     (pos_label, present_labels))
            labels = [pos_label]
    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        n_labels = len(labels)
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
                                                 assume_unique=True)])
    ### Calculate tp_sum, pred_sum, true_sum ###
    if y_type.startswith('multilabel'):
        sum_axis = 1 if average == 'samples' else 0
        # All labels are index integers for multilabel.
        # Select labels:
        if not np.all(labels == present_labels):
            if np.max(labels) > np.max(present_labels):
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d > %d' %
                                 (np.max(labels), np.max(present_labels)))
            if np.min(labels) < 0:
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d < 0' % np.min(labels))
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]
        # calculate weighted counts
        true_and_pred = y_true.multiply(y_pred)
        tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
                               sample_weight=sample_weight)
        pred_sum = count_nonzero(y_pred, axis=sum_axis,
                                 sample_weight=sample_weight)
        true_sum = count_nonzero(y_true, axis=sum_axis,
                                 sample_weight=sample_weight)
    elif average == 'samples':
        raise ValueError("Sample-based precision, recall, fscore is "
                         "not meaningful outside multilabel "
                         "classification. See the accuracy_score instead.")
    else:
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_
        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None
        if len(tp_bins):
            tp_sum = bincount(tp_bins, weights=tp_bins_weights,
                              minlength=len(labels))
        else:
            # Pathological case
            true_sum = pred_sum = tp_sum = np.zeros(len(labels))
        if len(y_pred):
            pred_sum = bincount(y_pred, weights=sample_weight,
                                minlength=len(labels))
        if len(y_true):
            true_sum = bincount(y_true, weights=sample_weight,
                                minlength=len(labels))
        # Retain only selected labels
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]
        pred_sum = pred_sum[indices]
    if average == 'micro':
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])
    ### Finally, we have all our sufficient statistics. Divide! ###
    beta2 = beta ** 2
    with np.errstate(divide='ignore', invalid='ignore'):
        # Divide, and on zero-division, set scores to 0 and warn:
        # Oddly, we may get an "invalid" rather than a "divide" error
        # here.
        precision = _prf_divide(tp_sum, pred_sum,
                                'precision', 'predicted', average, warn_for)
        recall = _prf_divide(tp_sum, true_sum,
                             'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where pos
        # and true are nonzero, in which case, F is well-defined and zero
        f_score = ((1 + beta2) * precision * recall /
                   (beta2 * precision + recall))
        f_score[tp_sum == 0] = 0.0
    ## Average the results ##
    if average == 'weighted':
        weights = true_sum
        if weights.sum() == 0:
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None
    if average is not None:
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support
    return precision, recall, f_score, true_sum
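# Implementation note (illustrative): with ``average='micro'`` the per-label sums above
# collapse to totals, so for single-label multiclass input (and no restricted
# ``labels``) micro precision, recall and F-score all equal the plain accuracy,
# as the repeated 0.33... values in the docstring example illustrate.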
def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision
    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.
    The best value is 1 and the worst value is 0.
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it is
        necessary to set ``pos_label=None`` if seeking to use another averaging
        method over binary targets.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).
        Note that if ``pos_label`` is given in binary classification with
        `average != 'binary'`, only that positive class is reported. This
        behavior is deprecated and will change in version 0.18.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision of the positive class in binary classification or weighted
        average of the precision of each class for the multiclass task.
    Examples
    --------
    >>> from sklearn.metrics import precision_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> precision_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> precision_score(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average=None)  # doctest: +ELLIPSIS
    array([ 0.66...,  0.        ,  0.        ])
    """
    p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 labels=labels,
                                                 pos_label=pos_label,
                                                 average=average,
                                                 warn_for=('precision',),
                                                 sample_weight=sample_weight)
    return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall
    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.
    The best value is 1 and the worst value is 0.
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it is
        necessary to set ``pos_label=None`` if seeking to use another averaging
        method over binary targets.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).
        Note that if ``pos_label`` is given in binary classification with
        `average != 'binary'`, only that positive class is reported. This
        behavior is deprecated and will change in version 0.18.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall of the positive class in binary classification or weighted
        average of the recall of each class for the multiclass task.
    Examples
    --------
    >>> from sklearn.metrics import recall_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> recall_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average=None)
    array([ 1.,  0.,  0.])
    """
    _, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 labels=labels,
                                                 pos_label=pos_label,
                                                 average=average,
                                                 warn_for=('recall',),
                                                 sample_weight=sample_weight)
    return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2):
    """Build a text report showing the main classification metrics
    Read more in the :ref:`User Guide <classification_report>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.
    target_names : list of strings
        Optional display names matching the labels (same order).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    digits : int
        Number of digits for formatting output floating point values
    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class.
    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                 precision    recall  f1-score   support
    <BLANKLINE>
        class 0       0.50      1.00      0.67         1
        class 1       0.00      0.00      0.00         1
        class 2       1.00      0.67      0.80         3
    <BLANKLINE>
    avg / total       0.70      0.60      0.61         5
    <BLANKLINE>
    """
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
    last_line_heading = 'avg / total'
    if target_names is None:
        width = len(last_line_heading)
        target_names = ['%s' % l for l in labels]
    else:
        width = max(len(cn) for cn in target_names)
        width = max(width, len(last_line_heading), digits)
    headers = ["precision", "recall", "f1-score", "support"]
    fmt = '%% %ds' % width  # first column: class name
    fmt += '  '
    fmt += ' '.join(['% 9s' for _ in headers])
    fmt += '\n'
    headers = [""] + headers
    report = fmt % tuple(headers)
    report += '\n'
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)
    for i, label in enumerate(labels):
        values = [target_names[i]]
        for v in (p[i], r[i], f1[i]):
            values += ["{0:0.{1}f}".format(v, digits)]
        values += ["{0}".format(s[i])]
        report += fmt % tuple(values)
    report += '\n'
    # compute averages
    values = [last_line_heading]
    for v in (np.average(p, weights=s),
              np.average(r, weights=s),
              np.average(f1, weights=s)):
        values += ["{0:0.{1}f}".format(v, digits)]
    values += ['{0}'.format(np.sum(s))]
    report += fmt % tuple(values)
    return report
def hamming_loss(y_true, y_pred, classes=None):
    """Compute the average Hamming loss.
    The Hamming loss is the fraction of labels that are incorrectly predicted.
    Read more in the :ref:`User Guide <hamming_loss>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    classes : array, shape = [n_labels], optional
        Integer array of labels.
    Returns
    -------
    loss : float or int
        Return the average Hamming loss between elements of ``y_true`` and
        ``y_pred``.
    See Also
    --------
    accuracy_score, jaccard_similarity_score, zero_one_loss
    Notes
    -----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. The Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
    References
    ----------
    .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
           An Overview. International Journal of Data Warehousing & Mining,
           3(3), 1-13, July-September 2007.
    .. [2] `Wikipedia entry on the Hamming distance
           <http://en.wikipedia.org/wiki/Hamming_distance>`_
    Examples
    --------
    >>> from sklearn.metrics import hamming_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> hamming_loss(y_true, y_pred)
    0.25
    In the multilabel case with binary label indicators:
    >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
    0.75
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if classes is None:
        classes = unique_labels(y_true, y_pred)
    else:
        classes = np.asarray(classes)
    if y_type.startswith('multilabel'):
        n_differences = count_nonzero(y_true - y_pred)
        return (n_differences / (y_true.shape[0] * len(classes)))
    elif y_type in ["binary", "multiclass"]:
        return sp_hamming(y_true, y_pred)
    else:
        raise ValueError("{0} is not supported".format(y_type))
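# Editor's illustrative sketch, not part of scikit-learn: in the multilabel branch above the
# loss is the fraction of individual label assignments that differ.  Hypothetical helper
# reproducing the 0.75 value from the docstring example.
def _hamming_loss_multilabel_example():
    import numpy as np
    y_true = np.array([[0, 1], [1, 1]])
    y_pred = np.zeros((2, 2))
    n_differences = np.count_nonzero(y_true - y_pred)                # 3 label assignments differ
    return n_differences / float(y_true.shape[0] * y_true.shape[1])  # 3 / 4 = 0.75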
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
    """Log loss, aka logistic loss or cross-entropy loss.
    This is the loss function used in (multinomial) logistic regression
    and extensions of it such as neural networks, defined as the negative
    log-likelihood of the true labels given a probabilistic classifier's
    predictions. For a single sample with true label yt in {0,1} and
    estimated probability yp that yt = 1, the log loss is
        -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
    Read more in the :ref:`User Guide <log_loss>`.
    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.
    y_pred : array-like of float, shape = (n_samples, n_classes)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.
    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).
    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    loss : float
    Examples
    --------
    >>> log_loss(["spam", "ham", "ham", "spam"],  # doctest: +ELLIPSIS
    ...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    0.21616...
    References
    ----------
    C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
    p. 209.
    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    lb = LabelBinarizer()
    T = lb.fit_transform(y_true)
    if T.shape[1] == 1:
        T = np.append(1 - T, T, axis=1)
    # Clipping
    Y = np.clip(y_pred, eps, 1 - eps)
    # This happens in cases when elements in y_pred have type "str".
    if not isinstance(Y, np.ndarray):
        raise ValueError("y_pred should be an array of floats.")
    # If y_pred is of single dimension, assume y_true to be binary
    # and then check.
    if Y.ndim == 1:
        Y = Y[:, np.newaxis]
    if Y.shape[1] == 1:
        Y = np.append(1 - Y, Y, axis=1)
    # Check if dimensions are consistent.
    check_consistent_length(T, Y)
    T = check_array(T)
    Y = check_array(Y)
    if T.shape[1] != Y.shape[1]:
        raise ValueError("y_true and y_pred have different number of classes "
                         "%d, %d" % (T.shape[1], Y.shape[1]))
    # Renormalize
    Y /= Y.sum(axis=1)[:, np.newaxis]
    loss = -(T * np.log(Y)).sum(axis=1)
    return _weighted_sum(loss, sample_weight, normalize)
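# Editor's illustrative sketch, not part of scikit-learn: for binary targets the loss
# computed above reduces to -(yt*log(yp) + (1 - yt)*log(1 - yp)) per sample.  The helper
# below is hypothetical and mirrors the doctest values, with "spam" encoded as 1.
def _binary_log_loss_example():
    import numpy as np
    y_true = np.array([1, 0, 0, 1])            # ["spam", "ham", "ham", "spam"]
    p_spam = np.array([0.9, 0.1, 0.2, 0.65])   # predicted P(spam) for each sample
    per_sample = -(y_true * np.log(p_spam) + (1 - y_true) * np.log(1 - p_spam))
    return per_sample.mean()                   # ~0.21616, the doctest value above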
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
    when a prediction mistake is made, ``margin = y_true * pred_decision`` is
    always negative (since the signs disagree), implying ``1 - margin`` is
    always greater than 1.  The cumulated hinge loss is therefore an upper
    bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
    is an upper bound of the number of mistakes made by the classifier.
    Read more in the :ref:`User Guide <hinge_loss>`.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values. The positive label
        must be greater than the negative label.
    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).
    labels : array, optional, default None
        Contains all the labels for the problem. Used in multiclass hinge loss.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    loss : float
    References
    ----------
    .. [1] `Wikipedia entry on the Hinge loss
           <http://en.wikipedia.org/wiki/Hinge_loss>`_
    .. [2] Koby Crammer, Yoram Singer. On the Algorithmic
           Implementation of Multiclass Kernel-based Vector
           Machines. Journal of Machine Learning Research 2,
           (2001), 265-292
    .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
           by Robert C. Moore, John DeNero.
           <http://www.ttic.edu/sigml/symposium2011/papers/
           Moore+DeNero_Regularization.pdf>`_
    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.metrics import hinge_loss
    >>> X = [[0], [1]]
    >>> y = [-1, 1]
    >>> est = svm.LinearSVC(random_state=0)
    >>> est.fit(X, y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
         verbose=0)
    >>> pred_decision = est.decision_function([[-2], [3], [0.5]])
    >>> pred_decision  # doctest: +ELLIPSIS
    array([-2.18...,  2.36...,  0.09...])
    >>> hinge_loss([-1, 1, 1], pred_decision)  # doctest: +ELLIPSIS
    0.30...
    In the multiclass case:
    >>> X = np.array([[0], [1], [2], [3]])
    >>> Y = np.array([0, 1, 2, 3])
    >>> labels = np.array([0, 1, 2, 3])
    >>> est = svm.LinearSVC()
    >>> est.fit(X, Y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
         verbose=0)
    >>> pred_decision = est.decision_function([[-1], [2], [3]])
    >>> y_true = [0, 2, 3]
    >>> hinge_loss(y_true, pred_decision, labels)  #doctest: +ELLIPSIS
    0.56...
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    y_true_unique = np.unique(y_true)
    if y_true_unique.size > 2:
        if (labels is None and pred_decision.ndim > 1 and
                (np.size(y_true_unique) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = y_true_unique
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[np.arange(y_true.shape[0]), y_true] = False
        margin = pred_decision[~mask]
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)
    else:
        # Handles binary class case
        # this code assumes that positive and negative labels
        # are encoded as +1 and -1 respectively
        pred_decision = column_or_1d(pred_decision)
        pred_decision = np.ravel(pred_decision)
        lbin = LabelBinarizer(neg_label=-1)
        y_true = lbin.fit_transform(y_true)[:, 0]
        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")
    losses = 1 - margin
    # The hinge_loss doesn't penalize good enough predictions.
    losses[losses <= 0] = 0
    return np.average(losses, weights=sample_weight)
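# Editor's illustrative sketch, not part of scikit-learn: in the binary branch above the
# loss is mean(max(0, 1 - y * decision)) with labels encoded as +1/-1.  Hypothetical helper
# using (rounded) decision values from the doctest.
def _binary_hinge_loss_example():
    import numpy as np
    y_true = np.array([-1, 1, 1])
    pred_decision = np.array([-2.18, 2.36, 0.09])
    losses = np.maximum(0, 1 - y_true * pred_decision)
    return losses.mean()                       # ~0.30, matching the doctest above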
def _check_binary_probabilistic_predictions(y_true, y_prob):
    """Check that y_true is binary and y_prob contains valid probabilities"""
    check_consistent_length(y_true, y_prob)
    labels = np.unique(y_true)
    if len(labels) != 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % labels)
    if y_prob.max() > 1:
        raise ValueError("y_prob contains values greater than 1.")
    if y_prob.min() < 0:
        raise ValueError("y_prob contains values less than 0.")
    return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score.
    The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
    mean squared difference between (1) the predicted probability assigned
    to the possible outcomes for item i, and (2) the actual outcome.
    Therefore, the lower the Brier score is for a set of predictions, the
    better the predictions are calibrated. Note that the Brier score always
    takes on a value between zero and one, since this is the largest
    possible difference between a predicted probability (which must be
    between zero and one) and the actual outcome (which can take on values
    of only 0 and 1).
    The Brier score is appropriate for binary and categorical outcomes that
    can be structured as true or false, but is inappropriate for ordinal
    variables which can take on three or more values (this is because the
    Brier score assumes that all possible outcomes are equivalently
    "distant" from one another). Which label is considered to be the positive
    label is controlled via the parameter pos_label, which defaults to 1.
    Read more in the :ref:`User Guide <calibration>`.
    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.
    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    pos_label : int (default: None)
        Label of the positive class. If None, the maximum label is used as
        positive class
    Returns
    -------
    score : float
        Brier score
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import brier_score_loss
    >>> y_true = np.array([0, 1, 1, 0])
    >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
    >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    >>> brier_score_loss(y_true, y_prob)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, 1-y_prob, pos_label=0)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true_categorical, y_prob, \
                         pos_label="ham")  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
    0.0
    References
    ----------
    http://en.wikipedia.org/wiki/Brier_score
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    if pos_label is None:
        pos_label = y_true.max()
    y_true = np.array(y_true == pos_label, int)
    y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
    return np.average((y_true - y_prob) ** 2, weights=sample_weight)
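# Editor's illustrative sketch, not part of scikit-learn: the Brier score returned above is
# simply the mean squared difference between the predicted probability and the 0/1 outcome.
# Hypothetical helper reproducing the doctest value.
def _brier_score_example():
    import numpy as np
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    return np.mean((y_true - y_prob) ** 2)     # 0.0375, as in the doctest above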
 | 
	bsd-3-clause | 
| 
	HeraclesHX/scikit-learn | 
	sklearn/cluster/tests/test_dbscan.py | 
	114 | 
	11393 | 
	"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
    # Tests the DBSCAN algorithm with a similarity array.
    # Parameters chosen specifically for this task.
    eps = 0.15
    min_samples = 10
    # Compute similarities
    D = distance.squareform(distance.pdist(X))
    D /= np.max(D)
    # Compute DBSCAN
    core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
                                  min_samples=min_samples)
    # number of clusters, ignoring noise if present
    n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
    assert_equal(n_clusters_1, n_clusters)
    db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
    labels = db.fit(D).labels_
    n_clusters_2 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
    # Tests the DBSCAN algorithm with a feature vector array.
    # Parameters chosen specifically for this task.
    # Different eps to other test, because distance is not normalised.
    eps = 0.8
    min_samples = 10
    metric = 'euclidean'
    # Compute DBSCAN
    # parameters chosen for task
    core_samples, labels = dbscan(X, metric=metric, eps=eps,
                                  min_samples=min_samples)
    # number of clusters, ignoring noise if present
    n_clusters_1 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_1, n_clusters)
    db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
    labels = db.fit(X).labels_
    n_clusters_2 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
    core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
                                        min_samples=10)
    core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
    assert_array_equal(core_dense, core_sparse)
    assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    for X_ in [X, sparse.csr_matrix(X)]:
        db = DBSCAN(min_samples=6).fit(X_)
        assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
        assert_array_equal(db.labels_, -1)
        assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
    # Tests the DBSCAN algorithm with a callable metric.
    # Parameters chosen specifically for this task.
    # Different eps to other test, because distance is not normalised.
    eps = 0.8
    min_samples = 10
    # metric is the function reference, not the string key.
    metric = distance.euclidean
    # Compute DBSCAN
    # parameters chosen for task
    core_samples, labels = dbscan(X, metric=metric, eps=eps,
                                  min_samples=min_samples,
                                  algorithm='ball_tree')
    # number of clusters, ignoring noise if present
    n_clusters_1 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_1, n_clusters)
    db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
                algorithm='ball_tree')
    labels = db.fit(X).labels_
    n_clusters_2 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
    # Tests the DBSCAN algorithm with balltree for neighbor calculation.
    eps = 0.8
    min_samples = 10
    D = pairwise_distances(X)
    core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
                                  min_samples=min_samples)
    # number of clusters, ignoring noise if present
    n_clusters_1 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_1, n_clusters)
    db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
    labels = db.fit(X).labels_
    n_clusters_2 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_2, n_clusters)
    db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
    labels = db.fit(X).labels_
    n_clusters_3 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_3, n_clusters)
    db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
    labels = db.fit(X).labels_
    n_clusters_4 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_4, n_clusters)
    db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
                algorithm='ball_tree')
    labels = db.fit(X).labels_
    n_clusters_5 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
    # DBSCAN.fit should accept a list of lists.
    X = [[1., 2.], [3., 4.]]
    DBSCAN().fit(X)             # must not raise exception
def test_dbscan_badargs():
    # Test bad argument values: these should all raise ValueErrors
    assert_raises(ValueError,
                  dbscan,
                  X, eps=-1.0)
    assert_raises(ValueError,
                  dbscan,
                  X, algorithm='blah')
    assert_raises(ValueError,
                  dbscan,
                  X, metric='blah')
    assert_raises(ValueError,
                  dbscan,
                  X, leaf_size=-1)
    assert_raises(ValueError,
                  dbscan,
                  X, p=-1)
def test_pickle():
    obj = DBSCAN()
    s = pickle.dumps(obj)
    assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
    # ensure min_samples is inclusive of core point
    core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
    assert_in(0, core)
    # ensure eps is inclusive of circumference
    core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
    assert_in(0, core)
    core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
    assert_not_in(0, core)
def test_weighted_dbscan():
    # ensure sample_weight is validated
    assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
    assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
    # ensure sample_weight has an effect
    assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
                                  min_samples=6)[0])
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
                                  min_samples=6)[0])
    assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
                                   min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
                                      min_samples=6)[0])
    # points within eps of each other:
    assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
                                      sample_weight=[5, 1], min_samples=6)[0])
    # and effect of non-positive and non-integer sample_weight:
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
                                  eps=1.5, min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
                                      eps=1.5, min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
                                      eps=1.5, min_samples=6)[0])
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
                                  eps=1.5, min_samples=6)[0])
    # for non-negative sample_weight, cores should be identical to repetition
    rng = np.random.RandomState(42)
    sample_weight = rng.randint(0, 5, X.shape[0])
    core1, label1 = dbscan(X, sample_weight=sample_weight)
    assert_equal(len(label1), len(X))
    X_repeated = np.repeat(X, sample_weight, axis=0)
    core_repeated, label_repeated = dbscan(X_repeated)
    core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
    core_repeated_mask[core_repeated] = True
    core_mask = np.zeros(X.shape[0], dtype=bool)
    core_mask[core1] = True
    assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
    # sample_weight should work with precomputed distance matrix
    D = pairwise_distances(X)
    core3, label3 = dbscan(D, sample_weight=sample_weight,
                           metric='precomputed')
    assert_array_equal(core1, core3)
    assert_array_equal(label1, label3)
    # sample_weight should work with estimator
    est = DBSCAN().fit(X, sample_weight=sample_weight)
    core4 = est.core_sample_indices_
    label4 = est.labels_
    assert_array_equal(core1, core4)
    assert_array_equal(label1, label4)
    est = DBSCAN()
    label5 = est.fit_predict(X, sample_weight=sample_weight)
    core5 = est.core_sample_indices_
    assert_array_equal(core1, core5)
    assert_array_equal(label1, label5)
    assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
    X = [[0], [2], [3], [4], [6], [8], [10]]
    n_samples = len(X)
    for algorithm in ['brute', 'kd_tree', 'ball_tree']:
        # Degenerate case: every sample is a core sample, either with its own
        # cluster or including other close core samples.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=1)
        assert_array_equal(core_samples, np.arange(n_samples))
        assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
        # With eps=1 and min_samples=2 only the 3 samples from the denser area
        # are core samples. All other points are isolated and considered noise.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=2)
        assert_array_equal(core_samples, [1, 2, 3])
        assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # Only the sample in the middle of the dense area is core. Its two
        # neighbors are edge samples. Remaining samples are noise.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=3)
        assert_array_equal(core_samples, [2])
        assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # It's no longer possible to extract core samples with eps=1:
        # everything is noise.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=4)
        assert_array_equal(core_samples, [])
        assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
    # see https://github.com/scikit-learn/scikit-learn/issues/4641 for
    # more details
    X = np.ones((10, 2))
    labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
    assert_equal(len(set(labels)), 1)
    X = np.zeros((10, 2))
    labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
    assert_equal(len(set(labels)), 1)
 | 
	bsd-3-clause | 
| 
	Srisai85/scipy | 
	scipy/stats/kde.py | 
	27 | 
	17303 | 
	#-------------------------------------------------------------------------------
#
#  Define classes for (uni/multi)-variate kernel density estimation.
#
#  Currently, only Gaussian kernels are implemented.
#
#  Written by: Robert Kern
#
#  Date: 2004-08-09
#
#  Modified: 2005-02-10 by Robert Kern.
#              Contributed to Scipy
#            2005-10-07 by Robert Kern.
#              Some fixes to match the new scipy_core
#
#  Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
     ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
    """Representation of a kernel-density estimate using Gaussian kernels.
    Kernel density estimation is a way to estimate the probability density
    function (PDF) of a random variable in a non-parametric way.
    `gaussian_kde` works for both uni-variate and multi-variate data.   It
    includes automatic bandwidth determination.  The estimation works best for
    a unimodal distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.
    Parameters
    ----------
    dataset : array_like
        Datapoints to estimate from. In case of univariate data this is a 1-D
        array, otherwise a 2-D array with shape (# of dims, # of data).
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth.  This can be
        'scott', 'silverman', a scalar constant or a callable.  If a scalar,
        this will be used directly as `kde.factor`.  If a callable, it should
        take a `gaussian_kde` instance as only parameter and return a scalar.
        If None (default), 'scott' is used.  See Notes for more details.
    Attributes
    ----------
    dataset : ndarray
        The dataset with which `gaussian_kde` was initialized.
    d : int
        Number of dimensions.
    n : int
        Number of datapoints.
    factor : float
        The bandwidth factor, obtained from `kde.covariance_factor`, with which
        the covariance matrix is multiplied.
    covariance : ndarray
        The covariance matrix of `dataset`, scaled by the calculated bandwidth
        (`kde.factor`).
    inv_cov : ndarray
        The inverse of `covariance`.
    Methods
    -------
    evaluate
    __call__
    integrate_gaussian
    integrate_box_1d
    integrate_box
    integrate_kde
    pdf
    logpdf
    resample
    set_bandwidth
    covariance_factor
    Notes
    -----
    Bandwidth selection strongly influences the estimate obtained from the KDE
    (much more so than the actual shape of the kernel).  Bandwidth selection
    can be done by a "rule of thumb", by cross-validation, by "plug-in
    methods" or by other means; see [3]_, [4]_ for reviews.  `gaussian_kde`
    uses a rule of thumb, the default is Scott's Rule.
    Scott's Rule [1]_, implemented as `scotts_factor`, is::
        n**(-1./(d+4)),
    with ``n`` the number of data points and ``d`` the number of dimensions.
    Silverman's Rule [2]_, implemented as `silverman_factor`, is::
        (n * (d + 2) / 4.)**(-1. / (d + 4)).
    Good general descriptions of kernel density estimation can be found in [1]_
    and [2]_, the mathematics for this multi-dimensional implementation can be
    found in [1]_.
    References
    ----------
    .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
           Visualization", John Wiley & Sons, New York, Chicester, 1992.
    .. [2] B.W. Silverman, "Density Estimation for Statistics and Data
           Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
           Chapman and Hall, London, 1986.
    .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
           Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
    .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
           conditional density estimation", Computational Statistics & Data
           Analysis, Vol. 36, pp. 279-298, 2001.
    Examples
    --------
    Generate some random two-dimensional data:
    >>> from scipy import stats
    >>> def measure(n):
    ...     "Measurement model, return two coupled measurements."
    ...     m1 = np.random.normal(size=n)
    ...     m2 = np.random.normal(scale=0.5, size=n)
    ...     return m1+m2, m1-m2
    >>> m1, m2 = measure(2000)
    >>> xmin = m1.min()
    >>> xmax = m1.max()
    >>> ymin = m2.min()
    >>> ymax = m2.max()
    Perform a kernel density estimate on the data:
    >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    >>> positions = np.vstack([X.ravel(), Y.ravel()])
    >>> values = np.vstack([m1, m2])
    >>> kernel = stats.gaussian_kde(values)
    >>> Z = np.reshape(kernel(positions).T, X.shape)
    Plot the results:
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
    ...           extent=[xmin, xmax, ymin, ymax])
    >>> ax.plot(m1, m2, 'k.', markersize=2)
    >>> ax.set_xlim([xmin, xmax])
    >>> ax.set_ylim([ymin, ymax])
    >>> plt.show()
    """
    def __init__(self, dataset, bw_method=None):
        self.dataset = atleast_2d(dataset)
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        self.d, self.n = self.dataset.shape
        self.set_bandwidth(bw_method=bw_method)
    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.
        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.
        Returns
        -------
        values : (# of points,)-array
            The values at each point.
        Raises
        ------
        ValueError : if the dimensionality of the input points is different than
                     the dimensionality of the KDE.
        """
        points = atleast_2d(points)
        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)
        result = zeros((m,), dtype=float)
        if m >= self.n:
            # there are more points than data, so loop over data
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff*tdiff,axis=0) / 2.0
                result = result + exp(-energy)
        else:
            # loop over points
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result[i] = sum(exp(-energy), axis=0)
        result = result / self._norm_factor
        return result
    __call__ = evaluate
    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.
        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.
        Returns
        -------
        result : scalar
            The value of the integral.
        Raises
        ------
        ValueError :
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.
        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)
        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)
        # make mean a column vector
        mean = mean[:, newaxis]
        sum_cov = self.covariance + cov
        diff = self.dataset - mean
        tdiff = dot(linalg.inv(sum_cov), diff)
        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
                                                        sum_cov)) / self.n
        return result
    def integrate_box_1d(self, low, high):
        """
        Computes the integral of a 1D pdf between two bounds.
        Parameters
        ----------
        low : scalar
            Lower bound of integration.
        high : scalar
            Upper bound of integration.
        Returns
        -------
        value : scalar
            The result of the integral.
        Raises
        ------
        ValueError
            If the KDE is over more than one dimension.
        """
        if self.d != 1:
            raise ValueError("integrate_box_1d() only handles 1D pdfs")
        stdev = ravel(sqrt(self.covariance))[0]
        normalized_low = ravel((low - self.dataset) / stdev)
        normalized_high = ravel((high - self.dataset) / stdev)
        value = np.mean(special.ndtr(normalized_high) -
                        special.ndtr(normalized_low))
        return value
    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.
        Parameters
        ----------
        low_bounds : array_like
            A 1-D array containing the lower bounds of integration.
        high_bounds : array_like
            A 1-D array containing the upper bounds of integration.
        maxpts : int, optional
            The maximum number of points to use for integration.
        Returns
        -------
        value : scalar
            The result of the integral.
        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}
        value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
                                  self.covariance, **extra_kwds)
        if inform:
            msg = ('An integral in mvn.mvnun requires more points than %s' %
                   (self.d * 1000))
            warnings.warn(msg)
        return value
    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.
        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.
        Returns
        -------
        value : scalar
            The result of the integral.
        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.
        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")
        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other
        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)
            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies), axis=0)
        result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
        return result
    def resample(self, size=None):
        """
        Randomly sample a dataset from the estimated pdf.
        Parameters
        ----------
        size : int, optional
            The number of samples to draw.  If not provided, then the size is
            the same as the underlying dataset.
        Returns
        -------
        resample : (self.d, `size`) ndarray
            The sampled dataset.
        """
        if size is None:
            size = self.n
        norm = transpose(multivariate_normal(zeros((self.d,), float),
                         self.covariance, size=size))
        indices = randint(0, self.n, size=size)
        means = self.dataset[:, indices]
        return means + norm
    def scotts_factor(self):
        return power(self.n, -1./(self.d+4))
    def silverman_factor(self):
        return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
    #  Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor
    covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
        multiplies the data covariance matrix to obtain the kernel covariance
        matrix. The default is `scotts_factor`.  A subclass can overwrite this
        method to provide a different method, or set it through a call to
        `kde.set_bandwidth`."""
    def set_bandwidth(self, bw_method=None):
        """Compute the estimator bandwidth with given method.
        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.
        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth.  This can be
            'scott', 'silverman', a scalar constant or a callable.  If a
            scalar, this will be used directly as `kde.factor`.  If a callable,
            it should take a `gaussian_kde` instance as only parameter and
            return a scalar.  If None (default), nothing happens; the current
            `kde.covariance_factor` method is kept.
        Notes
        -----
        .. versionadded:: 0.11
        Examples
        --------
        >>> import scipy.stats as stats
        >>> x1 = np.array([-7, -5, 1, 4, 5.])
        >>> kde = stats.gaussian_kde(x1)
        >>> xs = np.linspace(-10, 10, num=50)
        >>> y1 = kde(xs)
        >>> kde.set_bandwidth(bw_method='silverman')
        >>> y2 = kde(xs)
        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
        >>> y3 = kde(xs)
        >>> import matplotlib.pyplot as plt
        >>> fig, ax = plt.subplots()
        >>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
        ...         label='Data points (rescaled)')
        >>> ax.plot(xs, y1, label='Scott (default)')
        >>> ax.plot(xs, y2, label='Silverman')
        >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
        >>> ax.legend()
        >>> plt.show()
        """
        if bw_method is None:
            pass
        elif bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)
        self._compute_covariance()
    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and inverse covariance of the data
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
                                               bias=False))
            self._data_inv_cov = linalg.inv(self._data_covariance)
        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
    def pdf(self, x):
        """
        Evaluate the estimated pdf on a provided set of points.
        Notes
        -----
        This is an alias for `gaussian_kde.evaluate`.  See the ``evaluate``
        docstring for more details.
        """
        return self.evaluate(x)
    def logpdf(self, x):
        """
        Evaluate the log of the estimated pdf on a provided set of points.
        Notes
        -----
        See `gaussian_kde.evaluate` for more details; this method simply
        returns ``np.log(gaussian_kde.evaluate(x))``.
        """
        return np.log(self.evaluate(x))
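# Editor's illustrative sketch, not part of scipy: the kernel covariance cached by
# _compute_covariance above is the data covariance scaled by factor**2, where the default
# factor is Scott's rule n**(-1/(d+4)).  Hypothetical helper using the 1-D data from the
# set_bandwidth example.
def _scotts_rule_example():
    import numpy as np
    data = np.array([-7., -5., 1., 4., 5.])
    n, d = data.size, 1
    factor = n ** (-1. / (d + 4))              # same value as kde.scotts_factor()
    kernel_cov = np.cov(data, bias=False) * factor ** 2
    return factor, kernel_cov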
 | 
	bsd-3-clause | 
| 
	simvisage/oricreate | 
	docs/howtos/ex08_rigid_facets/sim031miura_ori_psi_cntl.py | 
	1 | 
	2750 | 
	r'''
Fold the Miura ori crease pattern using psi control
---------------------------------------------------
'''
import numpy as np
from oricreate.api import \
    SimulationTask, SimulationConfig, \
    FTV, FTA
from oricreate.gu import \
    GuConstantLength, GuDofConstraints, GuPsiConstraints, fix
def create_cp_factory():
    # begin
    from oricreate.api import MiuraOriCPFactory
    cp_factory = MiuraOriCPFactory(L_x=30,
                                   L_y=21,
                                   n_x=2,
                                   n_y=2,
                                   d_0=3.0,
                                   d_1=-3.0)
    # end
    return cp_factory
if __name__ == '__main__':
    cpf = create_cp_factory()
    cp = cpf.formed_object
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cp.plot_mpl(ax, facets=True)
    plt.tight_layout()
    plt.show()
    # Link the crease pattern factory with the constraint clients
    gu_constant_length = GuConstantLength()
    dof_constraints = fix(cpf.N_grid[0, 1], [1]) \
        + fix(cpf.N_grid[1, 1], [0, 1, 2]) \
        + fix(cpf.N_grid[1, (0, -1)], [2])
    gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
    psi_max = np.pi / 4.0
    diag_psi_constraints = [([(i, 1.0)], 0) for i in cpf.L_d_grid.flatten()]
    gu_psi_constraints = \
        GuPsiConstraints(forming_task=cpf,
                         psi_constraints=diag_psi_constraints +
                         [([(cpf.L_h_grid[1, 1], 1.0)],
                           lambda t: -psi_max * t),
                          ])
    sim_config = SimulationConfig(goal_function_type='none',
                                  gu={'cl': gu_constant_length,
                                      'dofs': gu_dof_constraints,
                                      'psi': gu_psi_constraints},
                                  acc=1e-5, MAX_ITER=10)
    sim_task = SimulationTask(previous_task=cpf,
                              config=sim_config,
                              n_steps=5)
    cp = sim_task.formed_object
    cp.u[cpf.N_grid[(0, -1), 1], 2] = -1.0
    sim_task.u_1
    ftv = FTV()
    #ftv.add(sim_task.sim_history.viz3d['node_numbers'], order=5)
    ftv.add(sim_task.sim_history.viz3d['cp'])
    ftv.add(gu_dof_constraints.viz3d['default'])
    fta = FTA(ftv=ftv)
    fta.init_view(a=200, e=35, d=50, f=(0, 0, 0), r=0)
    fta.add_cam_move(a=200, e=34, n=5, d=50, r=0,
                     duration=10,
                     #                     vot_fn=lambda cmt: np.linspace(0, 1, 4),
                     azimuth_move='damped',
                     elevation_move='damped',
                     distance_move='damped')
    fta.plot()
    fta.configure_traits()
 | 
	gpl-3.0 | 
| 
	wanggang3333/scikit-learn | 
	examples/model_selection/plot_validation_curve.py | 
	229 | 
	1823 | 
	"""
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
 | 
	bsd-3-clause | 
| 
	beni55/SimpleCV | 
	SimpleCV/examples/util/ColorCube.py | 
	13 | 
	1901 | 
	from SimpleCV import Image, Camera, Display, Color
import pygame as pg
import numpy as np
from pylab import *
from mpl_toolkits.mplot3d import axes3d
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cv2
bins = 8
#precompute
idxs = []
colors = []
offset = bins/2
skip = 255/bins
for x in range(0,bins):
    for y in range(0,bins):
        for z in range(0,bins):
            b = ((x*skip)+offset)/255.0
            g = ((y*skip)+offset)/255.0
            r = ((z*skip)+offset)/255.0
            idxs.append((x,y,z,(r,g,b)))
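# Note added for clarity (not in the original example): each (x, y, z) bin index above is
# paired with a representative RGB colour from inside that bin: (index*skip + offset)/255
# rescales the bin index to a 0-1 channel value, so every histogram cell plotted below is
# drawn in roughly its own colour.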
# plot points in 3D
cam = Camera()
disp = Display((800,600))
fig = figure()
fig.set_size_inches( (10,7) )
canvas = FigureCanvasAgg(fig)
azim = 0
while disp.isNotDone():
    ax = fig.gca(projection='3d')
    ax.set_xlabel('BLUE', color=(0,0,1) )
    ax.set_ylabel('GREEN',color=(0,1,0))
    ax.set_zlabel('RED',color=(1,0,0))
    # Get the color histogram
    img = cam.getImage().scale(0.3)
    rgb = img.getNumpyCv2()
    hist = cv2.calcHist([rgb],[0,1,2],None,[bins,bins,bins],[0,256,0,256,0,256])
    hist = hist/np.max(hist)
    # render everything
    [ ax.plot([x],[y],[z],'.',markersize=max(hist[x,y,z]*100,6),color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
    #[ ax.plot([x],[y],[z],'.',color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
    ax.set_xlim3d(0, bins-1)
    ax.set_ylim3d(0, bins-1)
    ax.set_zlim3d(0, bins-1)
    azim = (azim+0.5)%360
    ax.view_init(elev=35, azim=azim)
    ########### convert matplotlib to  SimpleCV image
    canvas.draw()
    renderer = canvas.get_renderer()
    raw_data = renderer.tostring_rgb()
    size = canvas.get_width_height()    
    surf = pg.image.fromstring(raw_data, size, "RGB")
    figure = Image(surf)
    ############ All done
    figure = figure.floodFill((0,0), tolerance=5,color=Color.WHITE)
    result = figure.blit(img, pos=(20,20))
    result.save(disp)
    fig.clf()
 | 
	bsd-3-clause | 
| 
	raymondnoonan/Mpropulator | 
	MPropulator/readConfig.py | 
	1 | 
	1536 | 
	import pandas as pd
import os
from MPropulator import validations as vd
def readConfig(config):
    '''
    Reads in the config file as a dataframe
    and validates the inputs and outputs of
    this file.
    args:   config is the path to the config file csv
    output: pandas dataframe that is a parsed and prepped
            config file.
    '''
    vd.validateConfigPath(config)
    parseConfig = pd.read_csv(config)
    #parseConfig.fillna("", inplace=True)
    # users enter rows to skip as 1,2,3,4 and cols to skip as A,B,C
    # we need to parse these into lists
    split = str.split
    def splitFunc(val):
        arr = split(str(val).strip(" "),",")
        arr = [x for x in arr if x != 'nan']
        return arr
    #splitFunc = lambda x: split(str(x).strip(" "), ",")
    def assertList(x):
        assert(isinstance(x,list))
    parseConfig['skiprows'] = map(splitFunc, parseConfig['skiprows'])
    parseConfig['skipcols'] = map(splitFunc, parseConfig['skipcols'])
    map(assertList, parseConfig['skiprows'])
    map(assertList, parseConfig['skipcols'])
    # in addition, for the skiprows, we want these as ints, not strings
    def makeInt(array):
        intArray = [int(x) for x in array]
        return intArray
    try:
        parseConfig['skiprows'] = map(makeInt, parseConfig['skiprows'])
    except (ValueError, TypeError):
        raise ValueError("Cannot convert some values in skiprows to ints")
    parseConfig['ignore'].fillna(False, inplace=True)
    vd.validateConfigRead(parseConfig)
    return parseConfig
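# Editor's illustrative sketch, not part of MPropulator: how a skiprows cell such as
# "1,2,3" ends up as [1, 2, 3] after the split and int conversion performed above.  The
# helper name and sample value are hypothetical.
def _parse_skiprows_example(cell="1,2,3"):
    parts = [p for p in str(cell).strip(" ").split(",") if p != 'nan']
    return [int(p) for p in parts]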
 | 
	mit | 
| 
	barnabytprowe/great3-public | 
	validation/plot_variable_submission.py | 
	2 | 
	3710 | 
	#!/usr/bin/env python
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""@file plot_variable_submission.py
Handy command line executable script for plotting up a GREAT3 variable shear submission.
"""
# Constants
NFIELDS = 10
NBINS_THETA = 15
YLIM_EMODE = 2.e-5
YLIM_BMODE = 2.e-5
def plot(submission_filename, output_filename, nfields=NFIELDS, nbins_theta=NBINS_THETA,
         ylim_emode=YLIM_EMODE, ylim_bmode=YLIM_BMODE):
    """Plot a submission.
    """ 
    import numpy as np
    import matplotlib.pyplot as plt
    # Load the data from the input submission
    data = np.loadtxt(submission_filename)
    field, theta, map_E, map_B, maperr = (
        data[:, 0].astype(int), data[:, 1], data[:, 2], data[:, 3], data[:, 4])
    # Then plot (largely borrowed from the code in server/great3/evaluate.py)
    plt.figure(figsize=(10, 8))
    plt.subplot(211)
    for ifield in range(nfields):
        plt.semilogx(
            theta[ifield * nbins_theta: (ifield + 1) * nbins_theta],
            map_E[ifield * nbins_theta: (ifield + 1) * nbins_theta], label="Field "+str(ifield))
    plt.ylim(-ylim_emode, ylim_emode)
    plt.title(submission_filename+" E-mode")
    plt.ylabel("Ap. Mass Dispersion")
    plt.axhline(ls="--", color="k")
    plt.legend()
    plt.subplot(212)
    for ifield in range(nfields):
        plt.semilogx(
            theta[ifield * nbins_theta: (ifield + 1) * nbins_theta],
            map_B[ifield * nbins_theta: (ifield + 1) * nbins_theta], label="Field "+str(ifield))
    plt.ylim(-ylim_bmode, ylim_bmode)
    plt.title(submission_filename+" B-mode")
    plt.xlabel("Theta [degrees]")
    plt.ylabel("Ap. Mass Dispersion")
    plt.axhline(ls="--", color="k")
    plt.legend()
    plt.savefig(output_filename)
    return
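# Editor's illustrative sketch with a hypothetical filename and synthetic values (not a real
# GREAT3 submission): plot() above expects five columns per row (field index, theta in
# degrees, aperture-mass E-mode, B-mode and an error estimate).
def _write_dummy_submission(filename="dummy_submission.txt"):
    import numpy as np
    field = np.repeat(np.arange(NFIELDS), NBINS_THETA)
    theta = np.tile(np.logspace(-1.5, 1.0, NBINS_THETA), NFIELDS)
    zeros = np.zeros_like(theta)
    np.savetxt(filename, np.column_stack([field, theta, zeros, zeros, zeros]))
    return filename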
if __name__ == "__main__":
    import sys
    # Get the input and output filenames from the command line
    if len(sys.argv) != 3:
        print "plot_variable_submission.py"
        print "usage: ./plot_variable_submission.py input_submission output_filename"
        sys.exit(1)
    submission_filename = sys.argv[1]
    output_filename = sys.argv[2]
    plot(submission_filename, output_filename)
 
 | 
	bsd-3-clause | 
| 
	OGGM/oggm | 
	oggm/cli/benchmark.py | 
	2 | 
	8716 | 
	"""Command line arguments to the oggm_benchmark command
Type `$ oggm_benchmark -h` for help
"""
# External modules
import os
import sys
import argparse
import time
import logging
import pandas as pd
import geopandas as gpd
# Locals
import oggm.cfg as cfg
from oggm import utils, workflow, tasks
from oggm.exceptions import InvalidParamsError
def _add_time_to_df(df, index, t):
    df.loc[index, 't'] = t
    m, s = divmod(t, 60)
    h, m = divmod(m, 60)
    df.loc[index, 'H'] = h
    df.loc[index, 'M'] = m
    df.loc[index, 'S'] = s
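# Editor's illustrative sketch, not part of the OGGM benchmark itself: _add_time_to_df
# stores the raw timing in seconds plus an hours/minutes/seconds breakdown via divmod.
# Hypothetical helper with a made-up task name and duration.
def _add_time_to_df_example():
    df = pd.DataFrame()
    _add_time_to_df(df, 'example_task', 3725.0)
    return df   # one row with t=3725.0, H=1.0, M=2.0, S=5.0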
def run_benchmark(rgi_version=None, rgi_reg=None, border=None,
                  output_folder='', working_dir='', is_test=False,
                  test_rgidf=None, test_intersects_file=None,
                  test_topofile=None):
    """Does the actual job.
    Parameters
    ----------
    rgi_version : str
        the RGI version to use (defaults to cfg.PARAMS)
    rgi_reg : str
        the RGI region to process
    border : int
        the number of pixels at the maps border
    output_folder : str
        path to the output folder (where to put the preprocessed tar files)
    working_dir : str
        path to the OGGM working directory
    is_test : bool
        to test on a couple of glaciers only!
    test_rgidf : shapefile
        for testing purposes only
    test_intersects_file : shapefile
        for testing purposes only
    test_topofile : str
        for testing purposes only
    """
    # Module logger
    log = logging.getLogger(__name__)
    # Params
    params = {}
    # Local paths
    utils.mkdir(working_dir)
    params['working_dir'] = working_dir
    # Initialize OGGM and set up the run parameters
    cfg.initialize(logging_level='WORKFLOW', params=params, future=True)
    # Use multiprocessing?
    cfg.PARAMS['use_multiprocessing'] = True
    # How many grid points around the glacier?
    # Make it large if you expect your glaciers to grow large
    cfg.PARAMS['border'] = border
    # Set to True for operational runs
    cfg.PARAMS['continue_on_error'] = True
    # For statistics
    odf = pd.DataFrame()
    if rgi_version is None:
        rgi_version = cfg.PARAMS['rgi_version']
    base_dir = os.path.join(output_folder)
    # Add a package version file
    utils.mkdir(base_dir)
    opath = os.path.join(base_dir, 'package_versions.txt')
    with open(opath, 'w') as vfile:
        vfile.write(utils.show_versions(logger=log))
    # Read RGI
    start = time.time()
    if test_rgidf is None:
        # Get the RGI file
        rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
                                                        version=rgi_version))
        # We use intersects
        rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                    version=rgi_version)
        cfg.set_intersects_db(rgif)
    else:
        rgidf = test_rgidf
        cfg.set_intersects_db(test_intersects_file)
    if is_test:
        # Just for fun
        rgidf = rgidf.sample(2)
    _add_time_to_df(odf, 'Read RGI', time.time()-start)
    # Sort for more efficient parallel computing
    rgidf = rgidf.sort_values('Area', ascending=False)
    log.workflow('Starting prepro run for RGI reg: {} '
                 'and border: {}'.format(rgi_reg, border))
    log.workflow('Number of glaciers: {}'.format(len(rgidf)))
    # Input
    if test_topofile:
        cfg.PATHS['dem_file'] = test_topofile
    utils.apply_test_ref_tstars()
    # Initialize working directories
    start = time.time()
    gdirs = workflow.init_glacier_directories(rgidf, reset=True, force=True)
    _add_time_to_df(odf, 'init_glacier_directories', time.time()-start)
    # Tasks
    task_list = [
        tasks.define_glacier_region,
        tasks.process_cru_data,
        tasks.glacier_masks,
        tasks.compute_centerlines,
        tasks.initialize_flowlines,
        tasks.compute_downstream_line,
        tasks.compute_downstream_bedshape,
        tasks.catchment_area,
        tasks.catchment_intersections,
        tasks.catchment_width_geom,
        tasks.catchment_width_correction,
        tasks.local_t_star,
        tasks.mu_star_calibration,
        tasks.prepare_for_inversion,
        tasks.mass_conservation_inversion,
        tasks.filter_inversion_output,
        tasks.init_present_time_glacier,
    ]
    for task in task_list:
        start = time.time()
        workflow.execute_entity_task(task, gdirs)
        _add_time_to_df(odf, task.__name__, time.time()-start)
    # Runs
    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                                 nyears=250, bias=0, seed=0,
                                 output_filesuffix='_tstar')
    _add_time_to_df(odf, 'run_random_climate_tstar_250', time.time()-start)
    start = time.time()
    workflow.execute_entity_task(tasks.run_random_climate, gdirs,
                                 nyears=250, y0=1995, seed=0,
                                 output_filesuffix='_commit')
    _add_time_to_df(odf, 'run_random_climate_commit_250', time.time()-start)
    # Compile results
    start = time.time()
    utils.compile_glacier_statistics(gdirs)
    _add_time_to_df(odf, 'compile_glacier_statistics', time.time()-start)
    start = time.time()
    utils.compile_climate_statistics(gdirs,
                                     add_climate_period=[1920, 1960, 2000])
    _add_time_to_df(odf, 'compile_climate_statistics', time.time()-start)
    start = time.time()
    utils.compile_run_output(gdirs, input_filesuffix='_tstar')
    _add_time_to_df(odf, 'compile_run_output_tstar', time.time()-start)
    start = time.time()
    utils.compile_run_output(gdirs, input_filesuffix='_commit')
    _add_time_to_df(odf, 'compile_run_output_commit', time.time()-start)
    # Log
    opath = os.path.join(base_dir, 'benchmarks_b{:03d}.csv'.format(border))
    odf.index.name = 'Task'
    odf.to_csv(opath)
    log.workflow('OGGM benchmarks are done!')
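# A comment-only usage sketch (hypothetical paths; this mirrors the keyword
# arguments that main()/parse_args below assemble from the CLI):
#
#   run_benchmark(rgi_version=None, rgi_reg='11', border=80,
#                 output_folder='/tmp/oggm_out', working_dir='/tmp/oggm_wd',
#                 is_test=True)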
def parse_args(args):
    """Check input arguments and env variables"""
    # CLI args
    description = ('Run an OGGM benchmark on a selected RGI Region. '
                   'This writes a benchmarks_b{border}.csv file where '
                   'the results are summarized')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--map-border', type=int,
                        help='the size of the map border. Is required if '
                             '$OGGM_MAP_BORDER is not set.')
    parser.add_argument('--rgi-reg', type=str,
                        help='the rgi region to process. Is required if '
                             '$OGGM_RGI_REG is not set.')
    parser.add_argument('--rgi-version', type=str,
                        help='the RGI version to use. Defaults to the OGGM '
                             'default.')
    parser.add_argument('--working-dir', type=str,
                        help='path to the OGGM working directory. Defaults '
                             'to the current directory or '
                             '$OGGM_WORKDIR.')
    parser.add_argument('--output', type=str,
                        help='path to the directory where to write the '
                             'output. Defaults to current directory or'
                             '$OGGM_OUTDIR.')
    parser.add_argument('--test', nargs='?', const=True, default=False,
                        help='if you want to do a test on a couple of '
                             'glaciers first.')
    args = parser.parse_args(args)
    # Check input
    rgi_reg = args.rgi_reg
    if not rgi_reg:
        rgi_reg = os.environ.get('OGGM_RGI_REG', None)
        if rgi_reg is None:
            raise InvalidParamsError('--rgi-reg is required!')
    rgi_reg = '{:02}'.format(int(rgi_reg))
    rgi_version = args.rgi_version
    border = args.map_border
    if not border:
        border = os.environ.get('OGGM_MAP_BORDER', None)
        if border is None:
            raise InvalidParamsError('--map-border is required!')
    working_dir = args.working_dir
    if not working_dir:
        working_dir = os.environ.get('OGGM_WORKDIR', '')
    output_folder = args.output
    if not output_folder:
        output_folder = os.environ.get('OGGM_OUTDIR', '')
    border = int(border)
    output_folder = os.path.abspath(output_folder)
    working_dir = os.path.abspath(working_dir)
    # All good
    return dict(rgi_version=rgi_version, rgi_reg=rgi_reg,
                border=border, output_folder=output_folder,
                working_dir=working_dir, is_test=args.test)
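# A comment-only sketch of a typical parse_args result (hypothetical
# invocation, assuming none of the OGGM_* environment variables are set):
# the paths default to the current directory and the region is kept as a
# zero-padded string.
#
#   parse_args(['--rgi-reg', '11', '--map-border', '80'])
#   # -> {'rgi_version': None, 'rgi_reg': '11', 'border': 80,
#   #     'output_folder': <abs path of cwd>, 'working_dir': <abs path of cwd>,
#   #     'is_test': False}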
def main():
    """Script entry point"""
    run_benchmark(**parse_args(sys.argv[1:]))
 | 
	bsd-3-clause | 
| 
	skrzym/monday-morning-quarterback | 
	Research/report.py | 
	1 | 
	13031 | 
	from matplotlib import pyplot as plt
import matplotlib.ticker as plticker
import seaborn as sns
import pandas as pd
import numpy as np
import math
import warnings
from collections import Counter
import nfldatatools as nfltools
rs_pbp = nfltools.gather_data(playoffs=False)
po_pbp = nfltools.gather_data(playoffs=True)
sns.set_style("whitegrid")
#Set general plot properties
sns.set_style("white", {"axes.grid": True})
sns.set_context({"figure.figsize": (10, 7)})
################################################
# Figure 1 - HeatMap
def fig1():
    filters=[
        ['Season', 2009, '>=']
    ]
    yard_grouping = 10
    fig,(ax1, ax2) = plt.subplots(1,2,figsize=(15,10))
    nfltools.plotPassingHeatMap(rs_pbp, filters=filters, ax=ax1, yard_grouping=yard_grouping)
    nfltools.plotPassingHeatMap(po_pbp, filters=filters, ax=ax2, yard_grouping=yard_grouping)
    return fig
figure_1 = fig1()
###############################################################
# Figure 2 - Play Type Distribution: Regular Season vs Playoffs
def match(playtype):
    valid_play_types = [
        'Field Goal',
        'Pass',
        'Run',
        'QB Kneel',
        'Punt',
        'Extra Point',
        'Sack',
        'Spike',
        'Timeout'
    ]
    return playtype in valid_play_types
def condense_pbp_data(df):
    new_df = df[['qtr', 'down', 'TimeUnder','TimeSecs', 'yrdline100', 'ScoreDiff', 'PlayType','Season']]
    new_df = new_df[new_df.PlayType.map(match)]
    new_df = new_df[new_df['down'].isnull()==False]
    return new_df
playoffs = condense_pbp_data(po_pbp)
regular = condense_pbp_data(rs_pbp)
def makeDF(season=2009):
    rdf = regular#[regular.Season==season]
    rdf = rdf.groupby('PlayType').agg({'qtr':len}).reset_index()
    rdf.columns = ['PlayType', 'Count']
    rdf['Percent Total'] = rdf.Count/rdf.Count.sum()*100
    rdf['ID'] = 'Regular'
    pdf = playoffs[playoffs.Season==season]
    pdf = pdf.groupby('PlayType').agg({'qtr':len}).reset_index()
    pdf.columns = ['PlayType', 'Count']
    pdf['Percent Total'] = pdf.Count/pdf.Count.sum()*100
    pdf['ID'] = 'Playoffs'
    x = rdf.append(pdf, ignore_index=True)
    fig, ax1 = plt.subplots(1,1,figsize=(12,10))
    sns.barplot(ax=ax1, data=x, y='PlayType', x='Percent Total',hue='ID', order=['Pass', 'Run', 'Punt', 'Field Goal', 'QB Kneel'])
    ax1.set_xlim(0,60)
    return fig
figure_2 = makeDF()
###############################################################
# Figure 3 - Score Differential Distributions by Play Type
def fig3():
    sns.set_style('whitegrid')
    sns.set_palette(['blue', 'green','red'])
    fig, axes = plt.subplots(2, 1, figsize=(15,15))
    shade = True
    bw = 2
    sns.kdeplot(ax=axes[0],data=rs_pbp[rs_pbp.PlayType == 'Pass'].ScoreDiff.dropna(),label='Pass',shade=shade,bw=bw)
    sns.kdeplot(ax=axes[0],data=rs_pbp[rs_pbp.PlayType == 'Run'].ScoreDiff.dropna(),label='Run',shade=shade,bw=bw)
    sns.kdeplot(ax=axes[0],data=rs_pbp[rs_pbp.PlayType == 'Extra Point'].ScoreDiff.dropna(),label='Extra Point',shade=shade,bw=bw)
    axes[0].set_xlim(-40,40)
    axes[0].set_ylim(0,0.09)
    sns.kdeplot(ax=axes[1],data=po_pbp[po_pbp.PlayType == 'Pass'].ScoreDiff.dropna(),label='Pass',shade=shade,bw=bw)
    sns.kdeplot(ax=axes[1],data=po_pbp[po_pbp.PlayType == 'Run'].ScoreDiff.dropna(),label='Run',shade=shade,bw=bw)
    sns.kdeplot(ax=axes[1],data=po_pbp[po_pbp.PlayType == 'Extra Point'].ScoreDiff.dropna(),label='Extra Point',shade=shade,bw=bw)
    axes[1].set_xlim(-40,40)
    axes[1].set_ylim(0,0.09)
    #SMOOTH IT OUT!
    return fig
figure_3 = fig3()
###############################################################
# Figure 4 - Run/Pass Counts by Quarter and Yard Line
def plot_PlayType(df,stat,playtypelist=['Pass','Run','Field Goal','QB Kneel','Punt'],percent_total=False):
    g = df.groupby([stat,'PlayType']).count().reset_index()
    g = g[g.columns[0:3]]
    last_col_name = g.columns[-1]
    g1 = g.groupby([stat, 'PlayType']).agg({last_col_name: 'sum'})
    if percent_total:
        g1 = g1.groupby(level=1).apply(lambda x: 100 * x / float(x.sum()))
    g1 = g1.reset_index()
    g1 = g1[g1.PlayType.apply(lambda x: x in playtypelist)]
    return sns.barplot(x=stat, y=last_col_name, hue="PlayType", data=g1)
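# A comment-only usage sketch (mirrors fig4 below): plot the share of Run vs
# Pass plays per quarter, normalized so each play type sums to 100%.
#
#   ax = plot_PlayType(regular, 'qtr', ['Run', 'Pass'], percent_total=True)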
def fig4():
    fig = plt.figure(figsize=(16,32))
    ax3 = fig.add_subplot(513)
    ax3 = plot_PlayType(regular,'qtr',['Run','Pass'],False)
    ax4 = fig.add_subplot(514)
    ax4 = plot_PlayType(regular,'yrdline100',['Run','Pass'],False)
    ax4.xaxis.set_ticks(range(4, 99, 5))
    ax4.xaxis.set_ticklabels(range(5,100,5))
    ax4.grid(True,'major','both')
    return fig
figure_4 = fig4()
###############################################################
# Figure 5 - Run/Pass Distributions over the Game Clock
def fig5():
    fig, axes = plt.subplots(2,1,figsize=(14,7))
    sns.kdeplot(ax=axes[0],data=regular[regular.PlayType == 'Pass'].TimeSecs,bw=20,label='Pass')
    sns.kdeplot(ax=axes[0],data=regular[regular.PlayType == 'Run'].TimeSecs,bw=20,label='Run')
    loc = plticker.MultipleLocator(base=120.0) # this locator puts ticks at regular intervals
    axes[0].xaxis.set_major_locator(loc)
    axes[0].set_xlim(0,3600)
    axes[0].set_ylim(0,0.00085)
    axes[0].vlines([x*60 for x in [15,30,45]],0,0.0009,colors='black')
    axes[0].grid(True,'major','y')
    axes[0].grid(False,'major','x')
    sns.kdeplot(ax=axes[1],data=playoffs[playoffs.PlayType == 'Pass'].TimeSecs,bw=20,label='Pass')
    sns.kdeplot(ax=axes[1],data=playoffs[playoffs.PlayType == 'Run'].TimeSecs,bw=20,label='Run')
    loc = plticker.MultipleLocator(base=120.0) # this locator puts ticks at regular intervals
    axes[1].xaxis.set_major_locator(loc)
    axes[1].set_xlim(0,3600)
    axes[1].set_ylim(0,0.00085)
    axes[1].vlines([x*60 for x in [15,30,45]],0,0.0009,colors='black')
    axes[1].grid(True,'major','y')
    axes[1].grid(False,'major','x')
    return fig
figure_5 = fig5()
#################################################################
# Figure 6 - Field Goal Results by Distance and Outcome
def fig6():
    rs_fg = rs_pbp[rs_pbp.PlayType =='Field Goal'].groupby('FieldGoalResult').agg({'Date':len}).reset_index()
    rs_fg.columns=['FieldGoalResult', 'Count']
    rs_fg['Percent Total'] = rs_fg.Count.apply(lambda x: 100 * x / float(rs_fg.Count.sum()))
    po_fg = po_pbp[po_pbp.PlayType =='Field Goal'].groupby('FieldGoalResult').agg({'Date':len}).reset_index()
    po_fg.columns=['FieldGoalResult', 'Count']
    po_fg['Percent Total'] = po_fg.Count.apply(lambda x: 100 * x / float(po_fg.Count.sum()))
    sns.set_palette(['green', 'orange', 'red'])
    fig, axes = plt.subplots(2, 2,sharey=True,figsize=(14,7))
    order = ['Good','Blocked','No Good']
    sns.violinplot(ax=axes[0][0], data=rs_pbp[rs_pbp.PlayType=='Field Goal'], x='FieldGoalDistance', y='FieldGoalResult',order=order, scale='width', bw=0.05)
    sns.violinplot(ax=axes[1][0], data=po_pbp[po_pbp.PlayType=='Field Goal'], x='FieldGoalDistance', y='FieldGoalResult',order=order, scale='width', bw=0.05)
    axes[0][0].set_xlim(0,100)
    axes[1][0].set_xlim(0,100)
    sns.barplot(ax=axes[0][1], data=rs_fg,y='FieldGoalResult', x='Percent Total',order=order)
    sns.barplot(ax=axes[1][1], data=po_fg,y='FieldGoalResult', x='Percent Total',order=order)
    axes[0][1].set_xlim(0,100)
    axes[1][1].set_xlim(0,100)
    axes[0][1].set_xticklabels(['0%','20%','40%','60%','80%','100%'])
    axes[1][1].set_xticklabels(['0%','20%','40%','60%','80%','100%'])
    axes[0][0].set_title('Field Goal Results by Distance')
    axes[0][0].set_xlabel('')
    axes[0][0].set_ylabel('Regular Season')
    axes[0][1].set_title('Field Goal Results Distribution')
    axes[0][1].set_xlabel('')
    axes[0][1].set_ylabel('')
    axes[1][0].set_ylabel('Playoffs')
    axes[1][0].set_xlabel('Field Goal Distance (yds)')
    axes[1][0].figure
    axes[1][1].set_ylabel('')
    axes[1][1].set_xlabel('Percent Total')
    return fig
figure_6 = fig6()
#####################################################################
# Figure 7 - Pass/Rush Ratio by Team
teams = [['ARI', 'Arizona', 'Cardinals', 'Arizona Cardinals'],
 ['ATL', 'Atlanta', 'Falcons', 'Atlanta Falcons'],
 ['BAL', 'Baltimore', 'Ravens', 'Baltimore Ravens'],
 ['BUF', 'Buffalo', 'Bills', 'Buffalo Bills'],
 ['CAR', 'Carolina', 'Panthers', 'Carolina Panthers'],
 ['CHI', 'Chicago', 'Bears', 'Chicago Bears'],
 ['CIN', 'Cincinnati', 'Bengals', 'Cincinnati Bengals'],
 ['CLE', 'Cleveland', 'Browns', 'Cleveland Browns'],
 ['DAL', 'Dallas', 'Cowboys', 'Dallas Cowboys'],
 ['DEN', 'Denver', 'Broncos', 'Denver Broncos'],
 ['DET', 'Detroit', 'Lions', 'Detroit Lions'],
 ['GB', 'Green Bay', 'Packers', 'Green Bay Packers', 'G.B.', 'GNB'],
 ['HOU', 'Houston', 'Texans', 'Houston Texans'],
 ['IND', 'Indianapolis', 'Colts', 'Indianapolis Colts'],
 ['JAC', 'Jacksonville', 'Jaguars', 'Jacksonville Jaguars', 'JAX'],
 ['KC', 'Kansas City', 'Chiefs', 'Kansas City Chiefs', 'K.C.', 'KAN'],
 ['LA', 'Los Angeles', 'Rams', 'Los Angeles Rams', 'L.A.'],
 ['MIA', 'Miami', 'Dolphins', 'Miami Dolphins'],
 ['MIN', 'Minnesota', 'Vikings', 'Minnesota Vikings'],
 ['NE', 'New England', 'Patriots', 'New England Patriots', 'N.E.', 'NWE'],
 ['NO', 'New Orleans', 'Saints', 'New Orleans Saints', 'N.O.', 'NOR'],
 ['NYG', 'Giants', 'New York Giants', 'N.Y.G.'],
 ['NYJ', 'Jets', 'New York Jets', 'N.Y.J.'],
 ['OAK', 'Oakland', 'Raiders', 'Oakland Raiders'],
 ['PHI', 'Philadelphia', 'Eagles', 'Philadelphia Eagles'],
 ['PIT', 'Pittsburgh', 'Steelers', 'Pittsburgh Steelers'],
 ['SD', 'San Diego', 'Chargers', 'San Diego Chargers', 'S.D.', 'SDG'],
 ['SEA', 'Seattle', 'Seahawks', 'Seattle Seahawks'],
 ['SF', 'San Francisco', '49ers', 'San Francisco 49ers', 'S.F.', 'SFO'],
 ['STL', 'St. Louis', 'Rams', 'St. Louis Rams', 'S.T.L.'],
 ['TB', 'Tampa Bay', 'Buccaneers', 'Tampa Bay Buccaneers', 'T.B.', 'TAM'],
 ['TEN', 'Tennessee', 'Titans', 'Tennessee Titans'],
 ['WAS', 'Washington', 'Redskins', 'Washington Redskins', 'WSH']]
teams_dict = {x[3]:x[0] for x in teams}
 # Jacksonville Data Fix
rs_pbp.posteam = rs_pbp.posteam.replace('JAX', 'JAC')
rs_pbp.HomeTeam = rs_pbp.HomeTeam.replace('JAX', 'JAC')
rs_pbp.AwayTeam = rs_pbp.AwayTeam.replace('JAX', 'JAC')
pass_rush_attempts_by_team = rs_pbp.groupby(['posteam','Season']).agg(sum)[['PassAttempt','RushAttempt']]
pass_rush_attempts_by_team['PassRushRatio'] = pass_rush_attempts_by_team.apply(lambda x: (x.PassAttempt * 1.0) / x.RushAttempt, axis=1)
sns.set_palette('muted')
plot_df = pass_rush_attempts_by_team
plot_teams = teams_dict
def plotPassRushByTeam(team_focus_1, team_focus_2):
    fig,ax = plt.subplots(1,1,figsize=(15,8))
    for team in plot_teams:
        if (plot_teams[team] != team_focus_1) and (plot_teams[team] != team_focus_2):
            plt.plot(plot_df.loc[plot_teams[team]]['PassRushRatio'], color='0.91')
    plt.plot(plot_df.loc[team_focus_1]['PassRushRatio'], color='Blue', axes=ax)
    plt.plot(plot_df.loc[team_focus_2]['PassRushRatio'], color='Red', axes=ax)
    return fig
def fig7():
    sns.set_style('white')
    return plotPassRushByTeam(team_focus_1 = 'NYG', team_focus_2 = 'NYJ')
figure_7 = fig7()
##########################################################
# Figure 8 - Pass/Rush Ratio and Playoff Appearances
playoff_teams = {year:po_pbp.mask('Season',year).posteam.dropna().unique().tolist() for year in np.arange(2009,2017,1)}
def madeit(row):
    team, season = row.name
    return int(team in playoff_teams[season])
next_df = pass_rush_attempts_by_team.copy()
next_df['PO'] = next_df.apply(madeit, axis=1)
next_df.reset_index().groupby(['posteam','PO']).agg({'PassRushRatio':np.mean}).reset_index().pivot('posteam','PO','PassRushRatio')
def fig8():
    sns.set_context('talk')
    #sns.heatmap(data = pass_rush_attempts_by_team.reset_index().pivot('posteam','PO','PassRushRatio'),
    #            vmin=0,vmax=1,square=False,cmap='rainbow', annot=False)
    fig,ax = plt.subplots(1,1)
    new_df = next_df.reset_index().groupby(['posteam','PO']).agg({'PassRushRatio':np.mean}).reset_index().pivot('posteam','PO','PassRushRatio')
    sns.heatmap(data = new_df, square=False, annot=False, cmap='Greens')
    return fig
figure_8 = fig8()
############################################################
def fig9():
    fig,ax = plt.subplots(1,1)
    pass_rush_attempts_by_team.loc['DEN']['PassRushRatio'].plot()
    return fig
figure_9 = fig9()
#############################################################
def fig10():
    fig, ax = plt.subplots(1,1,figsize=(3,5))
    sns.boxplot(data=next_df.reset_index(),x='PO', y='PassRushRatio', ax=ax)
    return fig
figure_10 = fig10()
#############################################################
avg_prr_by_team = pass_rush_attempts_by_team.reset_index().groupby('posteam').agg({'PassRushRatio':np.mean}).sort_values('PassRushRatio')
avg_prr_by_season = pass_rush_attempts_by_team.reset_index().groupby('Season').agg({'PassRushRatio':np.mean}).sort_values('PassRushRatio')
def fig11():
    with sns.axes_style('ticks'):
        fig,ax = plt.subplots(1,1,figsize=(20,7))
        sns.boxplot(data=next_df.reset_index(),x='posteam', y='PassRushRatio', ax=ax, order=avg_prr_by_team.index.tolist(),hue='PO')
    return fig
figure_11 = fig11()
 | 
	mit | 
| 
	amancevice/stanhope | 
	stanhope/stanhope/tables.py | 
	1 | 
	9826 | 
	"""
StanhopeFramers Tables
"""
import io
import subprocess
import pandas
from stanhope import utils
pandas.set_option('display.max_rows', 999)
pandas.set_option('display.width', 999)
pandas.set_option('display.max_colwidth', 999)
class Table(object):
    def __init__(self, *tables):
        self.tables = tables or (type(self).__name__,)
        self.frame = None
    @staticmethod
    def read(table):
        cmd = ['mdb-export', '/data/StanhopeFramers.mdb', table]
        out = subprocess.check_output(cmd)
        return io.BytesIO(out)
    def load(self):
        frame = pandas.DataFrame()
        for table in self.tables:
            page = pandas.read_csv(self.read(table), **self.READ_CSV)
            # page['Table'] = table
            frame = frame.append(page)
        self.frame = frame
        return frame
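# A comment-only usage sketch (assumes /data/StanhopeFramers.mdb and the
# mdb-tools `mdb-export` binary are available, as in Table.read above;
# Customers and FrameOrders are defined below):
#
#   customers = Customers()
#   customers.load()
#   accounts = customers.accounts()
#   contacts = customers.contacts()
#
#   orders = FrameOrders()
#   orders.load()
#   order_frame = orders.orders()
#   treatment_frame = orders.treatments()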
class Customers(Table):
    READ_CSV = {
        'converters': {
            'Customer Number': utils.upper,
            'Credit': utils.boolean,
            'Tax Exempt': utils.boolean,
            'Deceased': utils.boolean},
        'parse_dates': [
            'Date',
            'Last Order',
            'Last Update']}
    def accounts(self):
        frame = self.frame.copy()
        # Add Legacy ID
        frame['Legacy ID'] = \
            frame[['Customer Number']].apply(utils.legacy_id, axis=1)
        # Copy legacy record
        frame['Legacy Record'] = frame.drop('Legacy ID', axis=1)\
                                      .apply(utils.legacy_record, axis=1)
        # Drop unused columns
        frame.drop(['Address',
                    'City',
                    'Date',
                    'Deceased',
                    'Email',
                    'Last Order',
                    'Last Update',
                    'State',
                    'Telephone',
                    'Zip'],
                   axis=1,
                   inplace=True)
        # Rename columns
        frame.rename(inplace=True,
                     columns={'Customer Number': 'Legacy Customer Number',
                              'Name': 'Account',
                              'Comment': 'Comments'})
        # Set account name
        frame.loc[:, 'Account'] = \
            frame['Account'].apply(utils.replace_newline)\
                            .combine_first(frame['Legacy Customer Number'])
        frame['Primary Contact'] = frame['Account']
        # Massage fields
        frame.loc[~frame['Category'].isnull(), 'Category'] = \
            frame.loc[~frame['Category'].isnull(), 'Category'].apply(
                utils.account_category)
        frame.loc[~frame['Source'].isnull(), 'Source'] = \
            frame.loc[~frame['Source'].isnull(), 'Source'].apply(utils.source)
        frame.loc[:, 'Comments'] = \
            frame['Comments'].apply(lambda x: utils.replace_newline(x, '\n'))
        # Return
        return frame
    def contacts(self):
        frame = self.frame.copy()
        # Add Legacy ID
        frame['Legacy ID'] = \
            frame[['Customer Number']].apply(utils.legacy_id, axis=1)
        # Drop unused columns
        frame.drop(['Category',
                    'Comment',
                    'Credit',
                    'Date',
                    'Last Order',
                    'Last Update',
                    'Source',
                    'Tax Exempt'],
                   axis=1,
                   inplace=True)
        # Rename columns
        frame.rename(inplace=True,
                     columns={'Customer Number': 'Account Link',
                              'Name': 'Contact',
                              'Deceased': 'Removed'})
        # Massage fields
        frame.loc[:, 'Contact'] = \
            frame['Contact'].apply(utils.replace_newline)\
                            .combine_first(frame['Account Link'])
        frame.loc[:, 'Contact'] = frame['Contact'].apply(utils.replace_newline)
        frame.loc[:, 'Address'] = frame['Address'].apply(utils.replace_newline)
        frame.loc[:, 'City'] = frame['City'].apply(utils.replace_newline)
        frame.loc[:, 'State'] = frame['State'].apply(utils.replace_newline)
        frame.loc[:, 'Zip'] = frame['Zip'].apply(utils.replace_newline)
        frame.loc[:, 'Telephone'] = \
            frame['Telephone'].apply(utils.replace_newline)
        # Return
        return frame
class FrameOrders(Table):
    READ_CSV = {
        'converters': {'CustomerNo': utils.upper, 'OrderNo': utils.upper},
        'parse_dates': ['DateCompleted', 'DueDate', 'OrderDate']}
    def orders(self):
        frame = self.frame.copy()
        # Add Legacy ID
        frame['Legacy ID'] = \
            frame[['CustomerNo', 'OrderNo', 'OrderDate']]\
            .apply(utils.legacy_id, axis=1)
        # Copy legacy record
        frame['Legacy Record'] = frame.drop('Legacy ID', axis=1)\
                                      .apply(utils.legacy_record, axis=1)
        # Set status
        frame.loc[frame['SalesType'] == 'VOID', 'Status'] = 'V'
        # Drop unused columns
        frame.drop(['Artist',
                    'BinNo',
                    'Comments',
                    'DateCompleted',
                    'Fitting',
                    'Frame Height',
                    'Frame Width',
                    'FrameMfg',
                    'FrameNo',
                    'Glazing',
                    'Joining',
                    'Mat',
                    'MatColor',
                    'MatMfg',
                    'Matting',
                    'MattingSize',
                    'ProductionComments',
                    'Qty',
                    'SalesCatgy',
                    'SalesType',
                    'TotalSale'],
                   axis=1,
                   inplace=True)
        # Rename columns
        frame.rename(inplace=True,
                     columns={'OrderNo': 'Order Number',
                              'OrderDate': 'Order Date',
                              'DueDate': 'Due Date',
                              'CustomerNo': 'Account Link',
                              'Status': 'Order Status',
                              'Location': 'Order Location',
                              'SalesPers': 'Salesperson Link',
                              'Delivery': 'Delivery Location',
                              'Cust-Client': 'Client'})
        # Massage fields
        frame.loc[:, 'Delivery Location'] = \
            frame['Delivery Location'].apply(utils.delivery_location)
        frame.loc[:, 'Discount'] = frame['Discount'].apply(utils.discount)
        frame.loc[:, 'Order Location'] = \
            frame['Order Location'].apply(utils.order_location)
        frame.loc[:, 'Order Status'] = \
            frame['Order Status'].apply(utils.status)
        frame.loc[:, 'Salesperson Link'] = \
            frame['Salesperson Link'].apply(utils.salesperson)
        frame.loc[:, 'Delivery Location'] = \
            frame['Delivery Location'].combine_first(frame['Order Location'])
        # Return
        return frame
    def treatments(self):
        frame = self.frame.copy()
        # Add Legacy ID
        frame['Legacy ID'] = \
            frame[['CustomerNo', 'OrderNo', 'OrderDate']]\
            .apply(utils.legacy_id, axis=1)
        # Add Legacy Order ID
        frame['Order Link'] = frame['Legacy ID']
        # Drop unused columns
        frame.drop(['Cust-Client',
                    'CustomerNo',
                    'DateCompleted',
                    'Delivery',
                    'Discount',
                    'DueDate',
                    'Fitting',
                    'Location',
                    'Matting',
                    'OrderDate',
                    'OrderNo',
                    'SalesCatgy',
                    'SalesPers',
                    'Status'],
                   axis=1,
                   inplace=True)
        # Rename fields
        frame.rename(inplace=True,
                     columns={'BinNo': 'Bin Number',
                              'Comments': 'Description',
                              'FrameNo': 'Frame Style',
                              'FrameMfg': 'Frame Manufacturer',
                              'Joining': 'Frame Join',
                              'Mat': 'Matting / Mounting',
                              'MatColor': 'Mat Color',
                              'MatMfg': 'Mat Manufacturer',
                              'MattingSize': 'Mat Size',
                              'ProductionComments': 'Production Comments',
                              'Qty': 'Quantity',
                              'SalesType': 'Treatment',
                              'TotalSale': 'Price'})
        # Massage fields
        frame.loc[:, 'Frame Join'] = frame['Frame Join'].apply(utils.join)
        frame.loc[:, 'Mat Manufacturer'] = \
            frame['Mat Manufacturer'].apply(utils.matmfg)
        frame.loc[:, 'Frame Manufacturer'] = \
            frame['Frame Manufacturer'].apply(utils.framemfg)
        frame.loc[:, 'Matting / Mounting'] = \
            frame['Matting / Mounting'].apply(utils.mat)
        frame.loc[:, 'Glazing'] = \
            frame['Glazing'].apply(utils.glazing)
        frame.loc[:, 'Treatment'] = frame['Treatment'].apply(utils.sales_type)
        # Add dimensions
        frame['Frame Width Inches'] = \
            frame['Frame Width'].apply(utils.inches)
        frame['Frame Width Fraction'] = \
            frame['Frame Width'].apply(utils.fraction)
        frame['Frame Height Inches'] = \
            frame['Frame Height'].apply(utils.inches)
        frame['Frame Height Fraction'] = \
            frame['Frame Height'].apply(utils.fraction)
        frame.drop(['Frame Width', 'Frame Height'], axis=1, inplace=True)
        # Return
        return frame
 | 
	mit | 
| 
	mikeireland/pynrm | 
	go.py | 
	1 | 
	3044 | 
	# -*- coding: utf-8 -*-
"""
Created on Fri May  2 13:49:11 2014
@author: mireland
A script for testing...  Change this to try out your own analysis.
"""
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
from azimuthalAverage import *
# This includes an AO Instrument called "aoinst"
import pypoise
import nirc2
import glob
import pdb
#Create a pypoise instance with a nirc2 AO instrument
pp = pypoise.PYPOISE(nirc2.NIRC2())
plt.ion()
#Reduction Directory - Lp full pupil
pp.aoinst.rdir = '/Users/mireland/tel/nirc2/redux/generic2015/'
pp.aoinst.cdir = '/Users/mireland/tel/nirc2/redux/TauPAH15/'
#Data directory
pp.aoinst.ddir =  '/Users/mireland/data/nirc2/151128/'
pp.aoinst.read_summary_csv()
if(False): 
  pp.process_block(fstart='n1251.fits',fend='n1293.fits', dither=True)
if(False): 
  pp.process_block(fstart='n1493.fits',fend='n1517.fits', dither=True)
targname = 'AB Aur'
targname = 'SU Aur'
targname = 'RY Tau'
if(True):
 #The argument "target_file" is just there to determine which object is the target.
 summary_files = pp.poise_process(target=targname, use_powerspect=False)
 print(summary_files)
 
if(True): 
# summary_files = glob.glob('*LkCa*poise_cube*.fits')
 implane_file = pp.aoinst.cdir + targname + '_implane.fits'
 pxscale = 5.0
#pdb.set_trace()
if (True):
 kp_implane = pp.kp_to_implane(summary_files=summary_files, 
 	out_file=implane_file, sz=141, pxscale=pxscale, use_powerspect=False)
if (True):
 #Automatic from here...
 pgrid, crat, crat_sig, chi2, best_rchi2 = pp.implane_fit_binary(implane_file, maxrad=250)
 print "Grid Fit: ", pgrid
 pgrid = np.array(pgrid)
 if (pgrid[2] > 0.5):
    print "Contrast too high to use kerphase for fitting (i.e. near-equal binary)."
 else:
    p,errs,cov = pp.kp_binary_fit(summary_files,pgrid)
    fitfile = open(targname + '_binaryfit.txt','w')
    fitfile.write('Separation (mas) & Position angle (degs) & Contrast \\\\\n')
    fitfile.write('{0:5.2f} $\pm$ {1:5.2f} & {2:5.2f} $\pm$ {3:5.2f} & {4:6.4f} $\pm$ {5:6.4f} \\\\ \n'.format(\
            p[0],errs[0], p[1],errs[1], p[2],errs[2]))
    fitfile.write('Contrast (mags) & Separation (mas) & Position angle (degs) \\\\\n')
    fit_crat = -2.5*np.log10(p[2])
    fit_crat_sig = 2.5/np.log(10)*errs[2]/p[2]
    fitfile.write('{0:5.2f} $\pm$ {1:5.2f} & {2:5.2f} $\pm$ {3:5.2f} & {4:5.3f} $\pm$ {5:5.3f} \\\\ \n'.format(\
            fit_crat, fit_crat_sig, p[0],errs[0], p[1],errs[1] ))
    fitfile.close()
 a = azimuthalAverage(crat_sig*np.sqrt(best_rchi2), returnradii=True,binsize=1)
 sep_null = a[0]*pxscale
 contrast_null = -2.5*np.log10(5*a[1])
 plt.clf()
 plt.plot(sep_null, contrast_null)
 plt.title(targname)
 plt.xlabel("Separation (milli-arcsec)")
 plt.ylabel("5-sigma contrast (mags)")
 sep_out = np.arange(20,301,10)
 contrast_out = np.interp(sep_out, sep_null, contrast_null)
 for i in range(len(sep_out)):
  print('{0:4d} {1:5.1f}'.format(int(sep_out[i]),contrast_out[i]))
 plt.axis((0,300,2,7))
 plt.savefig(pp.aoinst.cdir + targname + '_contrast_curve.png')
 | 
	mit | 
| 
	JeanKossaifi/scikit-learn | 
	sklearn/neighbors/nearest_centroid.py | 
	199 | 
	7249 | 
	# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
#         Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.
    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.
    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
    Parameters
    ----------
    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.
    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class
    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.
    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.
    """
    def __init__(self, metric='euclidean', shrink_threshold=None):
        self.metric = metric
        self.shrink_threshold = shrink_threshold
    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format, which makes it easier to calculate the median.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        n_samples, n_features = X.shape
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of clusters in each class.
        nk = np.zeros(n_classes)
        for cur_class in range(n_classes):
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                center_mask = np.where(center_mask)[0]
            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)
        if self.shrink_threshold:
            dataset_centroid_ = np.mean(X, axis=0)
            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self
    def predict(self, X):
        """Perform classification on an array of test vectors X.
        The predicted class C for each sample in X is returned.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')
        X = check_array(X, accept_sparse='csr')
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
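# A comment-only usage sketch of the shrunken-centroid variant (hypothetical
# data X, y, X_new; the import path matches the class docstring above):
#
#   from sklearn.neighbors.nearest_centroid import NearestCentroid
#   clf = NearestCentroid(metric='euclidean', shrink_threshold=0.2).fit(X, y)
#   clf.predict(X_new)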
 | 
	bsd-3-clause | 
| 
	boomsbloom/dtm-fmri | 
	DTM/for_gensim/lib/python2.7/site-packages/mpl_toolkits/axisartist/axis_artist.py | 
	7 | 
	52735 | 
	"""
axis_artist.py module provides axis-related artists. They are
 * axis line
 * tick lines
 * tick labels
 * axis label
 * grid lines
The main artist classes are AxisArtist and GridlinesCollection. The
GridlinesCollection is responsible for drawing grid lines and the
AxisArtist is responsible for all other artists. The AxisArtist class
has attributes that are associated with each type of artist.
 * line : axis line
 * major_ticks : major tick lines
 * major_ticklabels : major tick labels
 * minor_ticks : minor tick lines
 * minor_ticklabels : minor tick labels
 * label : axis label
Typically, the AxisArtist associated with an axes will be accessed with
the *axis* dictionary of the axes, i.e., the AxisArtist for the bottom
axis is
  ax.axis["bottom"]
where *ax* is an instance of axes (mpl_toolkits.axislines.Axes).  Thus,
ax.axis["bottom"].line is an artist associated with the axis line, and
ax.axis["bottom"].major_ticks is an artist associated with the major tick
lines.
You can change the colors, fonts, line widths, etc. of these artists
by calling a suitable set method. For example, to change the color of the major
ticks of the bottom axis to red,
  ax.axis["bottom"].major_ticks.set_color("r")
However, things like the locations of the ticks and their ticklabels need
to be changed from the side of the grid_helper.
axis_direction
--------------
AxisArtist, AxisLabel, and TickLabels have an *axis_direction* attribute,
which adjusts the location, angle, etc. The *axis_direction* must be
one of [left, right, bottom, top] and they follow the matplotlib
convention for the rectangle axis.
For example, for the *bottom* axis (the left and right is relative to
the direction of the increasing coordinate),
 * ticklabels and axislabel are on the right
 * ticklabels and axislabel have text angle of 0
 * ticklabels are baseline, center-aligned
 * axislabel is top, center-aligned
The text angles are actually relative to (90 + angle of the direction
to the ticklabel), which gives 0 for bottom axis.
                        left bottom right top
 ticklabels location    left right  right left
 axislabel location     left right  right left
 ticklabels angle       90    0      -90  180
 axislabel angle        180   0     0     180
 ticklabel va           center baseline center baseline
 axislabel va           center top      center bottom
 ticklabel ha           right  center   right  center
 axislabel ha           right  center   right  center
Ticks are, by default, on the opposite side of the ticklabels. To put the
ticks on the same side as the ticklabels,
  ax.axis["bottom"].major_ticks.set_tick_out(True)
The following attributes can be customized (use the set_xxx methods):
 * Ticks : ticksize, tick_out
 * TickLabels : pad
 * AxisLabel : pad
"""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from matplotlib.externals import six
# FIXME :
# * : angles are given in data coordinate - need to convert it to canvas coordinate
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
     IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
class BezierPath(mlines.Line2D):
    def __init__(self, path, *kl, **kw):
        mlines.Line2D.__init__(self, [], [], *kl, **kw)
        self._path = path
        self._invalid = False
    def recache(self):
        self._transformed_path = TransformedPath(self._path, self.get_transform())
        self._invalid = False
    def set_path(self, path):
        self._path = path
        self._invalid = True
    def draw(self, renderer):
        if self._invalid:
            self.recache()
        if not self._visible: return
        renderer.open_group('line2d')
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_foreground(self._color)
        gc.set_antialiased(self._antialiased)
        gc.set_linewidth(self._linewidth)
        gc.set_alpha(self._alpha)
        if self.is_dashed():
            cap = self._dashcapstyle
            join = self._dashjoinstyle
        else:
            cap = self._solidcapstyle
            join = self._solidjoinstyle
        gc.set_joinstyle(join)
        gc.set_capstyle(cap)
        funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
        if funcname != '_draw_nothing':
            tpath, affine = self._transformed_path.get_transformed_path_and_affine()
            lineFunc = getattr(self, funcname)
            lineFunc(renderer, gc, tpath, affine.frozen())
        gc.restore()
        renderer.close_group('line2d')
class UnimplementedException(Exception):
    pass
from matplotlib.artist import Artist
class AttributeCopier(object):
    def __init__(self, ref_artist, klass=Artist):
        self._klass = klass
        self._ref_artist = ref_artist
        super(AttributeCopier, self).__init__()
    def set_ref_artist(self, artist):
        self._ref_artist = artist
    def get_ref_artist(self):
        raise RuntimeError("get_ref_artist must overridden")
    #return self._ref_artist
    def get_attribute_from_ref_artist(self, attr_name, default_value):
        get_attr_method_name = "get_"+attr_name
        c = getattr(self._klass, get_attr_method_name)(self)
        if c == 'auto':
            ref_artist = self.get_ref_artist()
            if ref_artist:
                attr = getattr(ref_artist,
                               get_attr_method_name)()
                return attr
            else:
                return default_value
        return c
from matplotlib.lines import Line2D
class Ticks(Line2D, AttributeCopier):
    """
    Ticks are derived from Line2D, and note that ticks themselves
    are markers. Thus, you should use set_mec, set_mew, etc.
    To change the tick size (length), you need to use
    set_ticksize. To change the direction of the ticks (ticks are
    in the opposite direction of the ticklabels by default), use
    set_tick_out(True).
    """
    def __init__(self, ticksize, tick_out=False, **kwargs):
        self._ticksize = ticksize
        self.locs_angles_labels = []
        self.set_tick_out(tick_out)
        self._axis = kwargs.pop("axis", None)
        if self._axis is not None:
            if "color" not in kwargs:
                kwargs["color"] = "auto"
            if ("mew" not in kwargs) and ("markeredgewidth" not in kwargs):
                kwargs["markeredgewidth"] = "auto"
        Line2D.__init__(self, [0.], [0.], **kwargs)
        AttributeCopier.__init__(self, self._axis, klass=Line2D)
        self.set_snap(True)
    def get_ref_artist(self):
        #return self._ref_artist.get_ticklines()[0]
        return self._ref_artist.majorTicks[0].tick1line
    def get_color(self):
        return self.get_attribute_from_ref_artist("color", "k")
    def get_markeredgecolor(self):
        if self._markeredgecolor == 'auto':
            return self.get_color()
        else:
            return self._markeredgecolor
    def get_markeredgewidth(self):
        return self.get_attribute_from_ref_artist("markeredgewidth", .5)
    def set_tick_out(self, b):
        """
        Set True if the ticks need to be rotated by 180 degrees.
        """
        self._tick_out = b
    def get_tick_out(self):
        """
        Return True if the ticks will be rotated by 180 degrees.
        """
        return self._tick_out
    def set_ticksize(self, ticksize):
        """
        set length of the ticks in points.
        """
        self._ticksize = ticksize
    def get_ticksize(self):
        """
        Return length of the ticks in points.
        """
        return self._ticksize
    def set_locs_angles(self, locs_angles):
        self.locs_angles = locs_angles
    def _update(self, renderer):
        pass
    _tickvert_path = Path([[0., 0.], [1., 0.]])
    def draw(self, renderer):
        if not self.get_visible():
            return
        self._update(renderer) # update the tick
        size = self._ticksize
        path_trans = self.get_transform()
        # set gc : copied from lines.py
#         gc = renderer.new_gc()
#         self._set_gc_clip(gc)
#         gc.set_foreground(self.get_color())
#         gc.set_antialiased(self._antialiased)
#         gc.set_linewidth(self._linewidth)
#         gc.set_alpha(self._alpha)
#         if self.is_dashed():
#             cap = self._dashcapstyle
#             join = self._dashjoinstyle
#         else:
#             cap = self._solidcapstyle
#             join = self._solidjoinstyle
#         gc.set_joinstyle(join)
#         gc.set_capstyle(cap)
#         gc.set_snap(self.get_snap())
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_foreground(self.get_markeredgecolor())
        gc.set_linewidth(self.get_markeredgewidth())
        gc.set_alpha(self._alpha)
        offset = renderer.points_to_pixels(size)
        marker_scale = Affine2D().scale(offset, offset)
        if self.get_tick_out():
            add_angle = 180
        else:
            add_angle = 0
        marker_rotation = Affine2D()
        marker_transform = marker_scale + marker_rotation
        for loc, angle in self.locs_angles:
            marker_rotation.rotate_deg(angle+add_angle)
            locs = path_trans.transform_non_affine(np.array([loc, loc]))
            renderer.draw_markers(gc, self._tickvert_path, marker_transform,
                                  Path(locs), path_trans.get_affine())
            marker_rotation.clear()
        gc.restore()
def test_ticks():
    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    fig.clf()
    ax = fig.add_subplot(111)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    ticks = Ticks(ticksize=10, axis=ax.xaxis)
    ax.add_artist(ticks)
    locs_angles = [((0.2, 0.), 90),
                          ((0.4, 0.), 120)]
    ticks.set_locs_angles(locs_angles)
    plt.draw()
class LabelBase(mtext.Text):
    """
    A base class for AxisLabel and TickLabels. The position and angle
    of the text are calculated from the offset_ref_angle,
    text_ref_angle, and offset_radius attributes.
    """
    def __init__(self, *kl, **kwargs):
        self.locs_angles_labels = []
        self._ref_angle = 0
        self._offset_radius = 0.
        super(LabelBase, self).__init__(*kl,
                                        **kwargs)
        self.set_rotation_mode("anchor")
        self._text_follow_ref_angle = True
        #self._offset_ref_angle = 0
    def _set_ref_angle(self, a):
        self._ref_angle = a
    def _get_ref_angle(self):
        return self._ref_angle
    def _get_text_ref_angle(self):
        if self._text_follow_ref_angle:
            return self._get_ref_angle()+90
        else:
            return 0 #self.get_ref_angle()
    def _get_offset_ref_angle(self):
        return self._get_ref_angle()
    def _set_offset_radius(self, offset_radius):
        self._offset_radius = offset_radius
    def _get_offset_radius(self):
        return self._offset_radius
    _get_opposite_direction = {"left":"right",
                               "right":"left",
                               "top":"bottom",
                               "bottom":"top"}.__getitem__
    def _update(self, renderer):
        pass
    def draw(self, renderer):
        if not self.get_visible(): return
        self._update(renderer)
        # save original and adjust some properties
        tr = self.get_transform()
        angle_orig = self.get_rotation()
        offset_tr = Affine2D()
        self.set_transform(tr+offset_tr)
        text_ref_angle = self._get_text_ref_angle()
        offset_ref_angle = self._get_offset_ref_angle()
        theta = (offset_ref_angle)/180.*np.pi
        dd = self._get_offset_radius()
        dx, dy = dd * np.cos(theta), dd * np.sin(theta)
        offset_tr.translate(dx, dy)
        self.set_rotation(text_ref_angle+angle_orig)
        super(LabelBase, self).draw(renderer)
        offset_tr.clear()
        # restore original properties
        self.set_transform(tr)
        self.set_rotation(angle_orig)
    def get_window_extent(self, renderer):
        self._update(renderer)
        # save original and adjust some properties
        tr = self.get_transform()
        angle_orig = self.get_rotation()
        offset_tr = Affine2D()
        self.set_transform(tr+offset_tr)
        text_ref_angle = self._get_text_ref_angle()
        offset_ref_angle = self._get_offset_ref_angle()
        theta = (offset_ref_angle)/180.*np.pi
        dd = self._get_offset_radius()
        dx, dy = dd * np.cos(theta), dd * np.sin(theta)
        offset_tr.translate(dx, dy)
        self.set_rotation(text_ref_angle+angle_orig)
        bbox = super(LabelBase, self).get_window_extent(renderer).frozen()
        offset_tr.clear()
        # restore original properties
        self.set_transform(tr)
        self.set_rotation(angle_orig)
        return bbox
def test_labelbase():
    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    fig.clf()
    ax = fig.add_subplot(111)
    ax.plot([0.5], [0.5], "o")
    label = LabelBase(0.5, 0.5, "Test")
    a = -90
    label._set_ref_angle(a)
    label._set_offset_radius(offset_radius=50)
    label.set_rotation(-90)
    label.set(ha="center", va="top")
    ax.add_artist(label)
    plt.draw()
class AxisLabel(LabelBase, AttributeCopier):
    """
    Axis Label. Derived from Text. The position of the text is updated
    on the fly, so changing the text position has no effect. Otherwise, the
    properties can be changed as a normal Text.
    To change the pad between ticklabels and axis label, use set_pad.
    """
    def __init__(self, *kl, **kwargs):
        axis_direction = kwargs.pop("axis_direction", "bottom")
        self._axis = kwargs.pop("axis", None)
        #super(AxisLabel, self).__init__(*kl, **kwargs)
        LabelBase.__init__(self, *kl, **kwargs)
        AttributeCopier.__init__(self, self._axis, klass=LabelBase)
        self.set_axis_direction(axis_direction)
        self._pad = 5
        self._extra_pad = 0
    def set_pad(self, pad):
        """
        Set the pad in points. Note that the actual pad will be the
        sum of the internal pad and the external pad (which is set
        automatically by the AxisArtist); this method only sets the
        internal pad.
        """
        self._pad = pad
    def get_pad(self):
        """
        return pad in points. See set_pad for more details.
        """
        return self._pad
    def _set_external_pad(self, p):
        """
        Set the external pad IN PIXELS. This is intended to be set by the
        AxisArtist, not by the user.
        """
        self._extra_pad = p
    def _get_external_pad(self):
        """
        Get external pad.
        """
        return self._extra_pad
    def get_ref_artist(self):
        return self._axis.get_label()
    def get_text(self):
        t = super(AxisLabel, self).get_text()
        if t == "__from_axes__":
            return self._axis.get_label().get_text()
        return self._text
    _default_alignments = dict(left=("bottom", "center"),
                               right=("top", "center"),
                               bottom=("top", "center"),
                               top=("bottom", "center"))
    def set_default_alignment(self, d):
        if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
        va, ha = self._default_alignments[d]
        self.set_va(va)
        self.set_ha(ha)
    _default_angles = dict(left=180,
                           right=0,
                           bottom=0,
                           top=180)
    def set_default_angle(self, d):
        if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
        self.set_rotation(self._default_angles[d])
    def set_axis_direction(self, d):
        """
        Adjust the text angle and text alignment of axis label
        according to the matplotlib convention.
        =====================    ========== ========= ========== ==========
        property                 left       bottom    right      top
        =====================    ========== ========= ========== ==========
        axislabel angle          180        0         0          180
        axislabel va             center     top       center     bottom
        axislabel ha             right      center    right      center
        =====================    ========== ========= ========== ==========
        Note that the text angles are actually relative to (90 + angle
        of the direction to the ticklabel), which gives 0 for bottom
        axis.
        """
        if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
        self.set_default_alignment(d)
        self.set_default_angle(d)
    def get_color(self):
        return self.get_attribute_from_ref_artist("color", "k")
    def draw(self, renderer):
        if not self.get_visible():
            return
        pad = renderer.points_to_pixels(self.get_pad())
        r = self._get_external_pad() + pad
        self._set_offset_radius(r)
        super(AxisLabel, self).draw(renderer)
    def get_window_extent(self, renderer):
        if not self.get_visible():
            return
        pad = renderer.points_to_pixels(self.get_pad())
        r = self._get_external_pad() + pad
        self._set_offset_radius(r)
        bb = super(AxisLabel, self).get_window_extent(renderer)
        return bb
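# Note: as draw() above shows, the effective offset radius of an AxisLabel at draw time
# is the external pad (in pixels, set by the owning AxisArtist) plus the internal pad
# (in points, set via set_pad) converted to pixels through renderer.points_to_pixels().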
class TickLabels(AxisLabel, AttributeCopier): # mtext.Text
    """
    Tick Labels. While derived from Text, this single artist draws all
    ticklabels. As in AxisLabel, the position of the text is updated
    on the fly, so changing the text position has no effect. Otherwise,
    the properties can be changed as for a normal Text. Unlike the
    ticklabels of mainline matplotlib, the properties of a single
    ticklabel cannot be modified on their own.
    To change the pad between ticks and ticklabels, use set_pad.
    """
    def __init__(self, **kwargs):
        axis_direction = kwargs.pop("axis_direction", "bottom")
        AxisLabel.__init__(self, **kwargs)
        self.set_axis_direction(axis_direction)
        #self._axis_direction = axis_direction
        self._axislabel_pad = 0
        #self._extra_pad = 0
    # attribute copier
    def get_ref_artist(self):
        return self._axis.get_ticklabels()[0]
    def set_axis_direction(self, label_direction):
        """
        Adjust the text angle and text alignment of ticklabels
        according to the matplotlib convention.
        The *label_direction* must be one of [left, right, bottom,
        top].
        =====================    ========== ========= ========== ==========
        property                 left       bottom    right      top
        =====================    ========== ========= ========== ==========
        ticklabels angle         90         0         -90        180
        ticklabel va             center     baseline  center     baseline
        ticklabel ha             right      center    right      center
        =====================    ========== ========= ========== ==========
        Note that the text angles are actually relative to (90 + angle
        of the direction to the ticklabel), which gives 0 for bottom
        axis.
        """
        if label_direction not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
        self._axis_direction = label_direction
        self.set_default_alignment(label_direction)
        self.set_default_angle(label_direction)
    def invert_axis_direction(self):
        label_direction = self._get_opposite_direction(self._axis_direction)
        self.set_axis_direction(label_direction)
    def _get_ticklabels_offsets(self, renderer, label_direction):
        """
        Calculates the offsets of the ticklabels from the tick and
        their total heights. The offset only takes account the offset
        due to the vertical alignment of the ticklabels, i.e.,if axis
        direction is bottom and va is ;top', it will return 0. if va
        is 'baseline', it will return (height-descent).
        """
        whd_list = self.get_texts_widths_heights_descents(renderer)
        if not whd_list:
            return 0, 0
        r = 0
        va, ha = self.get_va(), self.get_ha()
        if label_direction == "left":
            pad = max([w for (w, h, d) in whd_list])
            if ha == "left":
                r = pad
            elif ha == "center":
                r = .5 * pad
        elif label_direction == "right":
            pad = max([w for (w, h, d) in whd_list])
            if ha == "right":
                r = pad
            elif ha == "center":
                r = .5 * pad
        elif label_direction == "bottom":
            pad = max([h for (w, h, d) in whd_list])
            if va == "bottom":
                r = pad
            elif va == "center":
                r =.5 * pad
            elif va == "baseline":
                max_ascent = max([(h-d) for (w, h, d) in whd_list])
                max_descent = max([d for (w, h, d) in whd_list])
                r  = max_ascent
                pad = max_ascent + max_descent
        elif label_direction == "top":
            pad = max([h for (w, h, d) in whd_list])
            if va == "top":
                r = pad
            elif va == "center":
                r =.5 * pad
            elif va == "baseline":
                max_ascent = max([(h-d) for (w, h, d) in whd_list])
                max_descent = max([d for (w, h, d) in whd_list])
                r  = max_descent
                pad = max_ascent + max_descent
        #tick_pad = renderer.points_to_pixels(self.get_pad())
        # r : offset
        # pad : total height of the ticklabels. This will be used to
        # calculate the pad for the axislabel.
        return r, pad
    _default_alignments = dict(left=("center", "right"),
                               right=("center", "left"),
                               bottom=("baseline", "center"),
                               top=("baseline", "center"))
    # set_default_alignments(self, d)
    _default_angles = dict(left=90,
                           right=-90,
                           bottom=0,
                           top=180)
    def draw(self, renderer):
        if not self.get_visible():
            self._axislabel_pad = self._get_external_pad()
            return
        r, total_width = self._get_ticklabels_offsets(renderer,
                                                      self._axis_direction)
        #self._set_external_pad(r+self._get_external_pad())
        pad = self._get_external_pad() + \
              renderer.points_to_pixels(self.get_pad())
        self._set_offset_radius(r+pad)
        #self._set_offset_radius(r)
        for (x, y), a, l in self._locs_angles_labels:
            if not l.strip(): continue
            self._set_ref_angle(a) #+ add_angle
            self.set_x(x)
            self.set_y(y)
            self.set_text(l)
            LabelBase.draw(self, renderer)
        self._axislabel_pad = total_width \
                              + pad # the value saved will be used to draw axislabel.
    def set_locs_angles_labels(self, locs_angles_labels):
        self._locs_angles_labels = locs_angles_labels
    def get_window_extents(self, renderer):
        if not self.get_visible():
            self._axislabel_pad = self._get_external_pad()
            return []
        bboxes = []
        r, total_width = self._get_ticklabels_offsets(renderer,
                                                     self._axis_direction)
        pad = self._get_external_pad() + \
              renderer.points_to_pixels(self.get_pad())
        self._set_offset_radius(r+pad)
        for (x, y), a, l in self._locs_angles_labels:
            self._set_ref_angle(a) #+ add_angle
            self.set_x(x)
            self.set_y(y)
            self.set_text(l)
            bb = LabelBase.get_window_extent(self, renderer)
            bboxes.append(bb)
        self._axislabel_pad = total_width \
                              + pad # the value saved will be used to draw axislabel.
        return bboxes
    def get_texts_widths_heights_descents(self, renderer):
        """
        return a list of width, height, descent for ticklabels.
        """
        whd_list = []
        for (x, y), a, l in self._locs_angles_labels:
            if not l.strip(): continue
            clean_line, ismath = self.is_math_text(l)
            whd = renderer.get_text_width_height_descent(
                clean_line, self._fontproperties, ismath=ismath)
            whd_list.append(whd)
        return whd_list
def test_ticklabels():
    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    fig.clf()
    ax = fig.add_subplot(111)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    ax.plot([0.2, 0.4], [0.5, 0.5], "o")
    ticks = Ticks(ticksize=10, axis=ax.xaxis)
    ax.add_artist(ticks)
    locs_angles_labels = [((0.2, 0.5), -90, "0.2"),
                          ((0.4, 0.5), -120, "0.4")]
    tick_locs_angles = [(xy, a+180) for xy, a, l in locs_angles_labels]
    ticks.set_locs_angles(tick_locs_angles)
    ax.plot([0.5], [0.5], ",")
    axislabel = AxisLabel(0.5, 0.5, "Test")
    axislabel._set_offset_radius(20)
    axislabel._set_ref_angle(0)
    axislabel.set_axis_direction("bottom")
    #axislabel._text_follow_ref_angle = True
    #axislabel.set(va="center", ha="right")
    ax.add_artist(axislabel)
    if 1:
        ticklabels = TickLabels(axis_direction="left")
        ticklabels._locs_angles_labels = locs_angles_labels
        #ticklabels.set_rotation(90)
        ticklabels.set_pad(10)
        ax.add_artist(ticklabels)
    ax.set_xlim(0, 1); ax.set_ylim(0, 1)
    plt.draw()
class GridlinesCollection(LineCollection):
    def __init__(self, *kl, **kwargs):
        """
        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"
        """
        self._which = kwargs.pop("which", "major")
        self._axis = kwargs.pop("axis", "both")
        super(GridlinesCollection, self).__init__(*kl, **kwargs)
        self.set_grid_helper(None)
    def set_which(self, which):
        self._which = which
    def set_axis(self, axis):
        self._axis = axis
    def set_grid_helper(self, grid_helper):
        self._grid_helper = grid_helper
    def draw(self, renderer):
        if self._grid_helper is not None:
            self._grid_helper.update_lim(self.axes)
            gl = self._grid_helper.get_gridlines(self._which, self._axis)
            if gl:
                self.set_segments([np.transpose(l) for l in gl])
            else:
                self.set_segments([])
        super(GridlinesCollection, self).draw(renderer)
class AxisArtist(martist.Artist):
    """
    An artist which draws an axis line (a line along which the n-th
    axes coordinate is constant), ticks, ticklabels, and the axis label.
    """
    ZORDER=2.5
    # LABELPAD : as property
    def _set_labelpad(self, v):
        return self.label.set_pad(v)
    def _get_labelpad(self):
        return self.label.get_pad()
    LABELPAD = property(_get_labelpad, _set_labelpad)
    def __init__(self, axes,
                 helper,
                 offset=None,
                 axis_direction="bottom",
                 **kw):
        """
        *axes* : axes
        *helper* : an AxisArtistHelper instance.
        """
        #axes is also used to follow the axis attribute (tick color, etc).
        super(AxisArtist, self).__init__(**kw)
        self.axes = axes
        self._axis_artist_helper = helper
        if offset is None:
            offset = (0, 0)
        self.dpi_transform = Affine2D()
        self.offset_transform = ScaledTranslation(offset[0], offset[1],
                                                  self.dpi_transform)
        self._label_visible = True
        self._majortick_visible = True
        self._majorticklabel_visible = True
        self._minortick_visible = True
        self._minorticklabel_visible = True
        #if self._axis_artist_helper._loc in ["left", "right"]:
        if axis_direction in ["left", "right"]:
            axis_name = "ytick"
            self.axis = axes.yaxis
        else:
            axis_name = "xtick"
            self.axis = axes.xaxis
        self._axisline_style = None
        self._axis_direction = axis_direction
        self._init_line()
        self._init_ticks(axis_name, **kw)
        self._init_offsetText(axis_direction)
        self._init_label()
        self.set_zorder(self.ZORDER)
        self._rotate_label_along_line = False
        # axis direction
        self._tick_add_angle = 180.
        self._ticklabel_add_angle = 0.
        self._axislabel_add_angle = 0.
        self.set_axis_direction(axis_direction)
    # axis direction
    def set_axis_direction(self, axis_direction):
        """
        Adjust the direction, text angle, text alignment of
        ticklabels, labels following the matplotlib convention for
        the rectangle axes.
        The *axis_direction* must be one of [left, right, bottom,
        top].
        =====================    ========== ========= ========== ==========
        property                 left       bottom    right      top
        =====================    ========== ========= ========== ==========
        ticklabels location      "-"        "+"       "+"        "-"
        axislabel location       "-"        "+"       "+"        "-"
        ticklabels angle         90         0         -90        180
        ticklabel va             center     baseline  center     baseline
        ticklabel ha             right      center    right      center
        axislabel angle          180        0         0          180
        axislabel va             center     top       center     bottom
        axislabel ha             right      center    right      center
        =====================    ========== ========= ========== ==========
        Note that the direction "+" and "-" are relative to the direction of
        the increasing coordinate. Also, the text angles are actually
        relative to (90 + angle of the direction to the ticklabel),
        which gives 0 for bottom axis.
        """
        if axis_direction not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
        self._axis_direction = axis_direction
        if axis_direction in ["left", "top"]:
            #self._set_tick_direction("+")
            self.set_ticklabel_direction("-")
            self.set_axislabel_direction("-")
        else:
            #self._set_tick_direction("-")
            self.set_ticklabel_direction("+")
            self.set_axislabel_direction("+")
        self.major_ticklabels.set_axis_direction(axis_direction)
        self.label.set_axis_direction(axis_direction)
    # def _set_tick_direction(self, d):
    #     if d not in ["+", "-"]:
    #         raise ValueError('direction must be on of "in", "out"')
    #     if d == "+":
    #         self._tick_add_angle = 0 #get_helper()._extremes=0, 10
    #     else:
    #         self._tick_add_angle = 180 #get_helper()._extremes=0, 10
    def set_ticklabel_direction(self, tick_direction):
        """
        Adjust the direction of the ticklabel.
         ACCEPTS: [ "+" | "-" ]
        Note that the label_direction '+' and '-' are relative to the
        direction of the increasing coordinate.
        """
        if tick_direction not in ["+", "-"]:
            raise ValueError('direction must be one of "+", "-"')
        if tick_direction == "-":
            self._ticklabel_add_angle = 180
        else:
            self._ticklabel_add_angle = 0
    def invert_ticklabel_direction(self):
        self._ticklabel_add_angle = (self._ticklabel_add_angle + 180) % 360
        self.major_ticklabels.invert_axis_direction()
        self.minor_ticklabels.invert_axis_direction()
    # def invert_ticks_direction(self):
    #     self.major_ticks.set_tick_out(not self.major_ticks.get_tick_out())
    #     self.minor_ticks.set_tick_out(not self.minor_ticks.get_tick_out())
    def set_axislabel_direction(self, label_direction):
        """
        Adjust the direction of the axislabel.
         ACCEPTS: [ "+" | "-" ]
        Note that the label_direction '+' and '-' are relative to the
        direction of the increasing coordinate.
        """
        if label_direction not in ["+", "-"]:
            raise ValueError('direction must be one of "+", "-"')
        if label_direction == "-":
            self._axislabel_add_angle = 180
        else:
            self._axislabel_add_angle = 0
    def get_transform(self):
        return self.axes.transAxes + self.offset_transform
    def get_helper(self):
        """
        Return axis artist helper instance.
        """
        return self._axis_artist_helper
    def set_axisline_style(self, axisline_style=None, **kw):
        """
        Set the axisline style.
        *axisline_style* can be a string with an axisline style name with
        optional comma-separated attributes. Alternatively, the attributes
        can be provided as keywords::
          set_axisline_style("->,size=1.5")
          set_axisline_style("->", size=1.5)
        Old attributes are simply forgotten.
        Without an argument (or with axisline_style=None), the available
        styles are returned.
        """
        if axisline_style is None:
            return AxislineStyle.pprint_styles()
        if isinstance(axisline_style, AxislineStyle._Base):
            self._axisline_style = axisline_style
        else:
            self._axisline_style = AxislineStyle(axisline_style, **kw)
        self._init_line()
    def get_axisline_style(self):
        """
        return the current axisline style.
        """
        return self._axisline_style
    def _init_line(self):
        """
        Initialize the *line* artist that is responsible for drawing the axis line.
        """
        tran = self._axis_artist_helper.get_line_transform(self.axes) \
               + self.offset_transform
        axisline_style = self.get_axisline_style()
        if axisline_style is None:
            self.line = BezierPath(self._axis_artist_helper.get_line(self.axes),
                                   color=rcParams['axes.edgecolor'],
                                   linewidth=rcParams['axes.linewidth'],
                                   transform=tran)
        else:
            self.line = axisline_style(self, transform=tran)
    def _draw_line(self, renderer):
        self.line.set_path(self._axis_artist_helper.get_line(self.axes))
        if self.get_axisline_style() is not None:
            self.line.set_line_mutation_scale(self.major_ticklabels.get_size())
        self.line.draw(renderer)
    def _init_ticks(self, axis_name, **kw):
        trans=self._axis_artist_helper.get_tick_transform(self.axes) \
               + self.offset_transform
        major_tick_size = kw.get("major_tick_size",
                                 rcParams['%s.major.size'%axis_name])
        major_tick_pad = kw.get("major_tick_pad",
                                rcParams['%s.major.pad'%axis_name])
        minor_tick_size = kw.get("minor_tick_size",
                                 rcParams['%s.minor.size'%axis_name])
        minor_tick_pad = kw.get("minor_tick_pad",
                                rcParams['%s.minor.pad'%axis_name])
        self.major_ticks = Ticks(major_tick_size,
                                 axis=self.axis,
                                 transform=trans)
        self.minor_ticks = Ticks(minor_tick_size,
                                 axis=self.axis,
                                 transform=trans)
        if axis_name == "xaxis":
            size = rcParams['xtick.labelsize']
        else:
            size = rcParams['ytick.labelsize']
        fontprops = font_manager.FontProperties(size=size)
        self.major_ticklabels = TickLabels(size=size, axis=self.axis,
                                           axis_direction=self._axis_direction)
        self.minor_ticklabels = TickLabels(size=size, axis=self.axis,
                                           axis_direction=self._axis_direction)
        self.major_ticklabels.set(figure = self.axes.figure,
                                  transform=trans,
                                  fontproperties=fontprops)
        self.major_ticklabels.set_pad(major_tick_pad)
        self.minor_ticklabels.set(figure = self.axes.figure,
                                  transform=trans,
                                  fontproperties=fontprops)
        self.minor_ticklabels.set_pad(minor_tick_pad)
    def _get_tick_info(self, tick_iter):
        """
        return ticks_loc_angle, ticklabels_loc_angle_label
        ticks_loc_angle : list of locs and angles for ticks
        ticklabels_loc_angle_label : list of locs, angles and labels for ticklabels
        """
        ticks_loc_angle = []
        ticklabels_loc_angle_label = []
        tick_add_angle = self._tick_add_angle
        ticklabel_add_angle = self._ticklabel_add_angle
        for loc, angle_normal, angle_tangent, label in tick_iter:
            angle_label = angle_tangent  - 90
            angle_label += ticklabel_add_angle
            if np.cos((angle_label - angle_normal)/180.*np.pi) < 0.:
                angle_tick = angle_normal
            else:
                angle_tick = angle_normal + 180
            ticks_loc_angle.append([loc, angle_tick])
            ticklabels_loc_angle_label.append([loc, angle_label, label])
        return ticks_loc_angle, ticklabels_loc_angle_label
    def _update_ticks(self, renderer):
        # set extra pad for major and minor ticklabels:
        # use ticksize of majorticks even for minor ticks. not clear what is best.
        dpi_cor = renderer.points_to_pixels(1.)
        if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
            self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
            self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
        else:
            self.major_ticklabels._set_external_pad(0)
            self.minor_ticklabels._set_external_pad(0)
        majortick_iter,  minortick_iter = \
                self._axis_artist_helper.get_tick_iterators(self.axes)
        tick_loc_angle, ticklabel_loc_angle_label \
                              = self._get_tick_info(majortick_iter)
        self.major_ticks.set_locs_angles(tick_loc_angle)
        self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
        #self.major_ticks.draw(renderer)
        #self.major_ticklabels.draw(renderer)
        # minor ticks
        tick_loc_angle, ticklabel_loc_angle_label \
                              = self._get_tick_info(minortick_iter)
        self.minor_ticks.set_locs_angles(tick_loc_angle)
        self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
        #self.minor_ticks.draw(renderer)
        #self.minor_ticklabels.draw(renderer)
        #if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
        #    self._draw_offsetText(renderer)
        return self.major_ticklabels.get_window_extents(renderer)
    def _draw_ticks(self, renderer):
        extents = self._update_ticks(renderer)
        self.major_ticks.draw(renderer)
        self.major_ticklabels.draw(renderer)
        self.minor_ticks.draw(renderer)
        self.minor_ticklabels.draw(renderer)
        if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
            self._draw_offsetText(renderer)
        return extents
    def _draw_ticks2(self, renderer):
        # set extra pad for major and minor ticklabels:
        # use ticksize of majorticks even for minor ticks. not clear what is best.
        dpi_cor = renderer.points_to_pixels(1.)
        if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
            self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
            self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
        else:
            self.major_ticklabels._set_external_pad(0)
            self.minor_ticklabels._set_external_pad(0)
        majortick_iter,  minortick_iter = \
                self._axis_artist_helper.get_tick_iterators(self.axes)
        tick_loc_angle, ticklabel_loc_angle_label \
                              = self._get_tick_info(majortick_iter)
        self.major_ticks.set_locs_angles(tick_loc_angle)
        self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
        self.major_ticks.draw(renderer)
        self.major_ticklabels.draw(renderer)
        # minor ticks
        tick_loc_angle, ticklabel_loc_angle_label \
                              = self._get_tick_info(minortick_iter)
        self.minor_ticks.set_locs_angles(tick_loc_angle)
        self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
        self.minor_ticks.draw(renderer)
        self.minor_ticklabels.draw(renderer)
        if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
            self._draw_offsetText(renderer)
        return self.major_ticklabels.get_window_extents(renderer)
    _offsetText_pos = dict(left=(0, 1, "bottom", "right"),
                           right=(1, 1, "bottom", "left"),
                           bottom=(1, 0, "top", "right"),
                           top=(1, 1, "bottom", "right"))
    def _init_offsetText(self, direction):
        x,y,va,ha = self._offsetText_pos[direction]
        self.offsetText = mtext.Annotation("",
                                           xy=(x,y), xycoords="axes fraction",
                                           xytext=(0,0), textcoords="offset points",
                                           #fontproperties = fp,
                                           color = rcParams['xtick.color'],
                                           verticalalignment=va,
                                           horizontalalignment=ha,
                                           )
        self.offsetText.set_transform(IdentityTransform())
        self.axes._set_artist_props(self.offsetText)
    def _update_offsetText(self):
        self.offsetText.set_text( self.axis.major.formatter.get_offset() )
        self.offsetText.set_size(self.major_ticklabels.get_size())
        offset = self.major_ticklabels.get_pad() + self.major_ticklabels.get_size() + 2.
        self.offsetText.xyann= (0, offset)
    def _draw_offsetText(self, renderer):
        self._update_offsetText()
        self.offsetText.draw(renderer)
    def _init_label(self, **kw):
        # x in axes coords, y in display coords (to be updated at draw
        # time by _update_label_positions)
        labelsize = kw.get("labelsize",
                           rcParams['axes.labelsize'])
        #labelcolor = kw.get("labelcolor",
        #                    rcParams['axes.labelcolor'])
        fontprops = font_manager.FontProperties(
            size=labelsize,
            weight=rcParams['axes.labelweight'])
        textprops = dict(fontproperties = fontprops)
                         #color = labelcolor)
        tr = self._axis_artist_helper.get_axislabel_transform(self.axes) \
             + self.offset_transform
        self.label = AxisLabel(0, 0, "__from_axes__",
                               color = "auto", #rcParams['axes.labelcolor'],
                               fontproperties=fontprops,
                               axis=self.axis,
                               transform=tr,
                               axis_direction=self._axis_direction,
                               )
        self.label.set_figure(self.axes.figure)
        labelpad = kw.get("labelpad", 5)
        self.label.set_pad(labelpad)
    def _update_label(self, renderer):
        if not self.label.get_visible():
            return
        fontprops = font_manager.FontProperties(
            size=rcParams['axes.labelsize'],
            weight=rcParams['axes.labelweight'])
        #pad_points = self.major_tick_pad
        #print self._ticklabel_add_angle - self._axislabel_add_angle
        #if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
        if self._ticklabel_add_angle !=  self._axislabel_add_angle:
            if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
               or \
               (self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
                axislabel_pad = self.major_ticks._ticksize
            else:
                axislabel_pad = 0
        else:
            axislabel_pad = max([self.major_ticklabels._axislabel_pad,
                                 self.minor_ticklabels._axislabel_pad])
        #label_offset =  axislabel_pad + self.LABELPAD
        #self.label._set_offset_radius(label_offset)
        self.label._set_external_pad(axislabel_pad)
        xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
        if xy is None: return
        angle_label = angle_tangent  - 90
        x, y = xy
        self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
        self.label.set(x=x, y=y)
    def _draw_label(self, renderer):
        self._update_label(renderer)
        self.label.draw(renderer)
    def _draw_label2(self, renderer):
        if not self.label.get_visible():
            return
        fontprops = font_manager.FontProperties(
            size=rcParams['axes.labelsize'],
            weight=rcParams['axes.labelweight'])
        #pad_points = self.major_tick_pad
        #print self._ticklabel_add_angle - self._axislabel_add_angle
        #if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
        if self._ticklabel_add_angle !=  self._axislabel_add_angle:
            if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
               or \
               (self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
                axislabel_pad = self.major_ticks._ticksize
            else:
                axislabel_pad = 0
        else:
            axislabel_pad = max([self.major_ticklabels._axislabel_pad,
                                 self.minor_ticklabels._axislabel_pad])
        #label_offset =  axislabel_pad + self.LABELPAD
        #self.label._set_offset_radius(label_offset)
        self.label._set_external_pad(axislabel_pad)
        xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
        if xy is None: return
        angle_label = angle_tangent  - 90
        x, y = xy
        self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
        self.label.set(x=x, y=y)
        self.label.draw(renderer)
    def set_label(self, s):
        self.label.set_text(s)
    def get_tightbbox(self, renderer):
        if not self.get_visible(): return
        self._axis_artist_helper.update_lim(self.axes)
        dpi_cor = renderer.points_to_pixels(1.)
        self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
        bb = []
        self._update_ticks(renderer)
        #if self.major_ticklabels.get_visible():
        bb.extend(self.major_ticklabels.get_window_extents(renderer))
        #if self.minor_ticklabels.get_visible():
        bb.extend(self.minor_ticklabels.get_window_extents(renderer))
        self._update_label(renderer)
        #if self.label.get_visible():
        bb.append(self.label.get_window_extent(renderer))
        bb.append(self.offsetText.get_window_extent(renderer))
        bb = [b for b in bb if b and (b.width!=0 or b.height!=0)]
        if bb:
            _bbox = Bbox.union(bb)
            return _bbox
        else:
            return None
        #self._draw_line(renderer)
        #self._draw_ticks(renderer)
        #self._draw_offsetText(renderer)
        #self._draw_label(renderer)
    @allow_rasterization
    def draw(self, renderer):
        'Draw the axis lines, tick lines and labels'
        if not self.get_visible(): return
        renderer.open_group(__name__)
        self._axis_artist_helper.update_lim(self.axes)
        dpi_cor = renderer.points_to_pixels(1.)
        self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
        self._draw_ticks(renderer)
        self._draw_line(renderer)
        #self._draw_offsetText(renderer)
        self._draw_label(renderer)
        renderer.close_group(__name__)
    #def get_ticklabel_extents(self, renderer):
    #    pass
    def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
        """
        Toggle visibility of ticks, ticklabels, and (axis) label.
        To turn all off, ::
          axis.toggle(all=False)
        To turn all off but ticks on ::
          axis.toggle(all=False, ticks=True)
        To turn all on but (axis) label off ::
          axis.toggle(all=True, label=False)
        """
        if all:
            _ticks, _ticklabels, _label = True, True, True
        elif all is not None:
            _ticks, _ticklabels, _label = False, False, False
        else:
            _ticks, _ticklabels, _label = None, None, None
        if ticks is not None:
            _ticks = ticks
        if ticklabels is not None:
            _ticklabels = ticklabels
        if label is not None:
            _label = label
        if _ticks is not None:
            self.major_ticks.set_visible(_ticks)
            self.minor_ticks.set_visible(_ticks)
        if _ticklabels is not None:
            self.major_ticklabels.set_visible(_ticklabels)
            self.minor_ticklabels.set_visible(_ticklabels)
        if _label is not None:
            self.label.set_visible(_label)
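# Usage sketch (illustrative; assumes an mpl_toolkits.axisartist Axes, which exposes
# its AxisArtist instances through ax.axis[...]):
#     ax.axis["bottom"].toggle(ticklabels=False)  # hide bottom ticklabels only
#     ax.axis["left"].label.set_pad(10)           # pad between ticklabels and axis label
# Both calls only affect the artists managed by that AxisArtist instance.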
def test_axis_artist():
    global axisline
    #self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes)
    from mpl_toolkits.axisartist import AxisArtistHelperRectlinear
    fig = plt.figure(1)
    fig.clf()
    ax=fig.add_subplot(111)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    if 1:
        _helper = AxisArtistHelperRectlinear.Fixed(ax, loc="left")
        axisline = AxisArtist(ax, _helper, offset=None, axis_direction="left")
        ax.add_artist(axisline)
        _helper = AxisArtistHelperRectlinear.Fixed(ax, loc="right")
        axisline = AxisArtist(ax, _helper, offset=None, axis_direction="right")
        ax.add_artist(axisline)
    _helper = AxisArtistHelperRectlinear.Fixed(ax, loc="bottom")
    axisline = AxisArtist(ax, _helper, offset=None, axis_direction="bottom")
    axisline.set_label("TTT")
    #axisline.label.set_visible(False)
    ax.add_artist(axisline)
    #axisline.major_ticklabels.set_axis_direction("bottom")
    axisline.major_ticks.set_tick_out(False)
    ax.set_ylabel("Test")
    axisline.label.set_pad(5)
    plt.draw()
def test_axis_artist2():
    global axisline
    #self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes)
    from mpl_toolkits.axislines import AxisArtistHelperRectlinear
    fig = plt.figure(1)
    fig.clf()
    ax=fig.add_subplot(111)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    _helper = AxisArtistHelperRectlinear.Fixed(ax, loc="bottom")
    axisline = AxisArtist(ax, _helper, offset=None, axis_direction="bottom")
    axisline.set_label("TTT")
    ax.add_artist(axisline)
    #axisline.major_ticklabels.set_axis_direction("bottom")
    axisline.major_ticks.set_tick_out(False)
    ax.set_ylabel("Test")
    plt.draw()
if __name__ == "__main__":
    #test_labelbase()
    #test_ticklabels()
    test_axis_artist()
    #test_axis_artist2()
# DONE
# *. ticks, ticklabels, axislabels
# *. workon axisartist
# TODO
 | 
	mit | 
| 
	1kastner/analyse_weather_data | 
	gather_weather_data/wunderground/summarize_raw_airport_data.py | 
	1 | 
	8898 | 
	"""
Summarize all downloaded airport weather station data files.
Uses UTC time zone.
Use
    python -m gather_weather_data.wunderground.summarize_raw_airport_data
to run the demo.
"""
import os
import json
import datetime
import logging
import numpy
import pandas
import metar.Metar  # needs https://github.com/tomp/python-metar/pull/25 to work stable
from . import WUNDERGROUND_RAW_AIRPORT_DATA_DIR
from . import PROCESSED_DATA_DIR
from .summarize_raw_data import _parse_utc_date
from .summarize_raw_data import _cast_number
from .summarize_raw_data import _get_file_name
HEADER_FORMAT = ("{datetime},{temperature},{dewpoint},{windspeed},{windgust},{winddirection},{pressure},{humidity},"
                 "{precipitation},{cloudcover}")
def _get_header():
    """
    
    :return: Formatted header complying with CSV standards
    """
    return HEADER_FORMAT.replace("{", "").replace("}", "")
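# For illustration: _get_header() just strips the braces from HEADER_FORMAT, so the
# emitted CSV header reads
# "datetime,temperature,dewpoint,windspeed,windgust,winddirection,pressure,humidity,precipitation,cloudcover".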
def max_of_total_order(collection_of_interest, given_total_order):
    """
    
    :param collection_of_interest: Find the maximum in this collection
    :param given_total_order: Describe the total order to use on the collection
    :return: max element
    """
    l = [given_total_order.index(e) for e in collection_of_interest]
    return given_total_order[max(l)]
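# Example (illustrative): with the cloud cover order used in get_cloud_cover() below,
#     max_of_total_order(["FEW", "BKN", "SCT"],
#                        ["SKC", "CLR", "NSC", "FEW", "SCT", "BKN", "OVC", "VV"])
# returns "BKN", i.e. the element that appears latest in the given total order.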
def get_cloud_cover(metar_string, date_of_observation):
    """
    This needs a small modification as described in https://github.com/tomp/python-metar/pull/25
    
    :param metar_string: A classical meteorological METAR
    :param date_of_observation: Used to parse the metar at hand
    :return: The cloud cover name
    """
    d = date_of_observation
    m = metar.Metar.Metar(
        metar_string,
        d.month,
        d.year,
        drop_unsupported_observations=True
    )
    cloud_cover = "CAVOC"  # 0 octas
    if not m.sky:
        return cloud_cover
    else:
        sorted_possible_cloud_covers = [
            "SKC", "CLR", "NSC",  # 0 octas
            "FEW",  # 1-2 octas
            "SCT",  # 3-4 octas
            "BKN",  # 5-7 octas
            "OVC",  # 8 octas
            "VV",  # clouds can not be seen because of fog or rain
        ]
        sky_covers = [cover for (cover, height, cloud) in m.sky]
        return max_of_total_order(sky_covers, sorted_possible_cloud_covers)
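# Illustrative sketch (behaviour depends on the patched python-metar mentioned above):
# for a report such as "METAR EDDH 121050Z 29010KT 9999 FEW020 SCT035 12/08 Q1022" the
# parsed sky groups would be FEW and SCT, so get_cloud_cover() reduces them to "SCT";
# a report without sky groups yields the default "CAVOC".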
def _get_data_for_single_day(station, day):
    """
    At the current time the day provided is interpreted as local time at wunderground.
    
    :param station: The name of the station, e.g. 'IHAMBURG69'
    :param day: The day to pick the json from
    :return: A valid csv file content with header
    :rtype: str
    """
    json_file_name = _get_file_name(station, day, 'json')
    json_file_path = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, json_file_name)
    if not os.path.isfile(json_file_path):
        # search for files of other project
        json_file_name = station + "_" + day.strftime("%Y%m%d") + ".json"
        json_file_path = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, json_file_name)
    if not os.path.isfile(json_file_path):
        # search for files created by yet another project
        json_file_name = day.strftime("%Y-%m-%d") + ".json"
        json_file_path = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, json_file_name)
    if not os.path.isfile(json_file_path):
        logging.warning("missing input file: " + json_file_path)
        return
    if os.path.getsize(json_file_path) == 0:
        logging.warning("encountered an empty file: ", json_file_path)
        os.remove(json_file_path)
        return
    with open(json_file_path) as f:
        raw_json_weather_data = json.load(f)
    # These are the relevant observations we want to keep
    observations = []
    header = _get_header()
    observations.append(header)
    for raw_observation in raw_json_weather_data["history"]["observations"]:
        observation = {}
        utc_date = _parse_utc_date(raw_observation["utcdate"])
        observation["datetime"] = utc_date.isoformat()
        observation["temperature"] = _cast_number(raw_observation["tempm"])
        observation["dewpoint"] = _cast_number(raw_observation["dewptm"])
        observation["windspeed"] = _cast_number(raw_observation["wspdm"], raw_observation["wspdi"])
        observation["windgust"] = _cast_number(raw_observation["wgustm"], raw_observation["wgusti"])
        observation["winddirection"] = _cast_number(raw_observation["wdird"], raw_observation["wdird"])
        observation["pressure"] = _cast_number(raw_observation["pressurem"])
        observation["humidity"] = _cast_number(raw_observation["hum"])
        if "precip_ratem" in raw_observation:
            observation["precipitation"] = _cast_number(raw_observation["precip_ratem"],
                                                        raw_observation["precip_ratei"])
        else:
            observation["precipitation"] = ""
        if raw_observation["metar"].startswith("METAR"):  # some other record
            observation["cloudcover"] = get_cloud_cover(raw_observation["metar"], utc_date)
        else:
            observation["cloudcover"] = numpy.nan
        observations.append(HEADER_FORMAT.format(**observation))
    return "\n".join(observations)
def _open_daily_summary(station, day):
    """
    :param station: The name of the station, e.g. 'IHAMBURG69'
    :param day: The day to get the summary for (can be naive)
    :return: The corresponding data frame
    """
    csv_file = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station, _get_file_name(station, day, "csv"))
    data_frame = pandas.read_csv(csv_file, index_col="datetime", parse_dates=["datetime"])
    return data_frame
def _create_csv_from_json(station, day, force_overwrite):
    """
    
    :param force_overwrite: Whether to overwrite old daily summary files.
    :param station: The name of the station, e.g. 'IHAMBURG69'
    :param day: The day to pick the json from
    """
    processed_station_dir = os.path.join(WUNDERGROUND_RAW_AIRPORT_DATA_DIR, station)
    if not os.path.isdir(processed_station_dir):
        os.mkdir(processed_station_dir)
    csv_path = os.path.join(processed_station_dir, _get_file_name(station, day, 'csv'))
    if os.path.isfile(csv_path) and os.path.getsize(csv_path) and not force_overwrite:
        logging.info("skip " + csv_path)
        return
    with open(csv_path, "w") as f:
        csv_file_content = _get_data_for_single_day(station, day)
        if csv_file_content is not None:
            f.write(csv_file_content)
        else:
            f.write(_get_header())
def join_daily_summaries(station, start_date, end_date, force_overwrite):
    """
    
    :param station: 
    :param start_date: 
    :param end_date: 
    :param force_overwrite: 
    :return: 
    """
    date_to_check = start_date
    span_summary_file_name = station + "_" + start_date.strftime("%Y%m%d") + "_" + end_date.strftime("%Y%m%d") + ".csv"
    output_dir = os.path.join(PROCESSED_DATA_DIR, "station_summaries")
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    span_summary_path = os.path.join(output_dir, span_summary_file_name)
    if os.path.isfile(span_summary_path) and not force_overwrite:
        logging.info("skip " + span_summary_path)
        return
    data_frame = _open_daily_summary(station, start_date)
    date_to_check = start_date + datetime.timedelta(days=1)
    while date_to_check <= end_date:
        data_frame_next = _open_daily_summary(station, date_to_check)
        data_frame = data_frame.append(data_frame_next)
        date_to_check = date_to_check + datetime.timedelta(days=1)
    # remove duplicates (happens if the same entry exists for two days)
    data_frame = data_frame.groupby(data_frame.index).first()
    data_frame.sort_index(inplace=True)
    data_frame.to_csv(span_summary_path)
def create_daily_summaries_for_time_span(station, start_date, end_date, force_overwrite):
    """
    :param force_overwrite: Whether to overwrite old daily summary files.
    :param station: The name of the station, e.g. 'IHAMBURG69'
    :param start_date: The date to start (included) 
    :param end_date: The date to stop (included)
    :return: 
    """
    date_to_check = start_date
    while date_to_check <= end_date:
        _create_csv_from_json(station, date_to_check, force_overwrite)
        date_to_check = date_to_check + datetime.timedelta(days=1)
def demo():
    stations = ["EDDH"]
    for station in stations:
        logging.info(station)
        start_date = datetime.datetime(2016, 1, 1)
        end_date = datetime.datetime(2016, 12, 31)
        logging.info("create daily summaries")
        create_daily_summaries_for_time_span(station, start_date, end_date, False)
        logging.info("create time span summary")
        join_daily_summaries(station, start_date, end_date, True)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    demo()
 | 
	agpl-3.0 | 
| 
	shangwuhencc/scikit-learn | 
	sklearn/decomposition/tests/test_incremental_pca.py | 
	297 | 
	8265 | 
	"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
    # Incremental PCA on dense arrays.
    X = iris.data
    batch_size = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_transformed = ipca.fit_transform(X)
    np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
    assert_almost_equal(ipca.explained_variance_ratio_.sum(),
                        pca.explained_variance_ratio_.sum(), 1)
    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
    # Test that the projection of data is correct.
    rng = np.random.RandomState(1999)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    # Get the reconstruction of the generated data X
    # Note that Xt has the same "components" as X, just separated
    # This is what we want to ensure is recreated correctly
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
    # Normalize
    Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1, this means
    # the reconstruction worked as expected
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
    # Test that the projection of data can be inverted.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    Y = ipca.transform(X)
    Y_inverse = ipca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
    # Test that n_components is >=1 and <= n_features.
    X = [[0, 1], [1, 0]]
    for n_components in [-1, 0, .99, 3]:
        assert_raises(ValueError, IncrementalPCA(n_components,
                                                 batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components via set_params after fitting raises an error.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    assert_raises(ValueError, ipca.partial_fit, X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    assert_raises(ValueError, ipca.partial_fit, X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features will raise an error.
    rng = np.random.RandomState(1999)
    n_samples = 100
    X = rng.randn(n_samples, 20)
    X2 = rng.randn(n_samples, 50)
    ipca = IncrementalPCA(n_components=None)
    ipca.fit(X)
    assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
    # Test that components_ sign is stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(10, 20)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
    # Test that components_ values are stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(20, 40, 3)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    X = iris.data
    Y_pca = PCA(n_components=2).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
    Y_pca = PCA(n_components=3).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
    # Test that PCA and IncrementalPCA calculations match
    X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
                                      effective_rank=10, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
                            decimal=prec)
        assert_almost_equal(pca.explained_variance_ratio_,
                            ipca.explained_variance_ratio_, decimal=prec)
        assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
                            decimal=prec)
def test_whitening():
    # Test that PCA and IncrementalPCA transforms match to sign flip.
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)
        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
 | 
	bsd-3-clause | 
| 
	manjunaths/tensorflow | 
	tensorflow/contrib/learn/__init__.py | 
	8 | 
	2286 | 
	# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedClassifier
@@LinearClassifier
@@LinearRegressor
@@LogisticRegressor
## Distributed training utilities
@@Experiment
@@ExportStrategy
@@TaskType
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
## Export utilities
@@build_parsing_serving_input_fn
@@ProblemType
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'models',
                    'monitors', 'NotFittedError', 'ops', 'preprocessing',
                    'utils', 'graph_actions']
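# remove_undocumented() prunes from this module every public symbol that is neither
# referenced via an @@ marker in the docstring above nor listed in _allowed_symbols.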
remove_undocumented(__name__, _allowed_symbols)
 | 
	apache-2.0 | 
| 
	305120262/ArcGISServerManageTools | 
	GetServerLog.py | 
	1 | 
	3475 | 
	#coding=utf-8
"""
-------------------------------------------------------------------------------
Name:        getsvrlog.py
Purpose:     Collect ArcGIS Server Site Logs
Author:      Sean.L ([email protected])
Created:     8/25/16
Copyright:   (c) Sean.L 2016
-------------------------------------------------------------------------------
"""
from __future__ import print_function
# 3rd-party library: requests
# http://docs.python-requests.org/en/latest/
# pip install requests
import requests
import json
import pandas as pd
SITE_LIST = ["http://192.168.1.75:6080/arcgis",""]
USER = "siteadmin"
PASSWORD = "esri"
LOGPATH = r"D:\\"
# note that services are suffixed by type when passed to admin REST API
SERVICES = [r"MajorCity.MapServer"]
class AGSRestError(Exception): pass
class ServerError(Exception): pass
def _validate_response(response):
    """ Tests response for HTTP 200 code, tests that response is json,
        and searches for typical AGS error indicators in json.
        Raises an exception if response does not validate successfully.
    """
    if not response.ok:
        raise ServerError("Server Error: {}".format(response.text))
    try:
        response_json = response.json()
        if "error" in response_json:
            raise AGSRestError(response_json["error"])
        if "status" in response_json and response_json["status"] != "success":
            error = response_json["status"]
            if "messages" in response_json:
                for message in response_json["messages"]:
                    error += "\n" + message
            raise AGSRestError(error)
    except ValueError:
        print(response.text)
        raise ServerError("Server returned HTML: {}".format(response.text))
def _get_token(site,username, password):
    """ Returns token from server """
    token_url = "{host}/tokens/".format(
        host=site)
    data = { "f": "json",
             "username": username,
             "password": password,
             "client": "requestip",
             "expiration": 5 }
    response = requests.post(token_url, data,verify=False)
    _validate_response(response)
    token = response.json()['token']
    return token
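# Usage sketch (illustrative; host and credentials are placeholders):
#     token = _get_token("http://server.example.com:6080/arcgis", "siteadmin", "esri")
# The token is requested for the caller's IP ("client": "requestip") and expires after
# 5 minutes, so it is fetched per site right before querying the admin log endpoint.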
def _get_log(site):
    getlog_url="{host}/admin/logs/query?f=json".format(
        host=site)
    data = { "token": token,
             "startTime":'',
             "endTime":'',
             "level":'SEVERE',
             "filterType":'json',
             "filter":'{\"server\": \"*\",\
                        \"services\": \"*\",\
                        \"machines":\"*\" }',
             "pageSize":10000}
    response = requests.post(getlog_url, data,verify=False)
    _validate_response(response)
    response_json = response.json()
    #print (response_json['logMessages'])
    myFrame=pd.DataFrame(response_json['logMessages'])
    sitewaip = site[site.index("/") + 2:site.index(":", 7)]
    myFrame["sitewaip"] = sitewaip
    file_name = sitewaip.replace(".", "_")
    myFrame.to_csv(r"{root}{site_name}.csv".format(root=LOGPATH,site_name=file_name), index=False)
    return myFrame
if __name__ == "__main__":
    frames = []
    for site in SITE_LIST:
        print("Retrieving token...")
        token = _get_token(site,USER, PASSWORD)
        print("Retrieved: {}".format(token))
        df = _get_log(site, token)
        print( df.columns)
        frames.append(df)
    all_frame = pd.concat(frames)
    all_frame.to_csv(r"{root}allsites.csv".format(root=LOGPATH),index=False)
 | 
	apache-2.0 | 
| 
	rahul-c1/scikit-learn | 
	benchmarks/bench_lasso.py | 
	297 | 
	3305 | 
	"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
    lasso_results = []
    lars_lasso_results = []
    it = 0
    for ns in n_samples:
        for nf in n_features:
            it += 1
            print('==================')
            print('Iteration %s of %s' % (it, max(len(n_samples),
                                          len(n_features))))
            print('==================')
            n_informative = nf // 10
            X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
                                          n_informative=n_informative,
                                          noise=0.1, coef=True)
            X /= np.sqrt(np.sum(X ** 2, axis=0))  # Normalize data
            gc.collect()
            print("- benchmarking Lasso")
            clf = Lasso(alpha=alpha, fit_intercept=False,
                        precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lasso_results.append(time() - tstart)
            gc.collect()
            print("- benchmarking LassoLars")
            clf = LassoLars(alpha=alpha, fit_intercept=False,
                            normalize=False, precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lars_lasso_results.append(time() - tstart)
    return lasso_results, lars_lasso_results
if __name__ == '__main__':
    from sklearn.linear_model import Lasso, LassoLars
    import pylab as pl
    alpha = 0.01  # regularization parameter
    n_features = 10
    list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
    lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
                                            [n_features], precompute=True)
    pl.figure('scikit-learn LASSO benchmark results')
    pl.subplot(211)
    pl.plot(list_n_samples, lasso_results, 'b-',
                            label='Lasso')
    pl.plot(list_n_samples, lars_lasso_results, 'r-',
                            label='LassoLars')
    pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    n_samples = 2000
    list_n_features = np.linspace(500, 3000, 5).astype(np.int)
    lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
                                           list_n_features, precompute=False)
    pl.subplot(212)
    pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
    pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
    pl.title('%d samples, alpha=%s' % (n_samples, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of features')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
 | 
	bsd-3-clause | 
| 
	cwu2011/scikit-learn | 
	examples/linear_model/plot_lasso_coordinate_descent_path.py | 
	254 | 
	2639 | 
	"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0)  # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3  # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
    X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
              linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
              linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
           loc='lower left')
plt.axis('tight')
plt.show()
 | 
	bsd-3-clause | 
| 
	glouppe/scikit-learn | 
	benchmarks/bench_lasso.py | 
	297 | 
	3305 | 
	"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
    lasso_results = []
    lars_lasso_results = []
    it = 0
    for ns in n_samples:
        for nf in n_features:
            it += 1
            print('==================')
            print('Iteration %s of %s' % (it, max(len(n_samples),
                                          len(n_features))))
            print('==================')
            n_informative = nf // 10
            X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
                                          n_informative=n_informative,
                                          noise=0.1, coef=True)
            X /= np.sqrt(np.sum(X ** 2, axis=0))  # Normalize data
            gc.collect()
            print("- benchmarking Lasso")
            clf = Lasso(alpha=alpha, fit_intercept=False,
                        precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lasso_results.append(time() - tstart)
            gc.collect()
            print("- benchmarking LassoLars")
            clf = LassoLars(alpha=alpha, fit_intercept=False,
                            normalize=False, precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lars_lasso_results.append(time() - tstart)
    return lasso_results, lars_lasso_results
if __name__ == '__main__':
    from sklearn.linear_model import Lasso, LassoLars
    import pylab as pl
    alpha = 0.01  # regularization parameter
    n_features = 10
    list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
    lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
                                            [n_features], precompute=True)
    pl.figure('scikit-learn LASSO benchmark results')
    pl.subplot(211)
    pl.plot(list_n_samples, lasso_results, 'b-',
                            label='Lasso')
    pl.plot(list_n_samples, lars_lasso_results, 'r-',
                            label='LassoLars')
    pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    n_samples = 2000
    list_n_features = np.linspace(500, 3000, 5).astype(np.int)
    lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
                                           list_n_features, precompute=False)
    pl.subplot(212)
    pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
    pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
    pl.title('%d samples, alpha=%s' % (n_samples, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of features')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
 | 
	bsd-3-clause | 
| 
	kastnerkyle/pylearn2 | 
	pylearn2/cross_validation/tests/test_train_cv_extensions.py | 
	49 | 
	1681 | 
	"""
Tests for TrainCV extensions.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_monitor_based_save_best_cv():
    """Test MonitorBasedSaveBestCV."""
    handle, filename = tempfile.mkstemp()
    skip_if_no_sklearn()
    trainer = yaml_parse.load(test_yaml_monitor_based_save_best_cv %
                              {'save_path': filename})
    trainer.main_loop()
    # clean up
    os.remove(filename)
test_yaml_monitor_based_save_best_cv = """
!obj:pylearn2.cross_validation.TrainCV {
    dataset_iterator:
        !obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
        dataset:
            !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
            {
                rng: !obj:numpy.random.RandomState { seed: 1 },
                num_examples: 100,
                dim: 10,
                num_classes: 2,
            },
    },
    model: !obj:pylearn2.models.autoencoder.Autoencoder {
        nvis: 10,
        nhid: 8,
        act_enc: sigmoid,
        act_dec: linear
    },
    algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
        batch_size: 50,
        line_search_mode: exhaustive,
        conjugate: 1,
        termination_criterion:
            !obj:pylearn2.termination_criteria.EpochCounter {
                    max_epochs: 1,
        },
        cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
        },
    },
    cv_extensions: [
  !obj:pylearn2.cross_validation.train_cv_extensions.MonitorBasedSaveBestCV {
        channel_name: train_objective,
        save_path: %(save_path)s,
      },
    ],
}
"""
 | 
	bsd-3-clause | 
| 
	valexandersaulys/airbnb_kaggle_contest | 
	venv/lib/python3.4/site-packages/sklearn/neighbors/graph.py | 
	208 | 
	7031 | 
	"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
    """Check the validity of the input parameters"""
    params = zip(['metric', 'p', 'metric_params'],
                 [metric, p, metric_params])
    est_params = X.get_params()
    for param_name, func_param in params:
        if func_param != est_params[param_name]:
            raise ValueError(
                "Got %s for %s, while the estimator has %s for "
                "the same parameter." % (
                    func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
    """Return the query based on include_self param"""
    # Done to preserve backward compatibility.
    if include_self is None:
        if mode == "connectivity":
            warnings.warn(
                "The behavior of 'kneighbors_graph' when mode='connectivity' "
                "will change in version 0.18. Presently, the nearest neighbor "
                "of each sample is the sample itself. Beginning in version "
                "0.18, the default behavior will be to exclude each sample "
                "from being its own nearest neighbor. To maintain the current "
                "behavior, set include_self=True.", DeprecationWarning)
            include_self = True
        else:
            include_self = False
    if include_self:
        query = X._fit_X
    else:
        query = None
    return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
                     p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of k-Neighbors for points in X
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.
    n_neighbors : int
        Number of neighbors for each sample.
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.
    metric : string, default 'minkowski'
        The distance metric used to calculate the k-Neighbors for each sample
        point. The DistanceMetric class gives a list of available metrics.
        The default distance is 'euclidean' ('minkowski' metric with the p
        param equal to 2.)
    include_self: bool, default backward-compatible.
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params: dict, optional
        additional keyword arguments for the metric function.
    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import kneighbors_graph
    >>> A = kneighbors_graph(X, 2)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  1.],
           [ 1.,  0.,  1.]])
    See also
    --------
    radius_neighbors_graph
    """
    if not isinstance(X, KNeighborsMixin):
        X = NearestNeighbors(n_neighbors, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        _check_params(X, metric, p, metric_params)
    query = _query_include_self(X, include_self, mode)
    return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
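# Illustrative sketch (not part of the original module): with mode='distance' the
# same call returns Euclidean edge weights instead of 0/1 connectivity entries:
#
#     from sklearn.neighbors import kneighbors_graph
#     A = kneighbors_graph([[0], [3], [1]], 2, mode='distance', include_self=False)
#     # A[i, j] then holds the distance from sample i to its selected neighbor j.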
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
                           p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
    radius.
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.
    radius : float
        Radius of neighborhoods.
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.
    metric : string, default 'minkowski'
        The distance metric used to calculate the neighbors within a
        given radius for each sample point. The DistanceMetric class
        gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
    include_self: bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params: dict, optional
        additional keyword arguments for the metric function.
    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  0.],
           [ 1.,  0.,  1.]])
    See also
    --------
    kneighbors_graph
    """
    if not isinstance(X, RadiusNeighborsMixin):
        X = NearestNeighbors(radius=radius, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        _check_params(X, metric, p, metric_params)
    query = _query_include_self(X, include_self, mode)
    return X.radius_neighbors_graph(query, radius, mode)
 | 
	gpl-2.0 | 
| 
	cxmo/project-beta | 
	code/dataprep_script.py | 
	4 | 
	1758 | 
	
""" The following script will apply a 3mm Gaussian filter on all the data spatially
and will save each smoothed run into the data folder as 'smoothed_run_i', where  
0 <= i <= 7 is the index of the run. 
"""
#Import libraries
import numpy as np
import scipy
import scipy.ndimage
from scipy.ndimage.filters import gaussian_filter
import nibabel as nb
import matplotlib.pyplot as plt
import utils.data_loading as dl
#All file strings corresponding to BOLD data for subject 4 
files = ['../data/task001_run001.bold_dico.nii.gz', '../data/task001_run002.bold_dico.nii.gz', 
         '../data/task001_run003.bold_dico.nii.gz', '../data/task001_run004.bold_dico.nii.gz', 
         '../data/task001_run005.bold_dico.nii.gz', '../data/task001_run006.bold_dico.nii.gz',
         '../data/task001_run007.bold_dico.nii.gz', '../data/task001_run008.bold_dico.nii.gz']
all_data = []
for index, filename in enumerate(files):
    new_data = dl.load_data(filename) #load_data function drops first 4 for us
    num_vols = new_data.shape[-1]
    if index != 0 and index != 7:
        new_num_vols = num_vols - 4   
        new_data = new_data[:,:,:,:new_num_vols] #Drop last 4 volumes for middle runs    
    all_data.append(new_data)
#Create an array of all smoothed data  
for index, run in enumerate(all_data):
    num_vols = np.shape(run)[-1]
    run_i_smoothed = []
    for time in range(num_vols):
        smoothed = dl.smooth_gauss(run, 3, time)
        smoothed.shape = (132, 175, 48, 1)
        run_i_smoothed.append(smoothed)
    run_i_smoothed = np.concatenate(run_i_smoothed, axis = 3)
    np.save('../data/smoothed_run_' + str(index), run_i_smoothed) #save in data folder
    print('finished run' + str(index))
    run_i_smoothed = None #Save memory space 
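# Hedged follow-up sketch (not part of the original script): numpy.save appends the
# '.npy' suffix, so a smoothed run can later be reloaded with, e.g.
#
#     smoothed_run_0 = np.load('../data/smoothed_run_0.npy')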
 | 
	bsd-3-clause | 
| 
	zimmermegan/smarda | 
	nltk-3.0.3/nltk/parse/transitionparser.py | 
	5 | 
	31354 | 
	# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
#
# Author: Long Duong <[email protected]>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import pickle
from os import remove
from copy import deepcopy
from operator import itemgetter
try:
    from numpy import array
    from scipy import sparse
    from sklearn.datasets import load_svmlight_file
    from sklearn import svm
except ImportError:
    pass
from nltk.parse import ParserI, DependencyGraph, DependencyEvaluator
class Configuration(object):
    """
    Class for holding configuration which is the partial analysis of the input sentence.
    The transition based parser aims at finding a set of operators that transfer the initial
    configuration to the terminal configuration.
    The configuration includes:
        - Stack: for storing partially proceeded words
        - Buffer: for storing remaining input words
        - Set of arcs: for storing partially built dependency tree
    This class also provides a method to represent a configuration as list of features.
    """
    def __init__(self, dep_graph):
        """
        :param dep_graph: the representation of an input in the form of dependency graph.
        :type dep_graph: DependencyGraph where the dependencies are not specified.
        """
        # dep_graph.nodes contain list of token for a sentence
        self.stack = [0]  # The root element
        self.buffer = list(range(1, len(dep_graph.nodes)))  # The rest is in the buffer
        self.arcs = []  # empty set of arc
        self._tokens = dep_graph.nodes
        self._max_address = len(self.buffer)
    def __str__(self):
        return 'Stack : ' + \
            str(self.stack) + '  Buffer : ' + str(self.buffer) + '   Arcs : ' + str(self.arcs)
    def _check_informative(self, feat, flag=False):
        """
        Check whether a feature is informative
        The flag control whether "_" is informative or not
        """
        if feat is None:
            return False
        if feat == '':
            return False
        if flag is False:
            if feat == '_':
                return False
        return True
    def extract_features(self):
        """
        Extract the set of features for the current configuration. Implement standard features as described in
        Table 3.2 (page 31) of the Dependency Parsing book by Sandra Kubler, Ryan McDonald, Joakim Nivre.
        Please note that these features are very basic.
        :return: list(str)
        """
        result = []
        # Todo : can come up with more complicated features set for better
        # performance.
        if len(self.stack) > 0:
            # Stack 0
            stack_idx0 = self.stack[len(self.stack) - 1]
            token = self._tokens[stack_idx0]
            if self._check_informative(token['word'], True):
                result.append('STK_0_FORM_' + token['word'])
            if 'lemma' in token and self._check_informative(token['lemma']):
                result.append('STK_0_LEMMA_' + token['lemma'])
            if self._check_informative(token['tag']):
                result.append('STK_0_POS_' + token['tag'])
            if 'feats' in token and self._check_informative(token['feats']):
                feats = token['feats'].split("|")
                for feat in feats:
                    result.append('STK_0_FEATS_' + feat)
            # Stack 1
            if len(self.stack) > 1:
                stack_idx1 = self.stack[len(self.stack) - 2]
                token = self._tokens[stack_idx1]
                if self._check_informative(token['tag']):
                    result.append('STK_1_POS_' + token['tag'])
            # Left most, right most dependency of stack[0]
            left_most = 1000000
            right_most = -1
            dep_left_most = ''
            dep_right_most = ''
            for (wi, r, wj) in self.arcs:
                if wi == stack_idx0:
                    if (wj > wi) and (wj > right_most):
                        right_most = wj
                        dep_right_most = r
                    if (wj < wi) and (wj < left_most):
                        left_most = wj
                        dep_left_most = r
            if self._check_informative(dep_left_most):
                result.append('STK_0_LDEP_' + dep_left_most)
            if self._check_informative(dep_right_most):
                result.append('STK_0_RDEP_' + dep_right_most)
        # Check Buffered 0
        if len(self.buffer) > 0:
            # Buffer 0
            buffer_idx0 = self.buffer[0]
            token = self._tokens[buffer_idx0]
            if self._check_informative(token['word'], True):
                result.append('BUF_0_FORM_' + token['word'])
            if 'lemma' in token and self._check_informative(token['lemma']):
                result.append('BUF_0_LEMMA_' + token['lemma'])
            if self._check_informative(token['tag']):
                result.append('BUF_0_POS_' + token['tag'])
            if 'feats' in token and self._check_informative(token['feats']):
                feats = token['feats'].split("|")
                for feat in feats:
                    result.append('BUF_0_FEATS_' + feat)
            # Buffer 1
            if len(self.buffer) > 1:
                buffer_idx1 = self.buffer[1]
                token = self._tokens[buffer_idx1]
                if self._check_informative(token['word'], True):
                    result.append('BUF_1_FORM_' + token['word'])
                if self._check_informative(token['tag']):
                    result.append('BUF_1_POS_' + token['tag'])
            if len(self.buffer) > 2:
                buffer_idx2 = self.buffer[2]
                token = self._tokens[buffer_idx2]
                if self._check_informative(token['tag']):
                    result.append('BUF_2_POS_' + token['tag'])
            if len(self.buffer) > 3:
                buffer_idx3 = self.buffer[3]
                token = self._tokens[buffer_idx3]
                if self._check_informative(token['tag']):
                    result.append('BUF_3_POS_' + token['tag'])
                    # Left most, right most dependency of stack[0]
            left_most = 1000000
            right_most = -1
            dep_left_most = ''
            dep_right_most = ''
            for (wi, r, wj) in self.arcs:
                if wi == buffer_idx0:
                    if (wj > wi) and (wj > right_most):
                        right_most = wj
                        dep_right_most = r
                    if (wj < wi) and (wj < left_most):
                        left_most = wj
                        dep_left_most = r
            if self._check_informative(dep_left_most):
                result.append('BUF_0_LDEP_' + dep_left_most)
            if self._check_informative(dep_right_most):
                result.append('BUF_0_RDEP_' + dep_right_most)
        return result
class Transition(object):
    """
    This class defines a set of transition which is applied to a configuration to get another configuration
    Note that for different parsing algorithm, the transition is different.
    """
    # Define set of transitions
    LEFT_ARC = 'LEFTARC'
    RIGHT_ARC = 'RIGHTARC'
    SHIFT = 'SHIFT'
    REDUCE = 'REDUCE'
    def __init__(self, alg_option):
        """
        :param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
        :type alg_option: str
        """
        self._algo = alg_option
        if alg_option not in [
                TransitionParser.ARC_STANDARD,
                TransitionParser.ARC_EAGER]:
            raise ValueError(" Currently we only support %s and %s " %
                                        (TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER))
    def left_arc(self, conf, relation):
        """
        Note that the algorithm for left-arc is quite similar except for precondition for both arc-standard and arc-eager
            :param configuration: is the current configuration
            :return : A new configuration or -1 if the pre-condition is not satisfied
        """
        if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
            return -1
        if conf.buffer[0] == 0:
            # here is the Root element
            return -1
        idx_wi = conf.stack[len(conf.stack) - 1]
        flag = True
        if self._algo == TransitionParser.ARC_EAGER:
            for (idx_parent, r, idx_child) in conf.arcs:
                if idx_child == idx_wi:
                    flag = False
        if flag:
            conf.stack.pop()
            idx_wj = conf.buffer[0]
            conf.arcs.append((idx_wj, relation, idx_wi))
        else:
            return -1
    def right_arc(self, conf, relation):
        """
        Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
            :param configuration: is the current configuration
            :return : A new configuration or -1 if the pre-condition is not satisfied
        """
        if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
            return -1
        if self._algo == TransitionParser.ARC_STANDARD:
            idx_wi = conf.stack.pop()
            idx_wj = conf.buffer[0]
            conf.buffer[0] = idx_wi
            conf.arcs.append((idx_wi, relation, idx_wj))
        else:  # arc-eager
            idx_wi = conf.stack[len(conf.stack) - 1]
            idx_wj = conf.buffer.pop(0)
            conf.stack.append(idx_wj)
            conf.arcs.append((idx_wi, relation, idx_wj))
    def reduce(self, conf):
        """
        Note that the algorithm for reduce is only available for arc-eager
            :param configuration: is the current configuration
            :return : A new configuration or -1 if the pre-condition is not satisfied
        """
        if self._algo != TransitionParser.ARC_EAGER:
            return -1
        if len(conf.stack) <= 0:
            return -1
        idx_wi = conf.stack[len(conf.stack) - 1]
        flag = False
        for (idx_parent, r, idx_child) in conf.arcs:
            if idx_child == idx_wi:
                flag = True
        if flag:
            conf.stack.pop()  # reduce it
        else:
            return -1
    def shift(self, conf):
        """
        Note that the algorithm for shift is the SAME for arc-standard and arc-eager
            :param configuration: is the current configuration
            :return : A new configuration or -1 if the pre-condition is not satisfied
        """
        if len(conf.buffer) <= 0:
            return -1
        idx_wi = conf.buffer.pop(0)
        conf.stack.append(idx_wi)
class TransitionParser(ParserI):
    """
    Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager"
    """
    ARC_STANDARD = 'arc-standard'
    ARC_EAGER = 'arc-eager'
    def __init__(self, algorithm):
        """
        :param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
        :type algorithm: str
        """
        if not(algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
            raise ValueError(" Currently we only support %s and %s " %
                                        (self.ARC_STANDARD, self.ARC_EAGER))
        self._algorithm = algorithm
        self._dictionary = {}
        self._transition = {}
        self._match_transition = {}
    def _get_dep_relation(self, idx_parent, idx_child, depgraph):
        p_node = depgraph.nodes[idx_parent]
        c_node = depgraph.nodes[idx_child]
        if c_node['word'] is None:
            return None  # Root word
        if c_node['head'] == p_node['address']:
            return c_node['rel']
        else:
            return None
    def _convert_to_binary_features(self, features):
        """
        :param features: list of feature string which is needed to convert to binary features
        :type features: list(str)
        :return : string of binary features in libsvm format  which is 'featureID:value' pairs
        """
        unsorted_result = []
        for feature in features:
            self._dictionary.setdefault(feature, len(self._dictionary))
            unsorted_result.append(self._dictionary[feature])
        # Default value of each feature is 1.0
        return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result))
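    # Illustrative sketch (not part of the original NLTK source): starting from an
    # empty feature dictionary, two feature strings receive ids 0 and 1, so
    #
    #     parser._convert_to_binary_features(['STK_0_POS_NN', 'BUF_0_POS_VB'])
    #
    # would return the libsvm-style string '0:1.0 1:1.0' (ids sorted, value 1.0).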
    def _is_projective(self, depgraph):
        arc_list = []
        for key in depgraph.nodes:
            node = depgraph.nodes[key]           
            
            if 'head' in node:
                childIdx = node['address']
                parentIdx = node['head']
                if parentIdx is not None:
                    arc_list.append((parentIdx, childIdx))
        for (parentIdx, childIdx) in arc_list:
            # Ensure that childIdx < parentIdx
            if childIdx > parentIdx:
                temp = childIdx
                childIdx = parentIdx
                parentIdx = temp
            for k in range(childIdx + 1, parentIdx):
                for m in range(len(depgraph.nodes)):
                    if (m < childIdx) or (m > parentIdx):
                        if (k, m) in arc_list:
                            return False
                        if (m, k) in arc_list:
                            return False
        return True
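    # Worked example (not part of the original NLTK source): if the graph's
    # (head, child) pairs include (1, 3) and (2, 4), the arc (1, 3) spans word 2
    # whose own head (4) lies outside that span, so the arcs cross and
    # _is_projective returns False.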
    def _write_to_file(self, key, binary_features, input_file):
        """
        write the binary features to input file and update the transition dictionary
        """
        self._transition.setdefault(key, len(self._transition) + 1)
        self._match_transition[self._transition[key]] = key
        input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
        input_file.write(input_str.encode('utf-8'))
    def _create_training_examples_arc_std(self, depgraphs, input_file):
        """
        Create the training example in the libsvm format and write it to the input_file.
        Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonald and Joakim Nivre (2009)
        """
        operation = Transition(self.ARC_STANDARD)
        count_proj = 0
        training_seq = []
        for depgraph in depgraphs:
            if not self._is_projective(depgraph):
                continue
            count_proj += 1
            conf = Configuration(depgraph)
            while len(conf.buffer) > 0:
                b0 = conf.buffer[0]
                features = conf.extract_features()
                binary_features = self._convert_to_binary_features(features)
                if len(conf.stack) > 0:
                    s0 = conf.stack[len(conf.stack) - 1]
                    # Left-arc operation
                    rel = self._get_dep_relation(b0, s0, depgraph)
                    if rel is not None:
                        key = Transition.LEFT_ARC + ':' + rel
                        self._write_to_file(key, binary_features, input_file)
                        operation.left_arc(conf, rel)
                        training_seq.append(key)
                        continue
                    # Right-arc operation
                    rel = self._get_dep_relation(s0, b0, depgraph)
                    if rel is not None:
                        precondition = True
                        # Get the max-index of buffer
                        maxID = conf._max_address
                        for w in range(maxID + 1):
                            if w != b0:
                                relw = self._get_dep_relation(b0, w, depgraph)
                                if relw is not None:
                                    if (b0, relw, w) not in conf.arcs:
                                        precondition = False
                        if precondition:
                            key = Transition.RIGHT_ARC + ':' + rel
                            self._write_to_file(
                                key,
                                binary_features,
                                input_file)
                            operation.right_arc(conf, rel)
                            training_seq.append(key)
                            continue
                # Shift operation as the default
                key = Transition.SHIFT
                self._write_to_file(key, binary_features, input_file)
                operation.shift(conf)
                training_seq.append(key)
        print(" Number of training examples : " + str(len(depgraphs)))
        print(" Number of valid (projective) examples : " + str(count_proj))
        return training_seq
    def _create_training_examples_arc_eager(self, depgraphs, input_file):
        """
        Create the training example in the libsvm format and write it to the input_file.
        Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
        """
        operation = Transition(self.ARC_EAGER)
        countProj = 0
        training_seq = []
        for depgraph in depgraphs:
            if not self._is_projective(depgraph):
                continue
            countProj += 1
            conf = Configuration(depgraph)
            while len(conf.buffer) > 0:
                b0 = conf.buffer[0]
                features = conf.extract_features()
                binary_features = self._convert_to_binary_features(features)
                if len(conf.stack) > 0:
                    s0 = conf.stack[len(conf.stack) - 1]
                    # Left-arc operation
                    rel = self._get_dep_relation(b0, s0, depgraph)
                    if rel is not None:
                        key = Transition.LEFT_ARC + ':' + rel
                        self._write_to_file(key, binary_features, input_file)
                        operation.left_arc(conf, rel)
                        training_seq.append(key)
                        continue
                    # Right-arc operation
                    rel = self._get_dep_relation(s0, b0, depgraph)
                    if rel is not None:
                        key = Transition.RIGHT_ARC + ':' + rel
                        self._write_to_file(key, binary_features, input_file)
                        operation.right_arc(conf, rel)
                        training_seq.append(key)
                        continue
                    # reduce operation
                    flag = False
                    for k in range(s0):
                        if self._get_dep_relation(k, b0, depgraph) is not None:
                            flag = True
                        if self._get_dep_relation(b0, k, depgraph) is not None:
                            flag = True
                    if flag:
                        key = Transition.REDUCE
                        self._write_to_file(key, binary_features, input_file)
                        operation.reduce(conf)
                        training_seq.append(key)
                        continue
                # Shift operation as the default
                key = Transition.SHIFT
                self._write_to_file(key, binary_features, input_file)
                operation.shift(conf)
                training_seq.append(key)
        print(" Number of training examples : " + str(len(depgraphs)))
        print(" Number of valid (projective) examples : " + str(countProj))
        return training_seq
    def train(self, depgraphs, modelfile):
        """
        :param depgraphs : list of DependencyGraph as the training data
        :type depgraphs : DependencyGraph
        :param modelfile : file name to save the trained model
        :type modelfile : str
        """
        try:
            input_file = tempfile.NamedTemporaryFile(
                prefix='transition_parse.train',
                dir=tempfile.gettempdir(),
                delete=False)
            if self._algorithm == self.ARC_STANDARD:
                self._create_training_examples_arc_std(depgraphs, input_file)
            else:
                self._create_training_examples_arc_eager(depgraphs, input_file)
            input_file.close()
            # Using the temporary file to train the libsvm classifier
            x_train, y_train = load_svmlight_file(input_file.name)
            # The parameter is set according to the paper:
            # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
            # Todo : because of probability = True => very slow due to
            # cross-validation. Need to improve the speed here
            model = svm.SVC(
                kernel='poly',
                degree=2,
                coef0=0,
                gamma=0.2,
                C=0.5,
                verbose=True,
                probability=True)
            model.fit(x_train, y_train)
            # Save the model to file name (as pickle)
            pickle.dump(model, open(modelfile, 'wb'))
        finally:
            remove(input_file.name)
    def parse(self, depgraphs, modelFile):
        """
        :param depgraphs: the list of test sentence, each sentence is represented as a dependency graph where the 'head' information is dummy
        :type depgraphs: list(DependencyGraph)
        :param modelfile: the model file
        :type modelfile: str
        :return: list (DependencyGraph) with the 'head' and 'rel' information
        """
        result = []
        # First load the model
        model = pickle.load(open(modelFile, 'rb'))
        operation = Transition(self._algorithm)
        for depgraph in depgraphs:
            conf = Configuration(depgraph)
            while len(conf.buffer) > 0:
                features = conf.extract_features()
                col = []
                row = []
                data = []
                for feature in features:
                    if feature in self._dictionary:
                        col.append(self._dictionary[feature])
                        row.append(0)
                        data.append(1.0)
                np_col = array(sorted(col))  # NB : index must be sorted
                np_row = array(row)
                np_data = array(data)
                x_test = sparse.csr_matrix((np_data, (np_row, np_col)), shape=(1, len(self._dictionary)))
                # It's best to use decision function as follow BUT it's not supported yet for sparse SVM
                # Using decision funcion to build the votes array
                #dec_func = model.decision_function(x_test)[0]
                #votes = {}
                #k = 0
                # for i in range(len(model.classes_)):
                #    for j in range(i+1, len(model.classes_)):
                #        #if  dec_func[k] > 0:
                #            votes.setdefault(i,0)
                #            votes[i] +=1
                #        else:
                #           votes.setdefault(j,0)
                #           votes[j] +=1
                #        k +=1
                # Sort votes according to the values
                #sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
                # We will use predict_proba instead of decision_function
                prob_dict = {}
                pred_prob = model.predict_proba(x_test)[0]
                for i in range(len(pred_prob)):
                    prob_dict[i] = pred_prob[i]
                sorted_Prob = sorted(
                    prob_dict.items(),
                    key=itemgetter(1),
                    reverse=True)
                # Note that SHIFT is always a valid operation
                for (y_pred_idx, confidence) in sorted_Prob:
                    #y_pred = model.predict(x_test)[0]
                    # From the prediction match to the operation
                    y_pred = model.classes_[y_pred_idx]
                    if y_pred in self._match_transition:
                        strTransition = self._match_transition[y_pred]
                        baseTransition = strTransition.split(":")[0]
                        if baseTransition == Transition.LEFT_ARC:
                            if operation.left_arc(conf, strTransition.split(":")[1]) != -1:
                                break
                        elif baseTransition == Transition.RIGHT_ARC:
                            if operation.right_arc(conf, strTransition.split(":")[1]) != -1:
                                break
                        elif baseTransition == Transition.REDUCE:
                            if operation.reduce(conf) != -1:
                                break
                        elif baseTransition == Transition.SHIFT:
                            if operation.shift(conf) != -1:
                                break
                    else:
                        raise ValueError("The predicted transition is not recognized, expected errors")
            # Finish with operations build the dependency graph from Conf.arcs
            new_depgraph = deepcopy(depgraph)
            for key in new_depgraph.nodes:
                node = new_depgraph.nodes[key]
                node['rel'] = ''
                # With the default, all the token depend on the Root
                node['head'] = 0
            for (head, rel, child) in conf.arcs:
                c_node = new_depgraph.nodes[child]
                c_node['head'] = head
                c_node['rel'] = rel
            result.append(new_depgraph)
        return result
def demo():
    """
    >>> from nltk.parse import DependencyGraph, DependencyEvaluator
    >>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
    >>> gold_sent = DependencyGraph(\"""
    ... Economic  JJ     2      ATT
    ... news  NN     3       SBJ
    ... has       VBD       0       ROOT
    ... little      JJ      5       ATT
    ... effect   NN     3       OBJ
    ... on     IN      5       ATT
    ... financial       JJ       8       ATT
    ... markets    NNS      6       PC
    ... .    .      3       PU
    ... \""")
    >>> conf = Configuration(gold_sent)
    ###################### Check the Initial Feature ########################
    >>> print(', '.join(conf.extract_features()))
    STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ
    ###################### Check The Transition #######################
    Check the Initialized Configuration
    >>> print(conf)
    Stack : [0]  Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9]   Arcs : []
    A. Do some transition checks for ARC-STANDARD
    >>> operation = Transition('arc-standard')
    >>> operation.shift(conf)
    >>> operation.left_arc(conf, "ATT")
    >>> operation.shift(conf)
    >>> operation.left_arc(conf,"SBJ")
    >>> operation.shift(conf)
    >>> operation.shift(conf)
    >>> operation.left_arc(conf, "ATT")
    >>> operation.shift(conf)
    >>> operation.shift(conf)
    >>> operation.shift(conf)
    >>> operation.left_arc(conf, "ATT")
    Middle Configuration and Features Check
    >>> print(conf)
    Stack : [0, 3, 5, 6]  Buffer : [8, 9]   Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
    >>> print(', '.join(conf.extract_features()))
    STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT
    >>> operation.right_arc(conf, "PC")
    >>> operation.right_arc(conf, "ATT")
    >>> operation.right_arc(conf, "OBJ")
    >>> operation.shift(conf)
    >>> operation.right_arc(conf, "PU")
    >>> operation.right_arc(conf, "ROOT")
    >>> operation.shift(conf)
    Terminated Configuration Check
    >>> print(conf)
    Stack : [0]  Buffer : []   Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
    B. Do some transition checks for ARC-EAGER
    >>> conf = Configuration(gold_sent)
    >>> operation = Transition('arc-eager')
    >>> operation.shift(conf)
    >>> operation.left_arc(conf,'ATT')
    >>> operation.shift(conf)
    >>> operation.left_arc(conf,'SBJ')
    >>> operation.right_arc(conf,'ROOT')
    >>> operation.shift(conf)
    >>> operation.left_arc(conf,'ATT')
    >>> operation.right_arc(conf,'OBJ')
    >>> operation.right_arc(conf,'ATT')
    >>> operation.shift(conf)
    >>> operation.left_arc(conf,'ATT')
    >>> operation.right_arc(conf,'PC')
    >>> operation.reduce(conf)
    >>> operation.reduce(conf)
    >>> operation.reduce(conf)
    >>> operation.right_arc(conf,'PU')
    >>> print(conf)
    Stack : [0, 3, 9]  Buffer : []   Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
    ###################### Check The Training Function #######################
    A. Check the ARC-STANDARD training
    >>> import tempfile
    >>> import os
    >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
    >>> parser_std = TransitionParser('arc-standard')
    >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file)))
     Number of training examples : 1
     Number of valid (projective) examples : 1
    SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT
    >>> parser_std.train([gold_sent],'temp.arcstd.model')
     Number of training examples : 1
     Number of valid (projective) examples : 1
    ...
    >>> remove(input_file.name)
    B. Check the ARC-EAGER training
    >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
    >>> parser_eager = TransitionParser('arc-eager')
    >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file)))
     Number of training examples : 1
     Number of valid (projective) examples : 1
    SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU
    >>> parser_eager.train([gold_sent],'temp.arceager.model')
     Number of training examples : 1
     Number of valid (projective) examples : 1
    ...
    >>> remove(input_file.name)
    ###################### Check The Parsing Function ########################
    A. Check the ARC-STANDARD parser
    >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
    >>> de = DependencyEvaluator(result, [gold_sent])
    >>> de.eval() >= (0, 0)
    True
    B. Check the ARC-EAGER parser
    >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
    >>> de = DependencyEvaluator(result, [gold_sent])
    >>> de.eval() >= (0, 0)
    True
    Note that result is very poor because of only one training example.
    """
if __name__ == '__main__':
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
 | 
	mit | 
| 
	ua-snap/downscale | 
	snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/cru_ts_downscaling_class_d.py | 
	3 | 
	20625 | 
	# # #
# Downscale CRU Historical TS3.x data to a pre-processed climatology
#  extent, resolution, reference system
#
# Author: Michael Lindgren ([email protected])
# # #
# import some modules
import rasterio, xray, os
import numpy as np
import pandas as pd
class DownscalingUtils( object ):
	def write_gtiff( self, output_arr, template_meta, output_filename, compress=True ):
		'''
		DESCRIPTION:
		------------
		output a GeoTiff given a numpy ndarray, rasterio-style 
		metadata dictionary, and an output_filename.
		If a multiband file is to be processed, the Longitude
		dimension is expected to be the right-most. 
		--> dimensions should be (band, latitude, longitude)
		ARGUMENTS:
		----------
		output_arr = [numpy.ndarray] with longitude as the right-most dimension
		template_meta = [dict] rasterio-style raster meta dictionary.  Typically 
			found in a template raster by: rasterio.open( fn ).meta
		output_filename = [str] path to and name of the output GeoTiff to be 
			created.  currently only 'GTiff' is supported.
		compress = [bool] if True (default) LZW-compression is applied to the 
			output GeoTiff.  If False, no compression is applied.
			* this can also be added (along with many other gdal creation options)
			to the template meta as a key value pair template_meta.update( compress='lzw' ).
			See the rasterio documentation for more details. This is just a common one that is used here.
		RETURNS:
		--------
		string path to the new output_filename created
		'''
		import os
		if 'transform' in template_meta.keys():
			_ = template_meta.pop( 'transform' )
		if not output_filename.endswith( '.tif' ):
			UserWarning( 'output_filename does not end with ".tif", it has been fixed for you.' )
			output_filename = os.path.splitext( output_filename )[0] + '.tif'
		if output_arr.ndim == 2:
			# add in a new dimension - can get you into trouble with very large rasters...
			output_arr = output_arr[ np.newaxis, ... ] 
		elif output_arr.ndim < 2:
			raise ValueError( 'output_arr must have at least 2 dimensions' )
		nbands, nrows, ncols = output_arr.shape 
		if template_meta[ 'count' ] != nbands:
			raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
		if compress == True and 'compress' not in template_meta.keys():
			template_meta.update( compress='lzw' )
		with rasterio.open( output_filename, 'w', **template_meta ) as out:
			for band in range( 1, nbands+1 ):
				out.write( output_arr[ band-1, ... ], band )
		return output_filename
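	# Hedged usage sketch (hypothetical file names, not part of the original class);
	# assumes the template raster is single-band and `arr` matches its dtype:
	#
	#     utils = DownscalingUtils()
	#     with rasterio.open( 'template.tif' ) as tmpl:
	#         meta = tmpl.meta
	#     arr = np.zeros( (meta['height'], meta['width']), dtype=meta['dtype'] )
	#     utils.write_gtiff( arr, meta, 'output.tif' )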
	def shiftgrid( self, lon0, datain, lonsin, start=True, cyclic=360.0 ):
		"""
		Shift global lat/lon grid east or west.
		.. tabularcolumns:: |l|L|
		==============   ====================================================
		Arguments        Description
		==============   ====================================================
		lon0             starting longitude for shifted grid
						 (ending longitude if start=False). lon0 must be on
						 input grid (within the range of lonsin).
		datain           original data with longitude the right-most
						 dimension.
		lonsin           original longitudes.
		==============   ====================================================
		.. tabularcolumns:: |l|L|
		==============   ====================================================
		Keywords         Description
		==============   ====================================================
		start            if True, lon0 represents the starting longitude
						 of the new grid. if False, lon0 is the ending
						 longitude. Default True.
		cyclic           width of periodic domain (default 360)
		==============   ====================================================
		returns ``dataout,lonsout`` (data and longitudes on shifted grid).
		"""
		if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
			# Use all data instead of raise ValueError, 'cyclic point not included'
			start_idx = 0
		else:
			# If cyclic, remove the duplicate point
			start_idx = 1
		if lon0 < lonsin[0] or lon0 > lonsin[-1]:
			raise ValueError('lon0 outside of range of lonsin')
		i0 = np.argmin(np.fabs(lonsin-lon0))
		i0_shift = len(lonsin)-i0
		if np.ma.isMA(datain):
			dataout  = np.ma.zeros(datain.shape,datain.dtype)
		else:
			dataout  = np.zeros(datain.shape,datain.dtype)
		if np.ma.isMA(lonsin):
			lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
		else:
			lonsout = np.zeros(lonsin.shape,lonsin.dtype)
		if start:
			lonsout[0:i0_shift] = lonsin[i0:]
		else:
			lonsout[0:i0_shift] = lonsin[i0:]-cyclic
		dataout[...,0:i0_shift] = datain[...,i0:]
		if start:
			lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
		else:
			lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
		dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
		return dataout,lonsout
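	# Hedged usage sketch (not in the original source): re-center a 0..360-degree
	# longitude grid onto -180..180, as done later in interpolate_anomalies. The
	# array shapes and values here are illustrative only (np is numpy).
	#
	#   lons = np.arange( 0., 360., 0.5 )
	#   data = np.random.rand( 360, 720 )   # (lat, lon) with longitude right-most
	#   shifted, new_lons = DownscalingUtils().shiftgrid( 180., data, lons, start=False )
	#   # new_lons now runs from -180.0 to 179.5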
	def bounds_to_extent( self, bounds ):
		'''
		take input rasterio bounds object and return an extent
		'''
		l,b,r,t = bounds
		return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
	def padded_bounds( self, rst, npixels, crs ):
		'''
		expand the extent of a raster by npixels in each direction and
		return the new, padded bounds.
		rst: rasterio raster object
		npixels: tuple of 4 (left(-),bottom(-),right(+),top(+)) number of pixels to
			expand in each direction. for 5 pixels in each direction it would look like
			this: (-5, -5, 5, 5) or just in the right and top directions like this:
			(0,0,5,5).
		crs: epsg code or proj4string defining the geospatial reference 
			system
		returns: list of the newly padded bounds in the order (left, bottom, right, top)
		'''
		import rasterio, os, sys
		from shapely.geometry import Polygon
		resolution = rst.res[0]
		new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
		return new_bounds
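	# Hedged usage sketch (not in the original source): pad a raster's bounds by
	# 5 pixels on every side. The raster path is a hypothetical placeholder.
	#
	#   import rasterio
	#   rst = rasterio.open( '/path/to/some_raster.tif' )
	#   padded = DownscalingUtils().padded_bounds( rst, (-5, -5, 5, 5), {'init':'epsg:3338'} )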
	def xyz_to_grid( self, x, y, z, grid, method='cubic', output_dtype=np.float32 ):
		'''
		interpolate points to a grid. simple wrapper around
		scipy.interpolate.griddata. Points and grid must be
		in the same coordinate system
		x = 1-D np.array of x coordinates / x,y,z must be same length
		y = 1-D np.array of y coordinates / x,y,z must be same length
		z = 1-D np.array of z coordinates / x,y,z must be same length
		grid = tuple of meshgrid as made using numpy.meshgrid()
				order (xi, yi)
		method = one of 'cubic', 'nearest', 'linear'
		'''
		from scipy.interpolate import griddata
		zi = griddata( (x, y), z, grid, method=method )
		zi = np.flipud( zi.astype( output_dtype ) )
		return zi
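	# Hedged usage sketch (not in the original source): interpolate scattered points
	# onto a regular grid. The coordinates and function below are made up for
	# illustration (np is numpy).
	#
	#   xi, yi = np.meshgrid( np.linspace( 0, 10, 50 ), np.linspace( 0, 10, 50 ) )
	#   x = np.random.uniform( 0, 10, 200 )
	#   y = np.random.uniform( 0, 10, 200 )
	#   z = np.sin( x ) + np.cos( y )
	#   zi = DownscalingUtils().xyz_to_grid( x, y, z, grid=(xi, yi), method='cubic' )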
	def calc_anomalies( self, fn, variable, climatology_begin='1961', climatology_end='1990', absolute=True, *args, **kwargs ):
		'''
		calculate absolute or relative anomalies given a NetCDF file
		of the Climatic Research Unit (CRU) Historical Time Series.
		'''
		import xray
		ds = xray.open_dataset( fn )
		try:
			clim_ds = ds.loc[ {'time':slice(climatology_begin, climatology_end)} ]
			climatology = clim_ds[ variable ].groupby( 'time.month' ).mean( 'time' )
		except Exception:
			raise AttributeError( 'cannot slice netcdf based on climatology years given. they must overlap.' )
		# calculate anomalies
		if absolute == True:
			anomalies = ds[ variable ].groupby( 'time.month' ) - climatology
		elif absolute == False:
			anomalies = ds[ variable ].groupby( 'time.month' ) / climatology
		else:
			raise AttributeError( 'calc_anomalies: absolute can only be True or False' )
		return anomalies
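	# Hedged usage sketch (not in the original source): absolute anomalies of a CRU
	# cloud-cover series relative to the 1961-1990 climatology. The NetCDF path is
	# a hypothetical placeholder.
	#
	#   utils = DownscalingUtils()
	#   anoms = utils.calc_anomalies( '/path/to/cru_ts3.23.cld.dat.nc', 'cld',
	#                                 climatology_begin='1961', climatology_end='1990',
	#                                 absolute=True )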
	def interpolate_anomalies( self, anom_df, meshgrid_tuple, template_raster_fn, lons_pcll, \
								src_transform, src_crs, src_nodata, output_filename, write_anomalies, *args, **kwargs ):
		'''
		run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
		extent, resolution, origin (template_raster).
		This function is intended to be used to run a pathos.multiprocessing Pool's map function
		across a list of pre-computed arguments.
		ARGUMENTS:
		---------
		anom_df = [pandas.DataFrame] anomalies table with 'lon', 'lat', 'anom' columns
		meshgrid_tuple = [tuple] (xi, yi) meshgrid of the output longitudes / latitudes
		template_raster_fn = [str] path to the template raster giving extent / resolution / origin
		lons_pcll = [numpy.ndarray] shifted (pacific-centered) longitudes of the anomaly grid
		src_transform = [affine.Affine] transform of the source (CRU) grid
		src_crs = [dict] rasterio-style coordinate reference system of the source grid
		src_nodata = [float] nodata value of the source grid
		output_filename = [str] path to the anomalies GeoTiff to (optionally) write out
		write_anomalies = [bool] if True, write the interpolated anomalies to output_filename
				
		RETURNS:
		-------
		if write_anomalies == True: [str] path to the output filename generated
		if write_anomalies == False: [tuple] interpolated NumPy ndarray representing the 
			interpolated anomalies and the rasterio-style metadata dictionary describing
			the newly generated raster.
		'''
		import rasterio
		from rasterio.warp import reproject, RESAMPLING
		template_raster = rasterio.open( template_raster_fn )
		template_meta = template_raster.meta
		if 'transform' in template_meta.keys():
			template_meta.pop( 'transform' )
		# update some meta configs
		template_meta.update( compress='lzw', crs={'init':'epsg:3338'} )
		interp_arr = self.xyz_to_grid( np.array(anom_df['lon'].tolist()), \
						np.array(anom_df['lat'].tolist()), \
						np.array(anom_df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' ) 
		src_nodata = -9999.0 # nodata
		interp_arr[ np.isnan( interp_arr ) ] = src_nodata
		dat, lons = self.shiftgrid( 180., interp_arr, lons_pcll, start=False )
		output_arr = np.empty_like( template_raster.read( 1 ) )
		reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
					dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
					dst_nodata=None, resampling=RESAMPLING.cubic_spline, SOURCE_EXTRA=1000 )
		# mask it with the internal mask in the template raster, where 0 is oob.
		output_arr = np.ma.masked_where( template_raster.read_masks( 1 ) == 0, output_arr )
		output_arr.fill_value = template_meta[ 'nodata' ]
		output_arr = output_arr.filled()
		if write_anomalies == True:
			out = self.write_gtiff( output_arr, template_meta, output_filename, compress=True )
		elif write_anomalies == False:
			out = ( output_arr, template_meta )
		else:
			raise AttributeError( 'interpolate_anomalies: write_anomalies can be True or False only.' )
		return out
	def downscale( self, anom_arr, baseline_arr, output_filename, \
					downscaling_operation, meta, post_downscale_function, *args, **kwargs ):
		'''
		downscale an anomaly array with a baseline array from the same period.
		Arguments:
		----------
		anom_arr = [ np.ndarray ] 2-D NumPy array representing a raster domain. 
					anom/baseline arrays must be same shape.
		baseline_arr = [ np.ndarray ] 2-D NumPy array representing a raster domain. 
					anom/baseline arrays must be same shape.
		output_filename = [ str ] full path and output filename to be created
		downscaling_operation = [ str ] one of 'add' (absolute anomalies) or 'mult' (relative anomalies)
		meta = [ dict ] rasterio-style dictionary of raster metadata attributes. This 
				must jive with the dimensions and the data type of the array generated 
				through downscaling anom_arr with baseline_arr.  
		post_downscale_function = [ function ] a function that takes a 2-D downscaled 
				array as input and returns an array of the same shape / datatype.  This
				is typically used as a post-mortem for clamping the values from an output
				downscaled array that may be slightly outside the range due to the 
				interpolation method. We currently use this to clamp the values of the hur
				to 0-100.
		Returns:
		--------
		output_filename of newly generated downscaled raster.
		'''
		def add( base, anom ):
			return base + anom
		def mult( base, anom ):
			return base * anom
		def div( base, anom ):
			# this one may not be useful, but the placeholder is here
			# return base / anom
			raise NotImplementedError
		operation_switch = { 'add':add, 'mult':mult, 'div':div }
		if downscaling_operation not in operation_switch.keys():
			raise AttributeError( 'downscale: incorrect downscaling_operation str' )
		
		# [ CHECK ] This may be something better to be done before passing to this function
		# both files need to be masked here since we use a RIDICULOUS oob value...
		# for both tas and cld, values less than -200 are out of the range of acceptable values and it
		# grabs the -3.4... mask values. so lets mask using this
		baseline_arr = np.ma.masked_where( baseline_arr < -200, baseline_arr )
		anom_arr = np.ma.masked_where( anom_arr < -200, anom_arr )
		output_arr = operation_switch[ downscaling_operation ]( baseline_arr, anom_arr )
		output_arr[ np.isinf( output_arr ) ] = meta[ 'nodata' ]
		if post_downscale_function != None:
			output_arr = post_downscale_function( output_arr )
		if 'transform' in meta.keys():
			# avoid the gdal geotransform deprecation warning
			meta.pop( 'transform' )
		with rasterio.open( output_filename, 'w', **meta ) as out:
			out.write( output_arr, 1 )
		return output_filename
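	# Hedged usage sketch (not in the original source): combine a baseline climatology
	# with an anomaly grid. 'add' is used for absolute anomalies and 'mult' for relative
	# ones; the arrays, metadata, and output path below are illustrative placeholders.
	#
	#   out_fn = DownscalingUtils().downscale( anom_arr, baseline_arr, '/tmp/downscaled.tif',
	#                                          'add', template_meta, post_downscale_function=None )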
class DownscaleCRU( object ):
	'''
	methods to downscale the Climatic Research Unit's (CRU) Historical 
	Time Series data using a 12-month climatology pre-processed to the final
	output domain and resolution.  Typically we use a PRISM climatology or a 
	CRU CL2.0 climatology for these purposes.
	'''
	def __init__( self, cru_ts, clim_path, template_raster_fn, base_path, climatology_begin='1961', climatology_end='1990', ncores=2, \
					absolute=True, metric='metric', variable=None, post_downscale_function=None, src_crs={'init':'epsg:4326'}, write_anomalies=True, *args, **kwargs ):
		self.cru_ts = cru_ts
		self.clim_path = clim_path
		self.template_raster_fn = template_raster_fn
		self.base_path = base_path
		self.climatology_begin = climatology_begin
		self.climatology_end = climatology_end
		self.ncores = ncores
		self.absolute = absolute
		self.metric = metric
		self.variable = variable
		self.post_downscale_function = post_downscale_function
		self.src_crs = src_crs
		self.utils = DownscalingUtils()
		self.write_anomalies = write_anomalies
	@staticmethod
	def _fn_month_grouper( fn, *args, **kwargs ):
		'''
		take a filename and return the month element of the naming convention
		'''
		return os.path.splitext( os.path.basename( fn ) )[0].split( '_' )[-2]
	def _get_varname_cru( self, *args, **kwargs ):
		'''
		take as input the cru ts3* netcdf filename and return (if possible)
		the name of the variable we want to work on from that netcdf.
		Arguments:
			none -- the cru ts* netcdf filepath is taken from self.cru_ts
		Returns:
			the variable name as a string if it can be deduced, and errors if
			the variable name cannot be deduced.
		'''
		ds = xray.open_dataset( self.cru_ts )
		variables = ds.variables.keys()
		variable = [ variable for variable in variables \
						if variable not in [u'lon', u'lat', u'time'] ]
		if len( variable ) == 1:
			variable = variable[ 0 ]
		else:
			raise AttributeError( 'cannot deduce the variable from the file. supply nc_varname and re-run' )
		return variable
	def _get_years_cru( self, *args, **kwargs ):
		ds = xray.open_dataset( self.cru_ts )
		time = pd.DatetimeIndex( ds.time.values )
		years = [ year.year for year in time ]
		return years
	def _get_version_cru( self, *args, **kwargs ):
		version = ''.join( os.path.basename( self.cru_ts ).split( '.' )[:2] )
		version = version.replace( 'ts', 'TS' ) # to follow convention
		return version
	def _interp_downscale_wrapper( self, args_dict, *args, **kwargs  ):
		'''
		interpolate anomalies and downscale to the baseline arr
		'''
		output_filename = args_dict[ 'output_filename' ]
		args_dict.update( output_filename=output_filename.replace( 'downscaled', 'anom' ) )
		anom = self.utils.interpolate_anomalies( **args_dict )
		if isinstance( anom, basestring ):
			rst = rasterio.open( anom )
			meta = rst.meta
			meta.update( compress='lzw' )
			anom_arr = rst.read( 1 )
		elif isinstance( anom, tuple ):
			anom_arr, meta = anom
		else:
			raise AttributeError( '_interp_downscale_wrapper: passed wrong instance type' )
		args_dict.update( output_filename=output_filename, anom_arr=anom_arr, meta=meta )
		return self.utils.downscale( **args_dict )
	def downscale_cru_ts( self, *args, **kwargs ):
		'''
		run the CRU downscaling using the monthly climatology files given
		'''
		from pathos.mp_map import mp_map
		import glob, affine, rasterio
		nc_varname = self._get_varname_cru( )
		# handle cases where the desired varname != one parsed from file.
		if self.variable == None:
			variable = nc_varname
		else:
			variable = self.variable
		
		# build output dirs
		anomalies_path = os.path.join( self.base_path, variable, 'anom' )
		if not os.path.exists( anomalies_path ):
			os.makedirs( anomalies_path )
		downscaled_path = os.path.join( self.base_path, variable, 'downscaled' )
		if not os.path.exists( downscaled_path ):
			os.makedirs( downscaled_path )
		# template setup 
		template_raster = rasterio.open( self.template_raster_fn )
		template_meta = template_raster.meta
		template_meta.update( crs={'init':'epsg:3338'} )
		# make a mask with values of 0=nodata and 1=data
		template_raster_mask = template_raster.read_masks( 1 ) # mask of band 1 is all we need
		template_raster_mask[ template_raster_mask == 255 ] = 1
		anomalies = self.utils.calc_anomalies( self.cru_ts, variable, absolute=self.absolute )
		anomalies_pcll, lons_pcll = self.utils.shiftgrid( 0., anomalies, anomalies.lon.data ) # grabs lons from the xray ds
		# mesh the lons and lats and unravel them to 1-D
		lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ]
		
		# convert into pandas.DataFrame and drop all the NaNs -- land-only dataset
		anom_df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in anomalies_pcll ]
		xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
		# argument setup -- HARDWIRED
		src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
		src_nodata = -9999.0
			
		# output_filenames setup
		years = np.unique( self._get_years_cru( self.cru_ts ) )
		cru_ts_version = self._get_version_cru( self.cru_ts ) # works if naming convention stays same
		months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
		month_year = [ (month, year) for year in years for month in months ]
		output_filenames = [ os.path.join( anomalies_path, '_'.join([ variable, self.metric, cru_ts_version, 'anom', month, str(year) ])+'.tif' )
								for month, year in month_year ]
		# NEW
		# read in the pre-processed 12-month climatology
		clim_list = sorted( glob.glob( os.path.join( self.clim_path, '*.tif' ) ) ) # this could catch you.
		clim_dict = { month:rasterio.open( fn ).read( 1 ) for month, fn in zip( months, clim_list ) }
		output_filenames = [ os.path.join( downscaled_path, '_'.join([ variable, self.metric, cru_ts_version, 'downscaled', month, str(year) ])+'.tif' )
								for month, year in month_year ]
		# set downscaling_operation based on self.absolute boolean
		if self.absolute == True:
			downscaling_operation = 'add'
		elif self.absolute == False:
			downscaling_operation = 'mult'
		else:
			raise AttributeError( 'downscaling operation: self.absolute must be boolean' )
		args_list = [ { 'anom_df':anom_df, 
						'meshgrid_tuple':(xi, yi), 
						'template_raster_fn':self.template_raster_fn, 
						'lons_pcll':lons_pcll, 
						'src_transform':src_transform, 
						'src_crs':self.src_crs, \
						'src_nodata':src_nodata, 
						'output_filename':out_fn,
						'baseline_arr':clim_dict[ self._fn_month_grouper( out_fn ) ],
						'downscaling_operation':downscaling_operation, 
						'post_downscale_function':self.post_downscale_function,
						'write_anomalies':self.write_anomalies }
							for anom_df, out_fn in zip( anom_df_list, output_filenames ) ]
		# run anomalies interpolation and downscaling in a single go.
		out = mp_map( lambda args: self._interp_downscale_wrapper( args_dict=args ), args_list, nproc=self.ncores )
		return 'downscaling complete. files output at: %s' % self.base_path
if __name__ == '__main__':
	# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
	# example of use of the new DownscaleCRU / DownscalingUtils classes
	# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
	# example of post_downscale_function - pass in at DownscaleCRU()
	def clamp_vals( x ):
		''' clamp the values following the relative humidity downscaling '''
		x[ (x > 100) & (x < 500) ] = 95
		return x
	# minimum required arguments
	cru_ts = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS323/cru_ts3.23.1901.2014.cld.dat.nc'
	clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
	template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
	base_path = '/atlas_scratch/malindgren/CMIP5'
	
	# run example
	down = DownscaleCRU( cru_ts, clim_path, template_raster_fn, base_path, absolute=False, ncores=32 )
	output = down.downscale_cru_ts()
 | 
	mit | 
| 
	waynenilsen/statsmodels | 
	statsmodels/sandbox/examples/try_quantile_regression1.py | 
	33 | 
	1188 | 
	'''Example to illustrate Quantile Regression
Author: Josef Perktold
polynomial regression with systematic deviations above
'''
import numpy as np
from statsmodels.compat.python import zip
from scipy import stats
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
sige = 0.1
nobs, k_vars = 500, 3
x = np.random.uniform(-1, 1, size=nobs)
x.sort()
exog = np.vander(x, k_vars+1)[:,::-1]
mix = 0.1 * stats.norm.pdf(x[:,None], loc=np.linspace(-0.5, 0.75, 4), scale=0.01).sum(1)
y = exog.sum(1) + mix + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.1)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.1', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.', alpha=0.5)
for lab, beta in zip(labels, params):
    print('%-8s'%lab, np.round(beta, 4))
    fitted = np.dot(exog, beta)
    lw = 2
    plt.plot(x, fitted, lw=lw, label=lab)
plt.legend()
plt.title('Quantile Regression')
plt.show()
 | 
	bsd-3-clause | 
| 
	vibhorag/scikit-learn | 
	sklearn/metrics/tests/test_score_objects.py | 
	138 | 
	14048 | 
	import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
                             log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
                                    _passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
                      'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'log_loss',
               'adjusted_rand_score'  # not really, but works
               ]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
    """Dummy estimator to test check_scoring"""
    pass
class EstimatorWithFit(BaseEstimator):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self
class EstimatorWithFitAndScore(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self
    def score(self, X, y):
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        self.y = y
        return self
    def predict(self, X):
        return self.y
class DummyScorer(object):
    """Dummy scorer that always returns 1."""
    def __call__(self, est, X, y):
        return 1
def test_check_scoring():
    # Test all branches of check_scoring
    estimator = EstimatorWithoutFit()
    pattern = (r"estimator should a be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_true(scorer is _passthrough_scorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, "accuracy")
    assert_true(isinstance(scorer, _PredictScorer))
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.
    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)
def test_make_scorer():
    # Sanity check on the make_scorer factory function.
    f = lambda *args: 0
    assert_raises(ValueError, make_scorer, f, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    # Test classification scorers.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:
        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)
    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)
    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)
    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    # Test regression scorers.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    logscore = get_scorer('log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)
    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
    assert_almost_equal(score1, score2)
    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
    assert_almost_equal(score1, score2)
    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)
    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
    # Test that when a list of scores is returned, we raise proper errors.
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0
    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    estimator = dict([(name, sensible_regr)
                      for name in REGRESSION_SCORERS] +
                     [(name, sensible_clf)
                      for name in CLF_SCORERS] +
                     [(name, sensible_ml_clf)
                      for name in MULTILABEL_ONLY_SCORERS])
    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                             "called with sample weights: {1} vs "
                             "{2}".format(name, weighted, unweighted))
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                "ignoring samples and setting sample_weight to"
                                " 0: {1} vs {2}".format(name, weighted,
                                                        ignored))
        except TypeError as e:
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
 | 
	bsd-3-clause | 
| 
	mdrumond/tensorflow | 
	tensorflow/python/estimator/inputs/pandas_io.py | 
	86 | 
	4503 | 
	# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
  # pylint: disable=g-import-not-at-top
  # pylint: disable=unused-import
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  HAS_PANDAS = False
def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=None,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
  """Returns input function that would feed Pandas DataFrame into the model.
  Note: `y`'s index must match `x`'s index.
  Args:
    x: pandas `DataFrame` object.
    y: pandas `Series` object. `None` if absent.
    batch_size: int, size of batches to return.
    num_epochs: int, number of epochs to iterate over data. If not `None`,
      read attempts that would exceed this value will raise `OutOfRangeError`.
    shuffle: bool, whether to read the records in random order.
    queue_capacity: int, size of the read queue. If `None`, it will be set
      roughly to the size of `x`.
    num_threads: Integer, number of threads used for reading and enqueueing. In
      order to have predicted and repeatable order of reading and enqueueing,
      such as in prediction and evaluation mode, `num_threads` should be 1.
    target_column: str, name to give the target column `y`.
  Returns:
    Function, that has signature of ()->(dict of `features`, `target`)
  Raises:
    ValueError: if `x` already contains a column with the same name as `y`, or
      if the indexes of `x` and `y` don't match.
    TypeError: `shuffle` is not bool.
  """
  if not HAS_PANDAS:
    raise TypeError(
        'pandas_input_fn should not be called without pandas installed')
  if not isinstance(shuffle, bool):
    raise TypeError('shuffle must be explicitly set as boolean; '
                    'got {}'.format(shuffle))
  x = x.copy()
  if y is not None:
    if target_column in x:
      raise ValueError(
          'Cannot use name %s for target column: DataFrame already has a '
          'column with that name: %s' % (target_column, x.columns))
    if not np.array_equal(x.index, y.index):
      raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
                       'Index for y: %s\n' % (x.index, y.index))
    x[target_column] = y
  # TODO(mdan): These are memory copies. We probably don't need 4x slack space.
  # The sizes below are consistent with what I've seen elsewhere.
  if queue_capacity is None:
    if shuffle:
      queue_capacity = 4 * len(x)
    else:
      queue_capacity = len(x)
  min_after_dequeue = max(queue_capacity / 4, 1)
  def input_fn():
    """Pandas input function."""
    queue = feeding_functions._enqueue_data(  # pylint: disable=protected-access
        x,
        queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        enqueue_size=batch_size,
        num_epochs=num_epochs)
    if num_epochs is None:
      features = queue.dequeue_many(batch_size)
    else:
      features = queue.dequeue_up_to(batch_size)
    assert len(features) == len(x.columns) + 1, ('Features should have one '
                                                 'extra element for the index.')
    features = features[1:]
    features = dict(zip(list(x.columns), features))
    if y is not None:
      target = features.pop(target_column)
      return features, target
    return features
  return input_fn
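# Hedged usage sketch (not part of the original module): feed a small DataFrame
# through the returned input_fn. The column names and values are illustrative only,
# and the call assumes pandas is available.
#
#   x = pd.DataFrame({'a': np.arange(10.), 'b': np.arange(10.) * 2})
#   y = pd.Series(np.arange(10.))
#   input_fn = pandas_input_fn(x, y, batch_size=2, shuffle=False, num_epochs=1)
#   features, target = input_fn()  # tensors to be consumed by an Estimator's train/evaluate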
 | 
	apache-2.0 | 
| 
	mjgrav2001/scikit-learn | 
	sklearn/metrics/tests/test_regression.py | 
	272 | 
	6066 | 
	from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
    y_true = np.arange(n_samples)
    y_pred = y_true + 1
    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(r2_score(y_true, y_pred),  0.995, 2)
    assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
    # All of length 3
    EXAMPLES = [
        ("continuous", [1, 2, 3], 1),
        ("continuous", [[1], [2], [3]], 1),
        ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
        ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
        ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
    ]
    for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
                                                            repeat=2):
        if type1 == type2 and n_out1 == n_out2:
            y_type, y_check1, y_check2, multioutput = _check_reg_targets(
                y1, y2, None)
            assert_equal(type1, y_type)
            if type1 == 'continuous':
                assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
                assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
            else:
                assert_array_equal(y_check1, y1)
                assert_array_equal(y_check2, y2)
        else:
            assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    y_true = [[0, 0]]*4
    y_pred = [[1, 1]]*4
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [1., 1.], decimal=2)
    assert_array_almost_equal(mae, [1., 1.], decimal=2)
    assert_array_almost_equal(r, [0., 0.], decimal=2)
    r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
    assert_array_almost_equal(r, [0, -3.5], decimal=2)
    assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                 multioutput='uniform_average'))
    evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                   multioutput='raw_values')
    assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator is
    # zero.
    y_true = [[1, 3], [-1, 2]]
    y_pred = [[1, 4], [-1, 1]]
    r2 = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(r2, [1., -3.], decimal=2)
    assert_equal(np.mean(r2), r2_score(y_true, y_pred,
                 multioutput='uniform_average'))
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(evs, [1., -3.], decimal=2)
    assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
    evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(maew, 0.475, decimal=3)
    assert_almost_equal(rw, 0.94, decimal=2)
    assert_almost_equal(evsw, 0.94, decimal=2)
 | 
	bsd-3-clause | 
| 
	hsiaoyi0504/scikit-learn | 
	examples/classification/plot_lda.py | 
	164 | 
	2224 | 
	"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20  # samples for training
n_test = 200  # samples for testing
n_averages = 50  # how often to repeat classification
n_features_max = 75  # maximum number of features
step = 4  # step size for the calculation
def generate_data(n_samples, n_features):
    """Generate random blob-ish data with noisy features.
    This returns an array of input data with shape `(n_samples, n_features)`
    and an array of `n_samples` target labels.
    Only one feature contains discriminative information, the other features
    contain only noise.
    """
    X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
    # add non-discriminative features
    if n_features > 1:
        X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
    return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
    score_clf1, score_clf2 = 0, 0
    for _ in range(n_averages):
        X, y = generate_data(n_train, n_features)
        clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
        clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
        X, y = generate_data(n_test, n_features)
        score_clf1 += clf1.score(X, y)
        score_clf2 += clf2.score(X, y)
    acc_clf1.append(score_clf1 / n_averages)
    acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
         label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
         label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
 | 
	bsd-3-clause | 
| 
	bjackman/lisa | 
	libs/utils/perf_analysis.py | 
	3 | 
	6952 | 
	# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pylab as pl
import re
import sys
import trappy
import logging
# Regexp to match an rt-app generated logfile
TASK_NAME_RE = re.compile(r'.*/rt-app-(.+)-[0-9]+\.log')
class PerfAnalysis(object):
    def __init__(self, datadir, tasks=None):
        # Dataframe of all tasks performance data
        self.perf_data = {}
        # Folder containing all rt-app data
        self.datadir = None
        # Setup logging
        self._log = logging.getLogger('PerfAnalysis')
        # Load performance data generated by rt-app workloads
        self.__loadRTAData(datadir, tasks)
        # Keep track of the datadir from where data have been loaded
        if len(self.perf_data) == 0:
            raise ValueError('No performance data found on folder [{0:s}]'\
                    .format(datadir))
        self.datadir = datadir
    def __taskNameFromLog(self, logfile):
        tname_match = re.search(TASK_NAME_RE, logfile)
        if tname_match is None:
            raise ValueError('The logfile [{0:s}] is not from rt-app'\
                    .format(logfile))
        return tname_match.group(1)
    def __logfileFromTaskName(self, taskname):
        for logfile in glob.glob(
                '{0:s}/rt-app-{1:s}.log'.format(self.datadir, taskname)):
            return logfile
        raise ValueError('No rt-app logfile found for task [{0:s}]'\
                .format(taskname))
    def tasks(self):
        """
        Return the list of tasks for which performance data have been loaded
        """
        if self.datadir is None:
            raise ValueError("rt-app performance data not (yet) loaded")
        return self.perf_data.keys()
    def logfile(self, task):
        """
        Return the logfile for the specified task
        """
        if task not in self.perf_data:
            raise ValueError('No logfile loaded for task [{0:s}]'\
                    .format(task))
        return self.perf_data[task]['logfile']
    def df(self, task):
        """
        Return the PANDAS dataframe with the performance data for the
        specified task
        """
        if self.datadir is None:
            raise ValueError("rt-app performance data not (yet) loaded")
        if task not in self.perf_data:
            raise ValueError('No dataframe loaded for task [{0:s}]'\
                    .format(task))
        return self.perf_data[task]['df']
    def __loadRTAData(self, datadir, tasks):
        """
        Load peformance data of an rt-app workload
        """
        if tasks is None:
            # Lookup for all rt-app logfile into the specified datadir
            for logfile in glob.glob('{0:s}/rt-app-*.log'.format(datadir)):
                task_name = self.__taskNameFromLog(logfile)
                self.perf_data[task_name] = {}
                self.perf_data[task_name]['logfile'] = logfile
                self._log.debug('Found rt-app logfile for task [%s]', task_name)
        else:
            # Lookup for specified rt-app task logfile into specified datadir
            for task in tasks:
                logfile = self.__logfileFromTaskName(task)
                self.perf_data[task] = {}
                self.perf_data[task]['logfile'] = logfile
                self._log.debug('Found rt-app logfile for task [%s]', task)
        # Load all the found logfile into a dataset
        for task in self.perf_data.keys():
            self._log.debug('Loading dataframe for task [%s]...', task)
            df = pd.read_table(self.logfile(task),
                    sep='\s+',
                    skiprows=1,
                    header=0,
                    usecols=[1,2,3,4,7,8,9,10],
                    names=[
                        'Cycles', 'Run' ,'Period', 'Timestamp',
                        'Slack', 'CRun', 'CPeriod', 'WKPLatency'
                    ])
            # Normalize time to [s] with origin on the first event
            start_time = df['Timestamp'][0]/1e6
            df['Time'] = df['Timestamp']/1e6 - start_time
            df.set_index(['Time'], inplace=True)
            # Add performance metrics column, performance is defined as:
            #             slack
            #   perf = -------------
            #          period - run
            df['PerfIndex'] = df['Slack'] / (df['CPeriod'] - df['CRun'])
            # Keep track of the loaded dataframe
            self.perf_data[task]['df'] = df
    def plotPerf(self, task, title=None):
        """
        Plot the Latency/Slack and Performance data for the specified task
        """
        # Grid
        gs = gridspec.GridSpec(2, 2, height_ratios=[4,1], width_ratios=[3,1]);
        gs.update(wspace=0.1, hspace=0.1);
        # Figure
        plt.figure(figsize=(16, 2*6));
        if title:
            plt.suptitle(title, y=.97, fontsize=16,
                    horizontalalignment='center');
        # Plot: Slack and Latency
        axes = plt.subplot(gs[0,0]);
        axes.set_title('Task [{0:s}] (start) Latency and (completion) Slack'\
                .format(task));
        data = self.df(task)[['Slack', 'WKPLatency']]
        data.plot(ax=axes, drawstyle='steps-post', style=['b', 'g']);
        # axes.set_xlim(x_min, x_max);
        axes.xaxis.set_visible(False);
        # Plot: Performance
        axes = plt.subplot(gs[1,0]);
        axes.set_title('Task [{0:s}] Performance Index'.format(task));
        data = self.df(task)[['PerfIndex',]]
        data.plot(ax=axes, drawstyle='steps-post');
        axes.set_ylim(0, 2);
        # axes.set_xlim(x_min, x_max);
        # Plot: Slack Histogram
        axes = plt.subplot(gs[0:2,1]);
        data = self.df(task)[['PerfIndex',]]
        data.hist(bins=30, ax=axes, alpha=0.4);
        # axes.set_xlim(x_min, x_max);
        pindex_avg = data.mean()[0];
        pindex_std = data.std()[0];
        self._log.info('PerfIndex, Task [%s] avg: %.2f, std: %.2f',
                task, pindex_avg, pindex_std)
        axes.axvline(pindex_avg, color='b', linestyle='--', linewidth=2);
        # Save generated plots into datadir
        figname = '{}/task_perf_{}.png'.format(self.datadir, task)
        pl.savefig(figname, bbox_inches='tight')
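# Hedged usage sketch (not part of the original module): load the rt-app logfiles
# produced in a results directory and plot the performance index of one task.
# The directory and task name are hypothetical placeholders.
#
#   pa = PerfAnalysis('/path/to/rtapp/results')
#   print(pa.tasks())
#   pa.plotPerf('task_ramp', title='rt-app performance')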
 | 
	apache-2.0 | 