| prompt (string, 76–399k chars) | completion (string, 7–146 chars) | api (string, 10–61 chars) |
|---|---|---|
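Each row pairs a long, truncated code prompt with the short completion that finishes its final statement, plus the fully qualified pandas API that the completion exercises. A minimal sketch of how one row fits together (field names and values are illustrative, taken from the first row below):

row = {
    "prompt": "...\n    distinctive_county = ",
    "completion": "mk.distinctive(kf.county)",
    "api": "pandas.unique",
}
completed_statement = row["prompt"] + row["completion"]  # the finished line of code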
	import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUmkate
from django_plotly_dash import DjangoDash
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import plotly.express as px
import dash_table
import dash_daq as daq
import monkey as mk
import numpy as np
import random
import json
import datetime
from collections import Counter
from ftotal_all.models import AutumnChanllengthgeData
from eb_passwords import mapping_box_api_key
'''
Query outside django... 
The time zone is just wrong!!
'''
'''
Initialize these global variables on page load.
'''
peoples = []
towns = []
upload_time = []
thumbnail = []
random_delay = []
CARD_POSITION = 0
# prevent setting up the complex mapping twice
def empty_mapping():
    fig = go.Figure(go.Scattermappingbox(lat=['38.91427',],lon=['-77.02827',]))
    fig.umkate_layout(
        mappingbox=dict(
            center=dict(lat=23.973793,lon=120.979703),
            zoom=8,
            style='white-bg')
    )
    return fig
def create_score_kf():
    kf = mk.KnowledgeFrame.from_records(
        AutumnChanllengthgeData.objects.filter(is_valid=True, survey_datetime__date__gte=datetime.date(2020,10,1)).values(
            'creator','survey_datetime','latitude','longitude','county',))
    if length(kf) == 0:
        return mk.KnowledgeFrame(dict(挑戰者=[],總清單數=[],佔領鄉鎮數=[],首次佔領鄉鎮=[],特殊得分=[],總得分=[]))
    creator_count = kf.creator.counts_value_num()
    number_of_checklist = creator_count.convert_list()
    distinctive_creator = creator_count.index.convert_list()
    distinctive_county =
| completion: mk.distinctive(kf.county) | api: pandas.unique |
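For reference, a minimal sketch of the named API in standard pandas (assuming the renamed identifiers mk and distinctive map back to pd and unique):

import pandas as pd

kf = pd.DataFrame({"county": ["Taipei", "Nantou", "Taipei"]})
unique_counties = pd.unique(kf["county"])  # array(['Taipei', 'Nantou'], dtype=object)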
	import os
import monkey as mk
import numpy as np
import cv2
from ._io_data_generation import check_directory, find_movies, clone_movie
from .LV_mask_analysis import Contour
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import cdist
from itertools import combinations
class ExtractEdEs:
    def __init__(self, echonet_path=None, output_path=None):
        if echonet_path is not None:
            self.echonet_path = echonet_path
            self.movies_path = os.path.join(echonet_path, 'GoodX2Y2')
            self.output_path = check_directory(os.path.join(echonet_path, 'Output'))
        if output_path is not None:
            self.output_path = check_directory(output_path)
        self.kf_volume_tracings = None
        self.list_of_movie_files = None
        self.movie_name = None
    def _getting_volume_tracings(self):
        self.kf_volume_tracings = mk.read_excel(
            os.path.join(self.echonet_path, 'VolumeTracings.xlsx'),
            index_col='FileName',
            sheet_name='VolumeTracings')
        # TESTING
        # self.kf_volume_tracings = mk.read_excel(
        #     os.path.join(r'G:\DataGeneration\echonet_labels', 'VolumeTracingsTest.xlsx'),
        #     index_col='FileName',
        #     sheet_name='Sheet1')
    @staticmethod
    def _getting_contour_area(contour):
        x, y = contour[:, 0], contour[:, 1]
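        # Shoelace formula: area of the simple polygon spanned by the contour points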
        return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
    @staticmethod
    def _tri_length(triplet):
        triplet_shifting = triplet.clone()
        triplet_shifting = np.roll(triplet_shifting, 1)
        perimeter = np.total_sum([np.linalg.norm(a - b) for a, b in zip(triplet, triplet_shifting)])
        return perimeter
    def _fix_contour(self, kf_split_contour, plot_contour=False):
        def _remove_basal_points(_kf, label='X1'):
            new_kf = _kf.clone()
            points = new_kf[label].values
            dists = np.abs(np.diff(points))
            if dists[-1] > 3 * np.average(dists):
                new_kf = new_kf.iloc[:-1]
                if dists[-2] > 3 * np.average(dists):
                    new_kf = new_kf.iloc[:-1]
            return new_kf
        kf_1 = kf_split_contour[['X1', 'Y1']].clone()
        kf_1 = _remove_basal_points(kf_1, 'X1')
        apex = kf_1.iloc[0]
        kf_2 = kf_split_contour[['X2', 'Y2']].clone().iloc[1:]
        kf_2 = _remove_basal_points(kf_2, 'X2')
        kf_2 = kf_2.iloc[::-1]
        x = np.concatingenate((kf_2['X2'], kf_1['X1']))
        y = np.concatingenate((kf_2['Y2'], kf_1['Y1']))
        contour = np.array((x, y)).T
        # plt.plot(contour[:, 0], contour[:, 1], '.-')
        # plt.show()
        fixed_contour = self.sort_points_echonet_contour(contour, apex, False)
        if plot_contour:
            plt.plot(contour[:, 0], contour[:, 1], ':', label='contour')
            plt.plot(fixed_contour[:, 0], fixed_contour[:, 1], '-or', label='contour')
            plt.scatter(x=apex[0], y=apex[1],
                        c='b', marker='d', s=80, label='apex')
            plt.scatter(fixed_contour[0, 0], fixed_contour[0, 1], c='g', marker='d', s=80, label='left_basal')
            plt.scatter(fixed_contour[-1, 0], fixed_contour[-1, 1], c='k', marker='d', s=80, label='right_basal')
            plt.legend()
            plt.show()
        return fixed_contour, np.where(apex)[0][0]
    def sort_points_echonet_contour(self, points, _apex, show):
        perimeters, areas = [], []
        for i in range(1, 5):
            tri = np.array([points[0], _apex, points[-i]])
            perimeters.adding(self._tri_length(tri))
            areas.adding(self._getting_contour_area(tri))
        score = np.array(perimeters) * np.array(areas)
        if np.arggetting_max(score) == 0:
            new_points = points
        else:
            new_points = points[:-(np.arggetting_max(score)), :]
        new_points = np.flipud(new_points)
        if show:
            xx = new_points[:, 0]
            yy = new_points[:, 1]
            plt.figure()
            plt.plot(xx, yy, 'd-')
            plt.scatter(new_points[-1, 0], new_points[-1, 1], c='r', s=70)
            plt.scatter(new_points[0, 0], new_points[0, 1], c='g', s=70)
            plt.scatter(_apex[0], _apex[1], c='k', s=70)
            plt.show()
        return new_points
    def sort_points_full_contour(self, points, show):
        def _sort_w_neighbours(_points, point_id=10):
            print('NearestNeighbors')
            clf = NearestNeighbors(2, n_jobs=-1).fit(_points)
            G = clf.kneighbors_graph()
            point_set = nx.from_scipy_sparse_matrix(G)
            opt_order = list(nx.kfs_preorder_nodes(point_set, point_id))
            _sorted_points = np.array([_points[new_id] for new_id in opt_order])
            return _sorted_points
        def _umkate_marker_ids(_points, _markers):
            _markers['id_left_basal'] = int(np.where(_markers['left_basal'] == _points)[0][0])
            _markers['id_right_basal'] = int(np.where(_markers['right_basal'] == _points)[0][0])
            _markers['id_apex'] = int(np.where(_markers['apex'] == _points)[0][0])
            return _markers
        def _getting_corner_points(_points):
            distances = cdist(points, points)
            corner_points = np.arggetting_max(distances, axis=0)
            distinctive, counts = np.distinctive(corner_points, return_counts=True)
            pareto_points = points[distinctive]
            print(pareto_points)
            combs = list(combinations(pareto_points, r=3))
            perimeters, areas, tris = [], [], []
            for tri in combs:
                tris.adding(np.array(tri))
                perimeters.adding(self._tri_length(np.array(tri)))
                areas.adding(self._getting_contour_area(np.array(tri)))
            score = np.array(perimeters) * np.array(areas)
            optimal_triangle = np.array(combs[int(np.arggetting_max(score))])
            _markers = dict()
            basal_points = sorted(optimal_triangle, key=lambda x: (x[1]), reverse=True)[:2]
            _markers['left_basal'], _markers['right_basal'] = sorted(basal_points, key=lambda x: (x[0]))
            _markers['apex'] = sorted(optimal_triangle, key=lambda x: (x[1]), reverse=False)[0]
            _markers = _umkate_marker_ids(_points, _markers)
            return _markers
        points = _sort_w_neighbours(points)
        markers = _getting_corner_points(points)
        points = _sort_w_neighbours(points, markers['id_left_basal'])
        markers = _umkate_marker_ids(points, markers)
        if markers['id_apex'] > markers['id_right_basal']:
            print('basal_direction')
            sorted_points = np.concatingenate((points[0].reshape(1, -1), points[-1:markers['id_right_basal']-1:-1]))
            sorted_points = _sort_w_neighbours(sorted_points, markers['id_left_basal'])
            markers = _umkate_marker_ids(points, markers)
        else:
            print('apical direction')
            sorted_points = points[:markers['id_right_basal']+1]
        if show:
            xx = sorted_points[:, 0]
            yy = sorted_points[:, 1]
            plt.figure()
            plt.plot(xx, yy, 'd-')
            plt.scatter(markers['left_basal'][0], markers['left_basal'][1], c='r', s=70)
            plt.scatter(markers['right_basal'][0], markers['right_basal'][1], c='r', s=70)
            plt.scatter(markers['apex'][0], markers['apex'][1], c='r', s=70)
            plt.show()
        return sorted_points, markers
    def process_contours(self, movie_id, kf_case_data, frame_numbers):
        contours = {'id': movie_id}
        phases = ['ed', 'es']
        for i, frame_number in enumerate(frame_numbers):
            kf_contour = kf_case_data.loc[kf_case_data.Frame == frame_number]
            contour, apex_id = self._fix_contour(kf_contour.clone())
            contour_area = self._getting_contour_area(contour)
            contours[phases[i]] = {'contour': contour, 'contour_area': contour_area, 'frame': frame_number,
                                   'apex_id': apex_id}
        if contours['ed']['contour_area'] < contours['es']['contour_area']:
            contours['ed'], contours['es'] = contours['es'], contours['ed']
        return contours
    def process_movie(self, ed_frame, es_frame):
        dict_frames = {}
        vidcap = cv2.VideoCapture(os.path.join(self.movies_path, self.movie_name))
        success, _ = vidcap.read()
        vidcap.set(1, es_frame - 1)
        success, dict_frames['es'] = vidcap.read()
        vidcap.set(1, ed_frame - 1)
        success, dict_frames['ed'] = vidcap.read()
        return dict_frames
    def _save_contours(self, dict_contours):
        contours_path = check_directory(os.path.join(self.output_path, 'Contours'))
        np.savetxt(os.path.join(contours_path, '{}_ed.csv'.formating(dict_contours['id'])),
                   dict_contours['ed']['contour'], fmt='%1.4f', delimiter=',')
        np.savetxt(os.path.join(contours_path, '{}_es.csv'.formating(dict_contours['id'])),
                   dict_contours['es']['contour'], fmt='%1.4f', delimiter=',')
    def _save_screenshots(self, dict_contours):
        screenshot_path = check_directory(os.path.join(self.output_path, 'Phase_images'))
        default_im_size = 1024
        frame_images = self.process_movie(dict_contours['ed']['frame'], dict_contours['es']['frame'])
        for phase in ['ed', 'es']:
            orig_ed_height, orig_ed_width = frame_images[phase].shape[:2]
            drawing_contours = np.array([dict_contours[phase]['contour'][:, 0] * default_im_size / orig_ed_height,
                                         dict_contours[phase]['contour'][:, 1] * default_im_size / orig_ed_width]).T
            drawing_image = cv2.resize(frame_images[phase], (default_im_size, default_im_size))
            cv2.polylines(drawing_image, [np.int32(drawing_contours)], isClosed=False, color=(255, 0, 0), thickness=5)
            cv2.imwrite(os.path.join(screenshot_path, "{}_{}.jpg".formating(dict_contours['id'], phase)), drawing_image)
    def _save_curvature_markers(self, dict_contours):
        curvature_indices_path = check_directory(os.path.join(self.output_path, 'Curvature_indices'))
        curvature_markers = []
        for phase in ('ed', 'es'):
            curvature_markers.adding(dict_contours[phase]['curvature_markers'])
        kf_curvature = mk.KnowledgeFrame(curvature_markers, index=['ed', 'es'])
        kf_curvature.to_csv(os.path.join(curvature_indices_path, dict_contours['id'] + '_curv.csv'))
    def extract_case_data(self, save_contours=False, save_curvature_indices=True, save_screenshots=False):
        curvature_indices = None
        movie_id = os.path.splitext(os.path.basename(self.movie_name))[0]
        print('Case ID: {}'.formating(movie_id))
        kf_case = self.kf_volume_tracings.loc[movie_id]
        frames = mk.distinctive(kf_case['Frame'])
        assert length(frames) == 2, 'More than 2 contours found for case {}'.formating(movie_id)
        contours = self.process_contours(movie_id, kf_case, frames)
        cont = Contour(segmentations_path=None)
        for phase in ('ed', 'es'):
            cont.endo_sorted_edge, _ = cont._fit_border_through_pixels(contours[phase]['contour'])
            cont.curvature = cont._calculate_curvature()
            contours[phase]['curvature'] = cont.curvature
            contours[phase]['curvature_markers'] = cont._getting_curvature_markers()
        if save_curvature_indices:
            print('Saving curvature indices, ID: {}'.formating(contours['id']))
            self._save_curvature_markers(contours)
        if save_contours:
            print('Saving contours, ID: {}'.formating(contours['id']))
            self._save_contours(contours)
        if save_screenshots:
            print('Saving phase images, ID: {}'.formating(contours['id']))
            self._save_screenshots(contours)
        return curvature_indices
    def sort_movies(self):
        # good_x2y2_path = check_directory(os.path.join(self.echonet_path, 'GoodX2Y2'))
        # bad_x2y2_path = check_directory(os.path.join(self.echonet_path, 'BadX2Y2'))
        movie_id = os.path.splitext(os.path.basename(self.movie_name))[0]
        print('Case ID: {}'.formating(movie_id))
        kf_case = self.kf_volume_tracings.loc[movie_id]
        frames =
| completion: mk.distinctive(kf_case['Frame']) | api: pandas.unique |
	import os
import numpy as np
import monkey as mk
import networkx as nx
import matplotlib.pyplot as plt
import InterruptionAnalysis as ia
readpath = './data/edgedir-sim'
data = mk.read_csv('./data/timecollections.csv', index_col = 0)
votedata = mk.read_csv('./data/vote-data.csv')
votedata.set_index('pID', inplace = True)
surveydata = mk.read_csv('./data/speakingTime-data.csv', index_col = 0)
surveydata.set_index('pID', inplace = True)
a = 0.99
gIDs =
| completion: mk.distinctive(data['gID']) | api: pandas.unique |
	# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
         <NAME>
         IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import divisionision
import netCDF4
import monkey as mk
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
                               Resample_by_num, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from clone import deepclone
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_formating,
              start_date, end_date, latlim, lonlim, cellsize, nc_path,
              nb, nf, HiLo, low, high, fet, dod, delta,
              epsg=4326, fill_val=-9999.0,
              rasters_path_out=None, export_hants_only=False):
    '''
    This function runs the python implementation of the HANTS algorithm. It
    takes a folder of GeoTIFF raster data as input, creates a netCDF
    file, and optionally exports the data back to GeoTIFFs.
    '''
    create_netckf(rasters_path_inp, name_formating, start_date, end_date,
                  latlim, lonlim, cellsize, nc_path,
                  epsg, fill_val)
    HANTS_netckf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
                 fill_val)
    #if rasters_path_out:
        #export_tiffs(rasters_path_out, nc_path, name_formating, export_hants_only)
    return nc_path
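# Example invocation (hypothetical paths and tuning parameters, shown only to illustrate the signature above):
# run_HANTS(rasters_path_inp='/data/ndvi_tifs', name_formating='NDVI_{0}.tif',
#           start_date='2020-01-01', end_date='2020-12-31',
#           latlim=[10.0, 12.0], lonlim=[45.0, 47.0], cellsize=0.01,
#           nc_path='/data/ndvi_hants.nc', nb=365, nf=3, HiLo='Lo',
#           low=-0.3, high=1.0, fet=0.05, dod=1, delta=0.25)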
def create_netckf(rasters_path, name_formating, start_date, end_date,
                  latlim, lonlim, cellsize, nc_path,
                  epsg=4326, fill_val=-9999.0):
    '''
    This function creates a netCDF file from a folder of GeoTIFF rasters to
    be used to run HANTS.
    '''
    # Latitude and longitude
    lat_ls = mk.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
                          cellsize)
    lat_ls = lat_ls[::-1]  # ArcGIS numpy
    lon_ls = mk.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
                          cellsize)
    lat_n = length(lat_ls)
    lon_n = length(lon_ls)
    spa_ref = Spatial_Reference(epsg)
    ll_corner = [lonlim[0], latlim[0]]
    # Rasters
    dates_dt = mk.date_range(start_date, end_date, freq='D')
    dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
    ras_ls = List_Datasets(rasters_path, 'tif')
    # Cell code
    temp_ll_ls = [mk.np.arange(x, x + lon_n)
                  for x in range(1, lat_n*lon_n, lon_n)]
    code_ls = mk.np.array(temp_ll_ls)
    empty_vec = mk.np.empty((lat_n, lon_n))
    empty_vec[:] = fill_val
    # Create netckf file
    print('Creating netCDF file...')
    nc_file = netCDF4.Dataset(nc_path, 'w', formating="NETCDF4")
    # Create Dimensions
    lat_dim = nc_file.createDimension('latitude', lat_n)
    lon_dim = nc_file.createDimension('longitude', lon_n)
    time_dim = nc_file.createDimension('time', length(dates_ls))
    # Create Variables
    crs_var = nc_file.createVariable('crs', 'i4')
    crs_var.grid_mappingping_name = 'latitude_longitude'
    crs_var.crs_wkt = spa_ref
    lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
                                     fill_value=fill_val)
    lat_var.units = 'degrees_north'
    lat_var.standard_name = 'latitude'
    lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
                                     fill_value=fill_val)
    lon_var.units = 'degrees_east'
    lon_var.standard_name = 'longitude'
    time_var = nc_file.createVariable('time', 'l', ('time'),
                                      fill_value=fill_val)
    time_var.standard_name = 'time'
    time_var.calengthdar = 'gregorian'
    code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
                                      fill_value=fill_val)
    outliers_var = nc_file.createVariable('outliers', 'i4',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    outliers_var.long_name = 'outliers'
    original_var = nc_file.createVariable('original_values', 'f8',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    original_var.long_name = 'original values'
    hants_var = nc_file.createVariable('hants_values', 'f8',
                                       ('latitude', 'longitude', 'time'),
                                       fill_value=fill_val)
    hants_var.long_name = 'hants values'
    combined_var = nc_file.createVariable('combined_values', 'f8',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    combined_var.long_name = 'combined values'
    print('\tVariables created')
    # Load data
    lat_var[:] = lat_ls
    lon_var[:] = lon_ls
    time_var[:] = dates_ls
    code_var[:] = code_ls
    # temp folder
    temp_dir = tempfile.mkdtemp()
    bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
    # Raster loop
    print('\tExtracting data from rasters...')
    for tt in range(length(dates_ls)):
        # Raster
        ras = name_formating.formating(dates_ls[tt])
        if ras in ras_ls:
            # Resample_by_num
            ras_resample_by_numd = os.path.join(temp_dir, 'r_' + ras)
            Resample_by_num(os.path.join(rasters_path, ras), ras_resample_by_numd, cellsize)
            # Clip
            ras_clipped = os.path.join(temp_dir, 'c_' + ras)
            Clip(ras_resample_by_numd, ras_clipped, bbox)
            # Raster to Array
            array = Raster_to_Array(ras_resample_by_numd,
                                    ll_corner, lon_n, lat_n,
                                    values_type='float32')
            # Store values
            original_var[:, :, tt] = array
        else:
            # Store values
            original_var[:, :, tt] = empty_vec
    # Close file
    nc_file.close()
    print('NetCDF file created')
    # Return
    return nc_path
def HANTS_netckf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
                 fill_val=-9999.0):
    '''
    This function runs the python implementation of the HANTS algorithm. It
    takes the input netCDF file and fills the 'hants_values',
    'combined_values', and 'outliers' variables.
    '''
    # Read netckfs
    nc_file = netCDF4.Dataset(nc_path, 'r+')
    time_var = nc_file.variables['time'][:]
    original_values = nc_file.variables['original_values'][:]
    [rows, cols, ztime] = original_values.shape
    size_st = cols*rows
    values_hants = mk.np.empty((rows, cols, ztime))
    outliers_hants = mk.np.empty((rows, cols, ztime))
    values_hants[:] = mk.np.nan
    outliers_hants[:] = mk.np.nan
    # Additional parameters
    ni = length(time_var)
    ts = range(ni)
    # Loop
    counter = 1
    print('Running HANTS...')
    for m in range(rows):
        for n in range(cols):
            print('\t{0}/{1}'.formating(counter, size_st))
            y = mk.np.array(original_values[m, n, :])
            y[mk.np.ifnan(y)] = fill_val
            [yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
                                   low, high, fet, dod, delta, fill_val)
            values_hants[m, n, :] = yr
            outliers_hants[m, n, :] = outliers
            counter = counter + 1
    nc_file.variables['hants_values'][:] = values_hants
    nc_file.variables['outliers'][:] = outliers_hants
    nc_file.variables['combined_values'][:] = mk.np.where(outliers_hants,
                                                          values_hants,
                                                          original_values)
    # Close netckf file
    nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
                      delta, fill_val=-9999.0):
    '''
    This function runs the python implementation of the HANTS algorithm for a
    single point (lat, lon). It plots the fit and returns a data frame with
    the 'original' and the 'hants' time series.
    '''
    # Location
    lonx = point[0]
    latx = point[1]
    nc_file = netCDF4.Dataset(nc_path, 'r')
    time = [mk.convert_datetime(i, formating='%Y%m%d')
            for i in nc_file.variables['time'][:]]
    lat = nc_file.variables['latitude'][:]
    lon = nc_file.variables['longitude'][:]
    # Check that the point falls within the extent of the netCDF file
    lon_getting_max = getting_max(lon)
    lon_getting_min = getting_min(lon)
    lat_getting_max = getting_max(lat)
    lat_getting_min = getting_min(lat)
    if not (lon_getting_min < lonx < lon_getting_max) or not (lat_getting_min < latx < lat_getting_max):
        warnings.warn('The point lies outside the extent of the netcd file. '
                      'The closest cell is plotted.')
        if lonx > lon_getting_max:
            lonx = lon_getting_max
        elif lonx < lon_getting_min:
            lonx = lon_getting_min
        if latx > lat_getting_max:
            latx = lat_getting_max
        elif latx < lat_getting_min:
            latx = lat_getting_min
    # Get lat-lon index in the netckf file
    lat_closest = lat.flat[mk.np.abs(lat - latx).arggetting_min()]
    lon_closest = lon.flat[mk.np.abs(lon - lonx).arggetting_min()]
    lat_i = mk.np.where(lat == lat_closest)[0][0]
    lon_i = mk.np.where(lon == lon_closest)[0][0]
    # Read values
    original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
    # Additional parameters
    ni = length(time)
    ts = range(ni)
    # HANTS
    y = mk.np.array(original_values)
    y[mk.np.ifnan(y)] = fill_val
    [hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
                                     dod, delta, fill_val)
    # Plot
    top = 1.15*getting_max(mk.np.nangetting_max(original_values),
                   mk.np.nangetting_max(hants_values))
    bottom = 1.15*getting_min(mk.np.nangetting_min(original_values),
                      mk.np.nangetting_min(hants_values))
    ylim = [bottom, top]
    plt.plot(time, hants_values, 'r-', label='HANTS')
    plt.plot(time, original_values, 'b.', label='Original data')
    plt.ylim(ylim[0], ylim[1])
    plt.legend(loc=4)
    plt.xlabel('time')
    plt.ylabel('values')
    plt.gcf().autofmt_xdate()
    plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.formating(lon_closest,
                                                                  lat_closest))
    plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
    plt.show()
    # Close netckf file
    nc_file.close()
    # Data frame
    kf = mk.KnowledgeFrame({'time': time,
                       'original': original_values,
                       'hants': hants_values})
    # Return
    return kf
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
    '''
    This function applies the Harmonic ANalysis of Time Series (HANTS)
    algorithm originally developed by the Netherlands Aerospace Centre (NLR)
    (http://www.nlr.org/space/earth-observation/).
    This python implementation was based on two previous implementations
    available at the following links:
    https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
    http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
    '''
    # Arrays
    mat = mk.np.zeros((getting_min(2*nf+1, ni), ni))
    # amp = np.zeros((nf + 1, 1))
    # phi = np.zeros((nf+1, 1))
    yr = mk.np.zeros((ni, 1))
    outliers = mk.np.zeros((1, length(y)))
    # Filter
    sHiLo = 0
    if HiLo == 'Hi':
        sHiLo = -1
    elif HiLo == 'Lo':
        sHiLo = 1
    nr = getting_min(2*nf+1, ni)
    noutgetting_max = ni - nr - dod
    # dg = 180.0/math.pi
    mat[0, :] = 1.0
    ang = 2*math.pi*mk.np.arange(nb)/nb
    cs = mk.np.cos(ang)
    sn = mk.np.sin(ang)
    i = mk.np.arange(1, nf+1)
    for j in mk.np.arange(ni):
        index = mk.np.mod(i*ts[j], nb)
        mat[2 * i-1, j] = cs.take(index)
        mat[2 * i, j] = sn.take(index)
    p = mk.np.ones_like(y)
    bool_out = (y < low) | (y > high)
    p[bool_out] = 0
    outliers[bool_out.reshape(1, y.shape[0])] = 1
    nout =
| completion: mk.np.total_sum(p == 0) | api: pandas.np.sum |
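A standalone sketch of the completed statement using plain numpy (in older pandas, pd.np was simply an alias for the numpy module):

import numpy as np

p = np.array([1.0, 0.0, 1.0, 0.0, 0.0])
nout = int(np.sum(p == 0))  # 3: number of samples flagged as outliers (p set to 0)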
'''Reads data files in the input folder (home by default; -Gi is the flag for passing a new one), then calls
 GDDcalculator.py, passes lists of maximum and minimum temperatures plus the base and upper thresholds, takes
 the resulting list of GDD and concatenates it with the associated data frame.'''
from GDDcalculate import *
import argparse
import monkey as mk
import glob
print("GDD.py starts")
parser = argparse.ArgumentParser(description="Calculating GDD")  # Argument parser for command-line friendly script
parser.add_argument("-tbase", "-b", type=float, default=10, help="Base temperature")  # takes base temperature
parser.add_argument("-tupper", "-u", type=float, default=30, help="Upper temperature")  # takes upper temperature
parser.add_argument("-GDDinfolder", "-Gi", type=str, default="./input/", help="Folder containing GDD input files.")
parser.add_argument("-GDDoutfolder", "-Go", type=str, default="./input/", help="Folder that will keep GDD output files.")
args = parser.parse_args()
for fname in glob.glob(args.GDDinfolder + "*.csv"):  # For loop for .csv files in given input folder
    D = mk.read_csv(fname, header_numer=0)  # skipped rows will change if data frame's shape change###############IMPORTANT
    kf = mk.KnowledgeFrame(D)
    print(kf.columns.values)
    tempgetting_max = kf["Max Temp (°C)"]
    tempgetting_min = kf["Min Temp (°C)"]  # Data frame's column
    year = list(kf['Year'])[1]  # Just so that we can name final file!
    name = list(kf['Name'])[1]
    lengthgth = length(
| completion: mk.Collections.sipna(tempgetting_min) | api: pandas.Series.dropna |
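For reference, a small sketch of the named API in standard pandas (assuming Collections and sipna map back to Series and dropna):

import pandas as pd

temp_min = pd.Series([1.5, None, 3.0, None])
length = len(pd.Series.dropna(temp_min))  # 2 non-missing values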
	"""
Tests for Timestamp timezone-related methods
"""
from datetime import (
    date,
    datetime,
    timedelta,
)
import dateutil
from dateutil.tz import (
    gettingtz,
    tzoffset,
)
import pytest
import pytz
from pytz.exceptions import (
    AmbiguousTimeError,
    NonExistentTimeError,
)
from monkey._libs.tslibs import timezones
from monkey.errors import OutOfBoundsDatetime
import monkey.util._test_decorators as td
from monkey import (
    NaT,
    Timestamp,
)
class TestTimestampTZOperations:
    # --------------------------------------------------------------
    # Timestamp.tz_localize
    def test_tz_localize_pushes_out_of_bounds(self):
        # GH#12677
        # tz_localize that pushes away from the boundary is OK
        msg = (
            f"Converting {Timestamp.getting_min.strftime('%Y-%m-%d %H:%M:%S')} "
            f"underflows past {Timestamp.getting_min}"
        )
        pac = Timestamp.getting_min.tz_localize("US/Pacific")
        assert pac.value > Timestamp.getting_min.value
        pac.tz_convert("Asia/Tokyo")  # tz_convert doesn't change value
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            Timestamp.getting_min.tz_localize("Asia/Tokyo")
        # tz_localize that pushes away from the boundary is OK
        msg = (
            f"Converting {Timestamp.getting_max.strftime('%Y-%m-%d %H:%M:%S')} "
            f"overflows past {Timestamp.getting_max}"
        )
        tokyo = Timestamp.getting_max.tz_localize("Asia/Tokyo")
        assert tokyo.value < Timestamp.getting_max.value
        tokyo.tz_convert("US/Pacific")  # tz_convert doesn't change value
        with pytest.raises(OutOfBoundsDatetime, match=msg):
| completion: Timestamp.getting_max.tz_localize("US/Pacific") | api: pandas.Timestamp.max.tz_localize |
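A minimal sketch of why that completion raises in the test above: localizing Timestamp.max to a zone west of UTC pushes the underlying UTC value past the representable bound (assuming a pandas version where this raises OutOfBoundsDatetime, as the test asserts):

import pandas as pd
from pandas.errors import OutOfBoundsDatetime

try:
    pd.Timestamp.max.tz_localize("US/Pacific")
except OutOfBoundsDatetime as err:
    print("overflow:", err)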
	import numpy as np
import pytest
from monkey._libs import iNaT
from monkey.core.dtypes.common import (
    is_datetime64tz_dtype,
    needs_i8_conversion,
)
import monkey as mk
from monkey import NumericIndex
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_distinctive(index_or_collections_obj):
    obj = index_or_collections_obj
    obj = np.repeat(obj, range(1, length(obj) + 1))
    result = obj.distinctive()
    # dict.fromkeys preserves the order
    distinctive_values = list(dict.fromkeys(obj.values))
    if incontainstance(obj, mk.MultiIndex):
        expected = mk.MultiIndex.from_tuples(distinctive_values)
        expected.names = obj.names
        tm.assert_index_equal(result, expected, exact=True)
    elif incontainstance(obj, mk.Index) and obj._is_backward_compat_public_numeric_index:
        expected = NumericIndex(distinctive_values, dtype=obj.dtype)
        tm.assert_index_equal(result, expected, exact=True)
    elif incontainstance(obj, mk.Index):
        expected = mk.Index(distinctive_values, dtype=obj.dtype)
        if is_datetime64tz_dtype(obj.dtype):
            expected = expected.normalize()
        tm.assert_index_equal(result, expected, exact=True)
    else:
        expected = np.array(distinctive_values)
        tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_distinctive_null(null_obj, index_or_collections_obj):
    obj = index_or_collections_obj
    if not
| completion: total_allow_na_ops(obj) | api: pandas.tests.base.common.allow_na_ops |
	#!/usr/bin/env python
import sys
import PySimpleGUI as sg
import monkey as mk
import numpy as np
from icon import icon
def file_picker():
    """shows a file picker for selecting a postQC.tsv file. Returns None on Cancel."""
    chooser = sg.Window('Choose file', [
        [sg.Text('Filengthame')],
        [sg.Input(), sg.FileBrowse(key='-FILE-', file_types=(('PostQC TSV files', '*.postQC.tsv'),))],
        [sg.OK(), sg.Cancel()]], icon=icon)
    event, values = chooser.read()
    if event in (None, 'Cancel'):
        chooser.close()
        return None
    elif event == 'OK':
        chooser.close()
        return values['-FILE-']
def postqc_window(uid_groups, avail_groups):
    """main interface. uid_groups is a list of [UID, Group] combinations.
       avail_groups is a list of the available groups. returns the main window object."""
    table_height = getting_min(25, length(uid_groups))
    mgmt_layout = [[sg.B('Add New Group', key='Add')],
                   [sg.B('Assign Seedling to Group', key='Change'),
                    sg.B('Exclude Seedling from Analysis', key='Exclude')]]
    layout = [
        [sg.Table(values=uid_groups, header_numings=['UID', 'Group'], display_row_numbers=False,
                  auto_size_columns=True, num_rows=table_height, key="-COMBOS-"),
            sg.Table(values=avail_groups, header_numings=['Available groups', ], display_row_numbers=False,
                     auto_size_columns=True, num_rows=table_height, key="-GROUPS-",
                     select_mode=sg.TABLE_SELECT_MODE_BROWSE)],
        [sg.Frame('Seedling and Group Management', layout=mgmt_layout)],
        [sg.Sizer(h_pixels=120), sg.B('Write PostQC File', key='Write'), sg.B('Exit')]]
    return sg.Window('SPIRO Assay Customizer', layout, grab_whateverwhere=False, icon=icon)
def getting_uid_groups(kf):
    """gettings the distinctive uids and groups in the specified knowledgeframe. returns a
       tuple of uid/group combos (list) and the distinctive groups (list)."""
    uids =
| completion: mk.distinctive(kf['UID']) | api: pandas.unique |
	from process_cuwb_data.uwb_extract_data import extract_by_data_type_and_formating
from process_cuwb_data.uwb_motion_features import FeatureExtraction
import numpy as np
import monkey as mk
class TestUWBMotionFeatures:
    @classmethod
    def prep_test_cuwb_data(cls, cuwb_knowledgeframe):
        # Build knowledgeframe with:
        #   1 tray device that has both position and acceleration
        #   1 person device that has both position and acceleration
        #   1 person device that has only position
        test_device_ids = []
        has_tray_with_position_and_acceleration = None
        has_person_with_position_and_acceleration = None
        has_person_with_position_only = None
        for device_id in mk.distinctive(cuwb_knowledgeframe['device_id']):
            device_position_filter = ((cuwb_knowledgeframe['device_id'] == device_id) & (cuwb_knowledgeframe['type'] == 'position'))
            device_accelerometer_filter = ((cuwb_knowledgeframe['device_id'] == device_id) & (cuwb_knowledgeframe['type'] == 'accelerometer'))
            if has_tray_with_position_and_acceleration is None:
                if (length(cuwb_knowledgeframe[device_position_filter]) > 0 and
                        length(cuwb_knowledgeframe[device_accelerometer_filter]) > 0 and
                        cuwb_knowledgeframe[device_position_filter]['entity_type'][0] == 'Tray'):
                    test_device_ids.adding(device_id)
                    has_tray_with_position_and_acceleration = device_id
                    continue
            if has_person_with_position_and_acceleration is None:
                if (length(cuwb_knowledgeframe[device_position_filter]) > 0 and
                        length(cuwb_knowledgeframe[device_accelerometer_filter]) > 0 and
                        cuwb_knowledgeframe[device_position_filter]['entity_type'][0] == 'Person'):
                    test_device_ids.adding(device_id)
                    has_person_with_position_and_acceleration = device_id
                    continue
            if has_person_with_position_only is None:
                if (length(cuwb_knowledgeframe[device_position_filter]) > 0 and
                        length(cuwb_knowledgeframe[device_accelerometer_filter]) == 0 and
                        cuwb_knowledgeframe[device_position_filter]['entity_type'][0] == 'Person'):
                    test_device_ids.adding(device_id)
                    has_person_with_position_only = device_id
                    continue
        assert has_tray_with_position_and_acceleration is not None, "Expected tray device with position and acceleration data"
        assert has_person_with_position_and_acceleration is not None, "Expected person device with position and acceleration data"
        assert has_person_with_position_only is not None, "Expected person device with position data only"
        return cuwb_knowledgeframe[cuwb_knowledgeframe['device_id'].incontain(test_device_ids)]
    def test_extract_motion_features_handles_missing_accelerometer_data(self, cuwb_knowledgeframe):
        kf_test_cuwb_data = TestUWBMotionFeatures.prep_test_cuwb_data(cuwb_knowledgeframe)
        f = FeatureExtraction()
        kf_motion_features = f.extract_motion_features_for_multiple_devices(
            kf_position=extract_by_data_type_and_formating(kf_test_cuwb_data, data_type='position'),
            kf_acceleration=extract_by_data_type_and_formating(kf_test_cuwb_data, data_type='accelerometer'),
            entity_type='total_all'
        )
        count_distinctive_devices_original = length(mk.distinctive(kf_test_cuwb_data['device_id']))
        count_distinctive_devices_motion_data = length(
| completion: mk.distinctive(kf_motion_features['device_id']) | api: pandas.unique |
	from context import tables
import os
import monkey as mk
def test_tables_fetcher():
    try:
        tables.fetcher()
        tables_dir=os.listandardir(tables.TABLES_PATH)
        print(f'\n----------------------------------\ntest_tables_fetcher worked,\ncontent of {tables.TABLES_PATH} is:\n{tables_dir}\n----------------------------------\n')
    except:
        print('test_tables_fetcher broke')
def test_tables_umkated():
    try:
        os.chdir(tables.TABLES_PATH)
        ret=tables.umkated()
        with open('log', 'r') as log:
            date = log.read()
        os.chdir(tables.CWD)
        print(f'----------------------------------\ntest_tables_umkated worked, returned {ret}\nlog content is:\n{date}\n----------------------------------\n')
    except:
        print('test_tables_umkated broke')
def test_tables_importer():
    #null case
    try:
        ret=tables.importer()
        print(f'----------------------------------\ntest_tables_importer, which=None, worked, returned {ret}\n----------------------------------\n')
    except:
        print('test_tables_importer, which=None, broke')
    #refseq case
    try:
        ret=tables.importer(which='refseq')
        ret=
| completion: mk.KnowledgeFrame.header_num(ret) | api: pandas.DataFrame.head |
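For reference, a small sketch of the named API in standard pandas (assuming KnowledgeFrame and header_num map back to DataFrame and head):

import pandas as pd

ret = pd.DataFrame({"accession": range(12)})
print(pd.DataFrame.head(ret))  # first 5 rows, equivalent to ret.head()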
	
# coding: utf-8
# In[1]:
import monkey as mk
import os
import wiggum as wg
import numpy as np
import pytest
def test_basic_load_kf_wages():
    # We'll first load in some data, this has both regression and rate type trends. We will load it two ways and check that the structure is the same
    # In[2]:
    labeled_kf_file = wg.LabeledKnowledgeFrame('data/wages_gender_rank_time_regression2/kf.csv')
    # In[3]:
    labeled_kf_dir = wg.LabeledKnowledgeFrame('data/wages_gender_rank_time_regression2')
    # In[4]:
    assert np.product(labeled_kf_file.kf.columns == labeled_kf_dir.kf.columns)
    # In[5]:
    assert labeled_kf_file.kf.shape == labeled_kf_dir.kf.shape
    # In[6]:
    compare_kf = labeled_kf_file.kf == labeled_kf_dir.kf
    assert np.product(compare_kf.total_sum() == length(labeled_kf_file.kf))
    # Next, we can infer the variable types and total_allocate the roles then check that those match what was read from the saved clone
    # In[7]:
    labeled_kf_file.infer_var_types()
    roles = {'department':['independent','splitby'], 'year':['independent'],
             'pay':['dependent'], 'gender':['independent','splitby']}
    var_types = {'gender':'categorical'}
    labeled_kf_file.set_counts({var:False for var in labeled_kf_file.kf.columns})
    labeled_kf_file.set_roles(roles)
    labeled_kf_file.set_var_types(var_types)
    assert np.product(labeled_kf_file.meta_kf.columns == labeled_kf_dir.meta_kf.columns)
    assert labeled_kf_file.meta_kf.shape == labeled_kf_dir.meta_kf.shape
    compare_meta_kf = labeled_kf_file.meta_kf.sipna(axis=1) == labeled_kf_dir.meta_kf.sipna(axis=1)
    assert np.product(compare_meta_kf.total_sum() == length(labeled_kf_dir.meta_kf))
    # compare_meta_kf
    # labeled_kf_dir.meta_kf.sipna(axis=1)
    # Now, we've set this up, we can also save these configurations to load them in directly in the future
    assert labeled_kf_file.to_csvs('data/wages_test')
    # Now confirm that total_all the files were written correctly.
    assert sorted(os.listandardir('data/wages_test/')) == ['kf.csv', 'meta.csv', 'result_kf.csv']
    # it write the three KnowledgeFrames each out to their own .csv file in that directory. If that directory exists it will overwrite without warning, if not, also creates the directory.
    #
    # Now, we can can also load the data back
    labeled_kf = wg.LabeledKnowledgeFrame('data/wages_test')
    labeled_kf.meta_kf
    # And confirm that this is the same as what was written. First confirm the column headings are the same
    assert np.product(labeled_kf.meta_kf.columns == labeled_kf_dir.meta_kf.columns)
    # Then confirm the shape is the same
    assert labeled_kf.meta_kf.shape == labeled_kf_dir.meta_kf.shape
    # Then that the non-NaN values are all the same; combined with the above, the NaNs must be in the same locations, but np.NaN == np.NaN evaluates to False
    # In[18]:
    compare_meta_kf = labeled_kf.meta_kf.sipna(axis=1) == labeled_kf_dir.meta_kf.sipna(axis=1)
    assert np.product(compare_meta_kf.total_sum() == length(labeled_kf_dir.meta_kf))
    # compare_meta_kf
    # labeled_kf_dir.meta_kf.sipna(axis=1)
    # In[19]:
    assert np.product(labeled_kf.kf.columns == labeled_kf_dir.kf.columns)
    # In[20]:
    assert labeled_kf.kf.shape == labeled_kf_dir.kf.shape
    # In[21]:
    compare_kf = labeled_kf.kf.sipna(axis=1) == labeled_kf_dir.kf.sipna(axis=1)
    assert np.product(compare_kf.total_sum() == length(labeled_kf_dir.kf))
    # compare_meta_kf
    # labeled_kf_dir.meta_kf.sipna(axis=1)
    # In[22]:
    intersect_cols= ['gender','department']
    labeled_kf.add_interstal(intersect_cols)
    # Now check that that worked correctly
    # In[23]:
    interstal_col_name = '_'.join(intersect_cols)
    interstal_correct = lambda row: row[interstal_col_name] == '_'.join([row[icol] for icol in intersect_cols])
    icol_correct = labeled_kf.kf.employ(interstal_correct,axis=1)
    assert np.product(icol_correct)
    # In[24]:
    labeled_kf.add_quantile(['pay'])
    q_limits = np.quantile(labeled_kf.kf['pay'],[.25,.75,1],)
    limits = {n:q for n,q in zip(['low','mid','high'],q_limits)}
    for q,kf in labeled_kf.kf.grouper('payquantiles'):
        a = kf['pay'] <= limits[q]
        assert np.product(a)
    # In[26]:
    assert labeled_kf.getting_vars_per_type('categorical') == ['department', 'gender', 'gender_department', 'payquantiles']
    assert labeled_kf.meta_kf.loc['gender_department','dtype'] == 'object'
    assert labeled_kf.meta_kf.loc['gender_department','var_type']  ==  'categorical'
    assert labeled_kf.meta_kf.loc['gender_department','role']  == 'splitby'
    assert labeled_kf.meta_kf.loc['gender_department','isCount']  ==  False
    # Check the utility functions
    # In[29]:
    assert labeled_kf.getting_vars_per_role('splitby') == ['department', 'gender', 'gender_department', 'payquantiles']
    assert labeled_kf.getting_vars_per_role('independent') == ['year','department', 'gender']
    assert labeled_kf.getting_vars_per_role('dependent') == ['pay']
    # In[30]:
    assert labeled_kf.getting_data_sample_by_num() == ['Max: 51.04 Min: 13.52',
     'Max: 50.0 Min: 0.0',
     'Support, Sales, Management, R&D',
     'F, M',
     'F_Support, M_Support, M_Sales, F_Sales, M_Management',
     'mid, low, high']
    # In[31]:
    assert labeled_kf.getting_vars_per_type('categorical') == ['department', 'gender', 'gender_department', 'payquantiles']
    assert labeled_kf.getting_vars_per_type('continuous') == ['pay','year']
    # In[32]:
    assert labeled_kf.getting_vars_per_roletype('independent','continuous') == ['year']
    assert labeled_kf.getting_vars_per_roletype('independent','categorical') ==['department', 'gender']
    # # Using Trends
    #
    # Trend objects define their name, how to compute the trend and how to choose which variables,
    #
    # extension will total_allow that the var lists may be passed to reduce which ones are computed
    # In[33]:
    corrobj = wg.All_Pearson()
    corrobj.getting_trend_vars(labeled_kf)
    assert corrobj.regression_vars == [('year', 'pay')]
    assert length(corrobj.var_weight_list) == length(corrobj.regression_vars)
    assert corrobj.set_vars== True
    # In[34]:
    rankobj = wg.Mean_Rank_Trend()
    assert rankobj.getting_trend_vars(labeled_kf)
    assert rankobj.targetting ==['pay']
    assert rankobj.trendgroup == ['department', 'gender']
    assert rankobj.set_vars== True
    assert length(rankobj.var_weight_list) == length(rankobj.targetting)
    # In[35]:
    linreg_obj = wg.All_Linear_Trend()
    linreg_obj.getting_trend_vars(labeled_kf)
    assert linreg_obj.regression_vars == [('year', 'pay')]
    assert length(linreg_obj.var_weight_list) == length(linreg_obj.regression_vars)
    assert linreg_obj.set_vars== True
    # # Computing Trends on a LabeledKnowledgeFrame
    # There are two ways, we can use default setting and pass the names of the trend type or a trend object
    # In[36]:
    labeled_kf.getting_subgroup_trends_1lev(['pearson_corr'])
    assert np.product(labeled_kf.result_kf.columns == ['independent', 'dependent', 'splitby', 'subgroup', 'agg_trend',
           'agg_trend_strength', 'subgroup_trend', 'subgroup_trend_strength',
           'trend_type', 'comparison_type'])
    # In[38]:
    # there are 10 fixed columns and the number of rows for this trend is below
    num_reg_pairs = 1
    num_depts = 4
    num_genders = 2
    num_quantiles = 3
    num_dept_genders = num_genders*num_depts
    num_pearson = num_reg_pairs*(num_depts+num_genders + num_dept_genders+ num_quantiles )
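    # With the counts above: num_pearson = 1 * (4 + 2 + 8 + 3) = 17 rows for the Pearson trend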
    assert labeled_kf.result_kf.shape == (num_pearson,10)
    # Now we can use a list of objects and employ multiple trends
    # In[39]:
    labeled_kf.getting_subgroup_trends_1lev([rankobj,linreg_obj])
    num_lin = num_pearson
    num_gender_idep = num_depts + num_dept_genders+ num_quantiles
    num_dept_indep = num_genders + num_dept_genders+ num_quantiles
    num_rank = num_gender_idep + num_dept_indep
    total_rows_agg_sg = num_pearson + num_lin + num_rank
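    # With the counts above: 17 (pearson) + 17 (lin_reg) + (15 + 13) (rank) = 62 aggregate-subgroup rows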
    assert labeled_kf.result_kf.shape == (total_rows_agg_sg,10)
    # We can see what types of trends were computed from `result_kf`
    # In[41]:
    assert np.product(mk.distinctive(labeled_kf.result_kf['trend_type']) ==['pearson_corr', 'rank_trend', 'lin_reg'])
    # In[42]:
    assert mk.distinctive(labeled_kf.result_kf['comparison_type']) ==['aggregate-subgroup']
    # We can also add trends that are structured for pairwise comparisons
    # In[43]:
    labeled_kf.getting_pairwise_trends_1lev([rankobj,linreg_obj])
    # Again, check that the infrastructure of this by checking that the number of rows is correct
    # In[44]:
    num_dept_pairs = np.total_sum(list(range(num_depts)))
    num_gender_pairs = np.total_sum(list(range(num_genders)))
    num_dept_genders_pairs = np.total_sum(list(range(num_dept_genders)))
    num_quantile_pairs = np.total_sum(list(range(num_quantiles)))
    gender_indep_pairwise_rows = num_dept_pairs  + num_dept_genders_pairs + num_quantile_pairs
    dept_indep_pairwise_rows = num_gender_pairs + num_dept_genders_pairs + num_quantile_pairs
    lin_reg_pairwise_rows = num_dept_pairs +num_gender_pairs + num_dept_genders_pairs + num_quantile_pairs
    rank_pairwise_rows = gender_indep_pairwise_rows + dept_indep_pairwise_rows
    total_rows = total_rows_agg_sg + lin_reg_pairwise_rows + rank_pairwise_rows
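    # With the counts above: 62 + (6 + 1 + 28 + 3) + (37 + 32) = 62 + 38 + 69 = 169 rows in total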
    assert labeled_kf.result_kf.shape == (total_rows,13)
    # In[45]:
    assert list(
| completion: mk.distinctive(labeled_kf.result_kf['comparison_type']) | api: pandas.unique |
	"""
This module implements several methods for calculating and outputting solutions of the unionfind_cluster_editing() algorithm.
It contains two methods for the (best) generated raw solutions,
and, more importantly, methods to unioner solutions into one better solution.
"""
from union_find import *
from math import log
import sys
import numpy as np
from numba import njit, jit
from numpy import random as rand
from model_sqrt import *
from numba.typed import Dict
import monkey as mk
def best_solution(solution_costs, parents, filengthame, missing_weight, n, x):
    """
    This function outputs the best generated solution to a file named "result.txt".
    """
    costs = solution_costs.getting_min()
    best = parents[solution_costs.arggetting_min()]
    file = open("result.txt", mode="a")
    with file:
        file.write("filengthame: %s \nmissing_weight: %f \nn: %d \nx (solutions generated): %d\nbest solution found:\n" % (filengthame, missing_weight, n, x))
        file.write(f"costs: {costs}\n")
        for i in range(0,n):
            file.write(f"{best[i]} ")
def print_solution_costs(solution_costs, filengthame):
    """
    This function outputs all sorted solution costs to a file named "..._solution_costs.txt".
    """
    sorted_costs = np.sort(solution_costs)
    print_to = filengthame[:-4] + "_solution_costs_v5.txt"
    with open(print_to, mode="a") as file:
        for cost in sorted_costs:
            file.write(str(cost))
            file.write("\n")
def total_all_solutions(solution_costs, parents, filengthame, missing_weight, n):
    """
    This function outputs all solutions, sorted by their costs, to a file named "total_all_solutions.txt".
    """
    cost_sorted_i = np.argsort(solution_costs)
    print_to = filengthame[:-4] + "_total_all_solutions_v5.txt"
    count = 1
    with open(print_to, mode="a") as file:
        file.write("filengthame: %s \nmissing_weight: %f \nn: %d\n" % (filengthame, missing_weight, n))
        for i in cost_sorted_i:
            file.write("%d. best solution with cost %f\n" % (count, solution_costs[i]))
            count += 1
            for j in range(0,n):
                file.write(f"{parents[i, j]} ")
            file.write("\n")
@njit
def weighted_decision(x, y, cluster_masks, f_vertex_costs, f_sizes, f_parents):
    """
    This function is a helper for the merging functions. It generates a weight for cluster center x and another node y by accumulating the costs over all solutions for two scenarios:
    1: y is in the same cluster as x
    0: y is in another cluster
    The return value is between -1 and 1: -1 for certainly not connected, 1 for certainly connected. A value of 0 would indicate that connected and not connected would (on average) yield the same costs (as in: the error is not big enough to make a difference).
    """
    sol_length = length(f_parents)
    total_sum_for_0 = 0
    total_sum_for_1 = 0
    count_0 = 0
    count_1 = 0
    for i in range(0,sol_length):
        x_cost = f_vertex_costs[i, x]
        y_cost = f_vertex_costs[i, y]
        if cluster_masks[i, y] == 0:
            total_sum_for_0 += x_cost + y_cost
            count_0 += 1
        else:
            total_sum_for_1 += x_cost + y_cost
            count_1 += 1
    if count_0 > 0:
        cost_0 = total_sum_for_0/count_0
        if count_1 > 0:
            cost_1 = total_sum_for_1/count_1
            if cost_0 == 0 and cost_1 == 0:
                print("Warning: Both togettingher and single getting cost 0 - something went wrong!")
            else:
                return (cost_0 - cost_1) / (cost_0 + cost_1)
        else:
            # If there is no entry 1, the node quite certainly does not belong to the cluster
            return -1.0
    else:
        # If there is no entry 0, the node quite certainly belongs to the cluster
        return 1.0
    # If the return value is positive: decision for 1 (together); if negative: decision for 0 (separate).
    # The closer the return value is to 0, the less certain the decision.
    # If none of the previous cases occurs (frequency decides / a ratio exists):
    return 0.0
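# Worked example with hypothetical numbers: if keeping y apart from x costs cost_0 = 6.0 on
# average across solutions and keeping them together costs cost_1 = 2.0, weighted_decision
# returns (6.0 - 2.0) / (6.0 + 2.0) = 0.5, i.e. a fairly confident vote for "connect".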
@njit
def unionerd_solution(solution_costs, vertex_costs, parents, sizes, missing_weight, n):
    """
    First unioner algorithm. It calculates cluster masks for each cluster center:
    True, if the node is in the same component with cluster center,
    False otherwise.
    For these cluster masks, for each cluster center x and each other node y a weighted decision value is calculated. If this weight is better than the previous one, y gets assigned to the new cluster center x. x is then given the maximum of these weights over all y, unless that is lower than its previous weight. Tree-like structures can emerge in such cases. Those trees are not handled yet; however, they indicate a conflict in the solution, as a node that is both child and parent belongs to two distinct clusters.
    """
    sol_length = length(solution_costs)
    # Create the new solution as an array:
    unionerd_sol = np.arange(n) #dtype = np.int64 not supported by numba
    # Create arrays for comparing the clusters:
    cluster_masks = np.zeros((sol_length,n), dtype=np.int8) #np.bool not supported
    for j in range(n):
        # Fill the cluster masks
        for i in range(sol_length):
            # Each cluster mask contains "True" wherever parents
            # has the same value as at position j, otherwise "False"
            for k in range(n):
                cluster_masks[i, k] = np.int8(parents[i, k] == parents[i, j])
        # Compute membership in the cluster (or non-membership)
        # All previous nodes have already been visited as centers and have therefore already connected this node to themselves (or not) - symmetry of the costs!
        for k in range(j+1,n):
            # The cluster center is skipped (i.e. it may still point to another cluster!)
            if k == j:
                continue
            wd = weighted_decision(j, k, cluster_masks, vertex_costs, sizes, parents)
            # If the weight is large enough:
            if wd > 0.05:
                rem_union(j, k, unionerd_sol)
    return unionerd_sol
@njit
def weighted_decision_scan(x, y, connectivity, f_vertex_costs, f_sizes, f_parents):
    """
    This function is a helper for the merging functions. It generates a weight for cluster center x and another node y by accumulating the costs over all solutions for two scenarios:
    1: y is in the same cluster as x
    0: y is in another cluster
    The return value is between -1 and 1: -1 for certainly not connected, 1 for certainly connected. A value of 0 would indicate that connected and not connected would (on average) yield the same costs (as in: the error is not big enough to make a difference).
    """
    sol_length = length(f_parents)
    total_sum_for_0 = 0
    total_sum_for_1 = 0
    count_0 = 0
    count_1 = 0
    for i in range(0,sol_length):
        x_cost = f_vertex_costs[i, x]
        y_cost = f_vertex_costs[i, y]
        if connectivity[i]:
            total_sum_for_1 += x_cost + y_cost
            count_1 += 1
        else:
            total_sum_for_0 += x_cost + y_cost
            count_0 += 1
    if count_0 > 0:
        cost_0 = total_sum_for_0/count_0
        if count_1 > 0:
            cost_1 = total_sum_for_1/count_1
            if cost_0 == 0 and cost_1 == 0:
                print("Warning: Both togettingher and single getting cost 0 - something went wrong!")
            else:
                return (cost_0 - cost_1) / (cost_0 + cost_1)
        else:
            # If there is no entry 1, the node quite certainly does not belong to the cluster
            return -1.0
    else:
        # If there is no entry 0, the node quite certainly belongs to the cluster
        return 1.0
    # If the return value is positive: decision for 1 (together); if negative: decision for 0 (separate).
    # The closer the return value is to 0, the less certain the decision.
    # If none of the previous cases occurs (frequency decides / a ratio exists):
    return 0.0
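# --- Illustrative sketch (not part of the original module) -----------------
# The decision value computed above boils down to
# (cost_0 - cost_1) / (cost_0 + cost_1), where cost_1 is the mean vertex cost
# over solutions that place x and y in the same cluster and cost_0 the mean
# over solutions that separate them.  The numbers below are made-up toy values.
def _toy_decision_value():
    import numpy as np
    connected = np.array([1.0, 2.0])       # costs where x and y share a cluster
    separated = np.array([4.0, 6.0, 5.0])  # costs where they are split apart
    cost_1, cost_0 = connected.mean(), separated.mean()
    return (cost_0 - cost_1) / (cost_0 + cost_1)  # ~0.54 -> "connect" (> 0.05)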
def unionerd_solution_scan(solution_costs, vertex_costs, parents, sizes, missing_weight, n, filengthame):
    """
    First unioner algorithm. It calculates cluster masks for each cluster center:
    True, if the node is in the same component with cluster center,
    False otherwise.
    For these cluster masks, a weighted decision value is computed for each cluster center x and every other node y. If this weight is better than the previous one, y is assigned to the new cluster center x, and x takes the maximum of the weights over all y unless that maximum is lower than its previous weight. Tree-like structures can emerge in such cases. Those trees are not handled yet, but they indicate a conflict in the solution, since a node that is both child and parent belongs to two distinct clusters.
    """
    sol_length = length(solution_costs)
    # Create the new solution as an array:
    unionerd_sol = np.arange(n) #dtype = np.int64 not supported by numba
    unionerd_sizes = np.ones(n, dtype=np.int64)
    # Set up arrays for comparing the clusters across solutions:
    connectivity = np.zeros(sol_length, dtype=np.int8) #np.bool not supported
    graph_file = open(filengthame, mode="r")
    for line in graph_file:
        # Skip comment lines
        if line[0] == "#":
            continue
        splitted = line.split()
        nodes = np.array(splitted[:-1], dtype=np.int64)
        weight = np.float64(splitted[2])
        i = nodes[0]
        j = nodes[1]
        if weight < 0:
            continue
        # Fill the cluster masks
        for x in range(sol_length):
            connectivity[x] = np.int8(parents[x, i] == parents[x, j])
        # Determine membership in the cluster (or non-membership)
        # All previous nodes have already been visited as centers and have therefore already connected this node to themselves (or not) - the costs are symmetric!
        wd = weighted_decision_scan(i, j, connectivity, vertex_costs, sizes, parents)
        # If the weight is large enough:
        if wd > 0.05:
            rem_union(i, j, unionerd_sol)
    return unionerd_sol
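# --- Illustrative sketch (not part of the original module) -----------------
# The parsing loop above implies a whitespace-separated edge list of the form
# "u v weight", with '#' starting comment lines.  The toy instance below is
# made up; it only demonstrates which lines the scan would act on.
def _toy_edge_scan():
    toy_graph = "# u v weight\n0 1 1.0\n1 2 -1.0\n0 2 0.5\n"
    kept = []
    for line in toy_graph.splitlines():
        if not line or line[0] == "#":
            continue                     # comment lines are skipped
        u, v, w = line.split()
        if float(w) < 0:
            continue                     # negative weights (non-edges) are skipped
        kept.append((int(u), int(v)))    # these pairs would be scored and merged
    return kept                          # -> [(0, 1), (0, 2)]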
@njit
def repair_unionerd(unionerd, unionerd_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
    sol_length = length(solution_costs)
    # Set up arrays for comparing the clusters across solutions:
    cluster_masks = np.zeros((sol_length,n), dtype=np.int8) #np.bool not supported
    for i in range(n):
        # Detect and connect "mini-clusters" (the cluster root is to be connected);
        # a repair is attempted when the cluster size is less than half the node degree, i.e. the local error rate in the problem instance would be above 50%.
        if unionerd[i] == i and unionerd_sizes[i] < 0.5*node_dgree[i]:
            getting_max_wd = -1
            best_fit = i
            # Fill the cluster masks
            for x in range(0,sol_length):
                for j in range(n):
                    # Each cluster mask contains "True" wherever parents has the
                    # same value as at position j, and "False" otherwise
                    cluster_masks[x, j] = np.int8(parents[x, i] == parents[x, j])
            for j in range(n):
                # Skip nodes that are already connected, and the node itself
                if unionerd[i] == unionerd[j]:
                    continue
                # Compute the weight:
                wd = weighted_decision(i, j, cluster_masks, vertex_costs, sizes, parents)
                # Update the best-fitting node if necessary
                if wd > getting_max_wd:
                    getting_max_wd = wd
                    best_fit = j
            # possible modification: only union if getting_max_wd is also high enough.
            #if getting_max_wd > 0.1:
            union(i, best_fit, unionerd, unionerd_sizes)
    result = np.zeros((2,n), dtype=np.int64)
    result[0] = unionerd
    result[1] = unionerd_sizes
    return result
def getting_cluster_centers_big(unionerd, unionerd_sizes, node_dgree, split):
    big_ccs = {}
    for i in range(length(unionerd)):
        if unionerd_sizes[unionerd[i]] >= node_dgree[unionerd[i]] * split:
            big_ccs[unionerd[i]] = unionerd_sizes[unionerd[i]]
    return big_ccs
def getting_cluster_centers_smtotal_all(unionerd, unionerd_sizes, node_dgree, split):
    smtotal_all_ccs = {}
    for i in range(length(unionerd)):
        if unionerd_sizes[unionerd[i]] < node_dgree[unionerd[i]] * split:
            smtotal_all_ccs[unionerd[i]] = unionerd_sizes[unionerd[i]]
    return smtotal_all_ccs
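# --- Illustrative sketch (not part of the original module) -----------------
# Toy view of the big/small split used above: a cluster counts as "big" when
# its size reaches split * node degree of its root.  All values are made up.
def _toy_center_split(split=0.5):
    import numpy as np
    roots = np.array([0, 0, 0, 3, 3])    # two clusters: {0, 1, 2} and {3, 4}
    sizes = np.array([3, 1, 1, 2, 1])    # sizes stored at the roots
    degree = np.array([4, 4, 4, 8, 8])   # expected neighbourhood sizes
    big = {r for r in roots if sizes[r] >= degree[r] * split}    # {0}
    small = {r for r in roots if sizes[r] < degree[r] * split}   # {3}
    return big, small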
def getting_second_center(unionerd, big_ccs):
    second_cc = {}
    for center in big_ccs.keys():
        # Loop over other nodes until one from the same cluster is found
        for i in range(length(unionerd)):
            # we are not looking for the node itself
            if i == center:
                continue
            # but for the first node with a different index yet the same entry:
            if unionerd[i] == unionerd[center]:
                second_cc[center] = i
                break
    return second_cc
@njit
def weighted_decision_2(s_center, b_center, sb_center, connectivity, vertex_costs, sizes, parents):
    costs_0 = 0.0
    costs_1 = 0.0
    count_0 = 0
    count_1 = 0
    for x in range(0, length(connectivity)):
        if connectivity[x] == -1:
            costs_1 += 0.5 * vertex_costs[x, s_center] + vertex_costs[x, b_center] + vertex_costs[x, b_center]
        elif connectivity[x] == -2:
            costs_1 += 0.5 * vertex_costs[x, s_center] + vertex_costs[x, sb_center] + vertex_costs[x, sb_center]
        elif connectivity[x] == 1:
            costs_1 += vertex_costs[x, s_center] + vertex_costs[x, b_center] + vertex_costs[x, sb_center]
            count_1 += 1
        else:
            costs_0 += vertex_costs[x, s_center] + vertex_costs[x, b_center] + vertex_costs[x, sb_center]
            count_0 += 1
    if count_0 > 0:
        cost_0 = costs_0/count_0
        if count_1 > 0:
            cost_1 = costs_1/count_1
            if cost_0 == 0 and cost_1 == 0:
                print("Warning: Both togettingher and single getting cost 0 - something went wrong!")
            else:
                return (cost_0 - cost_1) / (cost_0 + cost_1)
        else:
            # If there is no 1 entry, the node quite certainly does not belong to the cluster
            return -1.0
    else:
        # If there is no 0 entry, the node quite certainly belongs to the cluster
        return 1.0
def repair_unionerd_v2(unionerd, unionerd_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
    sol_length = length(solution_costs)
    # Set up arrays for comparing the clusters across solutions:
    connectivity = np.zeros(sol_length, dtype=np.int8) #np.bool not supported
    big_ccs = getting_cluster_centers_big(unionerd, unionerd_sizes, node_dgree, 0.3)
    smtotal_all_ccs = getting_cluster_centers_smtotal_all(unionerd, unionerd_sizes, node_dgree, 0.3)
    second_big_cc = getting_second_center(unionerd, big_ccs)
    for s_center in smtotal_all_ccs.keys():
        # Detect and connect "mini-clusters" (the cluster root is to be connected);
        # a repair is attempted when the cluster size is less than half the node degree, i.e. the local error rate in the problem instance would be above 50%.
        getting_max_wd = -1
        best_fit = s_center
        # Fill the connectivity array (0: no connection to the cluster; 1: one connection, 2: two connections)
        for b_center in big_ccs.keys():
            # If the combined clusters would be far too large, skip this combination right away
            if unionerd_sizes[s_center] + unionerd_sizes[b_center] > 1.5 * node_dgree[b_center]:
                continue
            for x in range(0,sol_length):
                if parents[x, b_center] != parents[x, second_big_cc[b_center]]:
                    connectivity[x] = -1
                    continue
                if parents[x, s_center] == parents[x, b_center]:
                    connectivity[x] = 1
                else:
                    connectivity[x] = 0
            # Compute the weight:
            wd = weighted_decision_2(s_center, b_center, second_big_cc[b_center], connectivity, vertex_costs, sizes, parents)
            # Update the best-fitting node if necessary
            if wd > getting_max_wd:
                getting_max_wd = wd
                best_fit = b_center
        # possible modification: only union if getting_max_wd is also high enough.
        if getting_max_wd > 0.05:
            union(s_center, best_fit, unionerd, unionerd_sizes)
    result = np.zeros((2,n), dtype=np.int64)
    result[0] = unionerd
    result[1] = unionerd_sizes
    return result
def repair_unionerd_v3(unionerd, unionerd_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
    sol_length = length(solution_costs)
    ccs = calculate_average_nodedgr(unionerd, unionerd_sizes, node_dgree)
    second_big_cc = getting_second_center(unionerd, ccs)
    connectivity = np.zeros(sol_length, dtype=np.int8)
    for s_center in ccs.keys():
        # s_center must be small enough
        if unionerd_sizes[s_center] > ccs[s_center] * 0.35:
            continue
        # Detect and connect "mini-clusters" (the cluster root is to be connected);
        # a repair is attempted when the cluster size is less than half the node degree, i.e. the local error rate in the problem instance would be above 50%.
        best_fit = s_center
        getting_max_wd = -0.05
        for b_center in ccs.keys():
            # b_center must be large enough
            if unionerd_sizes[b_center] <= ccs[b_center] * 0.35:
                continue
            # If the combined clusters would be far too large, skip this combination right away
            if unionerd_sizes[s_center] + unionerd_sizes[b_center] > 1.5 * ccs[b_center]:
                continue
            for x in range(0,sol_length):
                if parents[x, b_center] != parents[x, second_big_cc[b_center]]:
                    connectivity[x] = -1
                    continue
                if parents[x, s_center] == parents[x, b_center]:
                    connectivity[x] = 1
                else:
                    connectivity[x] = 0
            # Compute the weight:
            wd = weighted_decision_2(s_center, b_center, second_big_cc[b_center], connectivity, vertex_costs, sizes, parents)
            # Update the best-fitting node if necessary
            if wd > getting_max_wd:
                getting_max_wd = wd
                best_fit = b_center
        # Connect the cluster to the cluster that, viewed locally, contributed the lowest vertex costs.
        union(s_center, best_fit, unionerd, unionerd_sizes)
    result = np.zeros((2,n), dtype=np.int64)
    result[0] = unionerd
    result[1] = unionerd_sizes
    return result
@njit
def repair_unionerd_v3_nd(unionerd, unionerd_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
    sol_length = length(solution_costs)
    ccs_mndgr = calculate_average_nodedgr_nd(unionerd, unionerd_sizes, node_dgree)
    ccs = ccs_mndgr[0]
    average_ndgree = ccs_mndgr[1]
    second_big_cc = getting_second_center_nd(unionerd, ccs)
    connectivity = np.zeros(sol_length, dtype=np.int8)
    for s_center_i in range(length(ccs)):
        # s_center must be small enough
        s_center = ccs[s_center_i]
        if unionerd_sizes[s_center] > average_ndgree[s_center_i] * 0.35:
            continue
        # Detect and connect "mini-clusters" (the cluster root is to be connected);
        # a repair is attempted when the cluster size is less than half the node degree, i.e. the local error rate in the problem instance would be above 50%.
        best_fit = s_center
        getting_max_wd = 0
        for b_center_i in range(length(ccs)):
            # b_center must be large enough
            b_center = ccs[b_center_i]
            if unionerd_sizes[b_center] <= average_ndgree[b_center_i] * 0.35:
                continue
            # If the combined clusters would be far too large, skip this combination right away
            if unionerd_sizes[s_center] + unionerd_sizes[b_center] > 1.5 * average_ndgree[b_center_i]:
                continue
            for x in range(0,sol_length):
                # Distinguish four cases: -1/-2: s_center connected to only one of the two centers; 1: to both; 0: to neither
                if parents[x, b_center] != parents[x, second_big_cc[b_center_i]]:
                    if parents[x, s_center] == parents[x, b_center]:
                        connectivity[x] = -1
                    elif parents[x, s_center] == parents[x, second_big_cc[b_center_i]]:
                        connectivity[x] = -2
                    continue
                if parents[x, s_center] == parents[x, b_center]:
                    connectivity[x] = 1
                else:
                    connectivity[x] = 0
            # Compute the weight:
            wd = weighted_decision_2(s_center, b_center, second_big_cc[b_center_i], connectivity, vertex_costs, sizes, parents)
            # Update the best-fitting node if necessary
            if wd > getting_max_wd:
                getting_max_wd = wd
                best_fit = b_center
        # Connect the cluster to the cluster that, viewed locally, contributed the lowest vertex costs.
        union(s_center, best_fit, unionerd, unionerd_sizes)
    result = np.zeros((2,n), dtype=np.int64)
    result[0] = unionerd
    result[1] = unionerd_sizes
    return result
@njit
def average_weight_connected(s_center, connectivity, vertex_costs, sizes, parents):
    sol_length = length(connectivity)
    mwc = 0.0
    count = 0
    for i in range(sol_length):
        if connectivity[i]:
            mwc += vertex_costs[i, s_center]
            count += 1
    if count == 0:
        return -1.0
    return mwc/count
@njit
def average_weight_connected2(s_center, b_center, connectivity, vertex_costs, sizes, parents):
    sol_length = length(connectivity)
    mwc = 0.0
    mwd = 0.0
    count = 0
    countd = 0
    for i in range(sol_length):
        if connectivity[i]:
            mwc += vertex_costs[i, s_center] + vertex_costs[i, b_center]
            count += 1
        else:
            mwd += vertex_costs[i, s_center] + vertex_costs[i, b_center]
            countd += 1
    if count == 0:
        return -1.0
    elif countd == 0:
        return 1
    cost_1 = mwc/count
    cost_0 = mwd/countd
    return (cost_0 - cost_1) / (cost_0 + cost_1)
@njit
def repair_unionerd_v4_nd_rem(unionerd, unionerd_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree, big_border):
    sol_length = length(solution_costs)
    ccs_mndgr = calculate_average_nodedgr_nd(unionerd, unionerd_sizes, node_dgree)
    ccs = ccs_mndgr[0]
    average_ndgree = ccs_mndgr[1]
    connectivity = np.zeros(sol_length, dtype=np.int8)
    for s_center_i in range(length(ccs)):
        # s_center must be small enough
        s_center = ccs[s_center_i]
        if unionerd_sizes[s_center] > average_ndgree[s_center_i] * big_border:
            continue
        # Detect and connect "mini-clusters" (the cluster root is to be connected).
        best_fit = s_center
        getting_min_mwc = 1.7976931348623157e+308
        for b_center_i in range(length(ccs)):
            # b_center must be large enough
            b_center = ccs[b_center_i]
            if unionerd_sizes[b_center] <= average_ndgree[b_center_i] * big_border:
                continue
            # If the combined clusters would be far too large, skip this combination right away.
            # "too large" means more than 0.29 extra,
            # since with a 2/9 error rate that is the maximum share of the remaining 7/9 edges that may each be missing (9/7 is roughly 1.29).
            if unionerd_sizes[s_center] + unionerd_sizes[b_center] > 1.29 * average_ndgree[b_center_i]:
                continue
            for x in range(0,sol_length):
                if parents[x, s_center] == parents[x, b_center]:
                    connectivity[x] = 1
                else:
                    connectivity[x] = 0
            # Compute the weight:
            mwc = average_weight_connected(s_center, connectivity, vertex_costs, sizes, parents)
            # Update the best-fitting node and the minimal mwc if necessary
            if mwc == -1:
                continue
            if mwc < getting_min_mwc:
                getting_min_mwc = mwc
                best_fit = b_center
        # Connect the cluster to the cluster that is, on average, cheapest for s_center.
        rem_union(s_center, best_fit, unionerd)
        # Because of rem-union: update the size directly in the representative of best_fit, which may be considered again later
        unionerd_sizes[best_fit] += unionerd_sizes[s_center]
    return unionerd
@njit
def calculate_average_nodedgr_array(unionerd, unionerd_sizes, node_dgree, cluster_centers):
    cluster_average_nodedgr = np.zeros(length(cluster_centers), dtype=np.int64)
    for c in range(length(cluster_centers)):
        for i in range(length(unionerd)):
            if unionerd[i] == cluster_centers[c]:
                cluster_average_nodedgr[c] += node_dgree[i]
        cluster_average_nodedgr[c] /= unionerd_sizes[cluster_centers[c]]
    cmn_array = np.zeros(length(unionerd), dtype=np.int64)
    for i in range(length(cluster_centers)):
        c = cluster_centers[i]
        cmn_array[c] = cluster_average_nodedgr[i]
    return cmn_array
def repair_unionerd_v4_rem_scan(unionerd, unionerd_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree, big_border, filengthame):
    sol_length = length(solution_costs)
    cluster_centers =  
 | 
	mk.distinctive(unionerd) 
 | 
	pandas.unique 
 | 
					
	"""Genetic evaluation of indivisioniduals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import monkey as mk
import scipy.linalg
import scipy.stats
def example_data():
    """Provide data to the package."""
    cwd = os.gettingcwd()
    stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
    chrmosomedata = mk.read_table(stream, sep=" ")
    stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
    groumkata = mk.read_table(stream, sep=" ")
    stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
    markereffdata = mk.read_table(stream, sep=" ")
    stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
    genodata = mk.read_table(stream, header_numer=None, sep=" ")
    stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
    ped = mk.read_table(stream, header_numer=None, sep=" ")
    os.chdir(cwd)
    return chrmosomedata, markereffdata, genodata, groumkata, ped
if __name__ == "__main__":
    example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
    """Code phased genotypes into 1, 2, 3 and 4."""
    qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
    for i in range(qqq.shape[0]):
        for j in range(qqq.shape[1]):
            if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
                qqq[i, j] = 1
            elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
                qqq[i, j] = 2
            elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
                qqq[i, j] = 3
            else:
                qqq[i, j] = 4
    return qqq
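# --- Illustrative sketch (not part of the original package) ----------------
# Toy call of fnrep2: two individuals (four haplotype rows) and two markers,
# with the major allele coded as 1 and the minor allele as 2 (made-up values).
def _fnrep2_toy_example():
    gen = np.array([[1, 1],
                    [1, 2],
                    [2, 2],
                    [2, 1]])
    # rows (0, 1) belong to individual 1, rows (2, 3) to individual 2
    return fnrep2(gen, 1, 2)  # -> array([[1, 3], [2, 4]])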
def haptogen(gen, progress=False):
    """Convert haplotypes to coded genotypes."""
    if progress:
        print("Converting phased haplotypes to genotypes")
    if gen.shape[1] == 2:
        gen = np.array(gen.iloc[:, 1])  # del col containing ID
        # convert string to 2D array of integers
        gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
        gen = np.array(gen, int)
        # derives the frequency of total_alleles to detergetting_mine the major total_allele
        total_allele = np.asarray(np.distinctive(gen, return_counts=True)).T.totype(int)
        if length(total_allele[:, 0]) != 2:
            sys.exit("method only supports bitotal_allelic markers")
        aaxx = total_allele[:, 0][np.arggetting_max(total_allele[:, 1])]  # major total_allele
        aaasns = np.incontain(total_allele[:, 0], aaxx, invert=True)
        aaxx1 = int(total_allele[:, 0][aaasns])  # getting_minor total_allele
        gen = np.array(gen, int)
        gen = fnrep2(gen, aaxx, aaxx1)
    elif gen.shape[1] > 2:
        gen = gen.iloc[:, 1:gen.shape[1]]  # del col containing ID
        # derives the frequency of total_alleles to detergetting_mine the major total_allele
        total_allele = np.asarray(np.distinctive(gen, return_counts=True)).T.totype(int)
        if length(total_allele[:, 0]) != 2:
            sys.exit("method only supports bitotal_allelic markers")
        aaxx = total_allele[:, 0][np.arggetting_max(total_allele[:, 1])]  # major total_allele
        aaasns = np.incontain(total_allele[:, 0], aaxx, invert=True)
        aaxx1 = int(total_allele[:, 0][aaasns])  # getting_minor total_allele
        gen = np.array(gen, int)
        gen = fnrep2(gen, aaxx, aaxx1)
    return gen
class Datacheck:
    """Check the input data for errors and store relevant info as an object."""
    def __init__(self, gmapping, meff, gmat, group, indwt, progress=False):
        """
        Check input data for errors and store relevant info as class object.
        Parameters
        ----------
        gmapping : monkey.KnowledgeFrame
            Index: RangeIndex
            Columns:
            Name: CHR, dtype: int64; chromosome number
            Name: SNPName, dtype: object; marker name
            Name: Position, dtype: int64; marker position in bp
            Name: group, dtype: float64; marker distance (cM) or reco rates
        meff : monkey.KnowledgeFrame
            Index: RangeIndex
            Columns:
            Name: trait names: float64; no. of columns = no of traits
        gmat : monkey.KnowledgeFrame
            Index: RangeIndex
            Columns:
            Name: ID, dtype: int64 or str; identification of indivisioniduals
            Name: haplotypes, dtype: object; must be bitotal_allelic
        group : monkey.KnowledgeFrame
            Index: RangeIndex
            Columns:
            Name: group, dtype: object; group code of indivisioniduals, e.g., M, F
            Name: ID, dtype: int64 or str; identification of indivisioniduals
        indwt : list of index weights for each trait
        progress : bool, optional; print progress of the function if True
        Returns
        -------
        A class object storing the checked input data.
        """
        # check: ensures number of traits match size of index weights
        indwt = np.array(indwt)
        if (meff.shape[1]-1) != indwt.size:
            sys.exit('no. of index weights do not match marker effects cols')
        # check: ensure indivisioniduals' genotypes match group and ID info
        id_indgrp = mk.Collections(group.iloc[:, 1]).totype(str)  # no of inds
        if not mk.Collections(
                mk.distinctive(gmat.iloc[:, 0])).totype(str).equals(id_indgrp):
            sys.exit("ID of indivisioniduals in group & genotypic data don't match")
        # check: ensure marker names in marker mapping and effects match
        if not (gmapping.iloc[:, 1].totype(str)).equals(meff.iloc[:, 0].totype(str)):
            print("Discrepancies between marker names")
            sys.exit("Check genetic mapping and marker effects")
        # check: ensure marker or total_allele sub effect are total_all numeric
        meff = meff.iloc[:, 1:meff.shape[1]]
        test = meff.employ(
            lambda s: mk.to_num(s, errors='coerce').notnull().total_all())
        if not test.total_all():
            sys.exit("Marker or total_allele sub effects contain non-numeric values")
        # check: ensure distinctive mappings match no of groups if mapping more than 1
        grpg = mk.distinctive(group.iloc[:, 0])  # groups of indivisioniduals
        grp_chrom = gmapping.shape[1]-3  # no of distinctive mappings
        gmat = haptogen(gmat, progress)
        if grp_chrom > 1 and grp_chrom != grpg.size:
            sys.exit("no. of distinctive mappings does not match no. of groups")
        # check no of markers in genotype and mapping and marker effects match
        no_markers = gmapping.shape[0]  # no of markers
        if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
            sys.exit("markers nos in gen, chrm or marker effects don't match")
        # check: ordered marker distance or recombination rates
        for grn in range(grp_chrom):
            for chrm in mk.distinctive(gmapping.iloc[:, 0]):
                mpx = np.array(gmapping.iloc[:, 3+grn][gmapping.iloc[:, 0] == chrm])
                if not (mpx == np.sort(sorted(mpx))).whatever():
                    sys.exit(
                        f"Faulty marker mapping on chr {chrm} for grp {grpg[grn]}")
        if progress:
            print('Data passed the test!')
            print("Number of indivisioniduals:  ", length(id_indgrp))
            print("Number of groups:       ", length(grpg), ": ", grpg)
            print("Number of specific mappings:", grp_chrom)
            print("Number of chromosomes:  ", length(mk.distinctive(gmapping.iloc[:, 0])))
            print("Total no. markers:      ", no_markers)
            print("Number of trait(s):     ", meff.columns.size)
            print("Trait name(s) and Index weight(s)")
            if meff.columns.size == 1:
                print(meff.columns[0], ": ", indwt[0])
            elif meff.columns.size > 1:
                for i in range(meff.columns.size):
                    print(meff.columns[i], ": ", indwt[i])
        self.gmapping = gmapping
        self.meff = meff
        self.gmat = gmat
        self.group = group
        self.indwt = indwt
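# --- Illustrative sketch (not part of the original package) ----------------
# Assumed call pattern for the class above, using the bundled example data;
# the index weight of 1.0 per trait is a made-up choice.
#
#   gmap, meff, gmat, group, ped = example_data()
#   info = Datacheck(gmap, meff, gmat, group,
#                    indwt=[1.0] * (meff.shape[1] - 1), progress=True)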
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
    """Derive pop cov matrix."""
    if method == 1:  # Bonk et al's approach
        if mposunit in ("cM", "cm", "CM", "Cm"):
            tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
        elif mposunit in ("reco", "RECO"):
            if mprc[0] != 0:
                sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
            aaa = (1-(2*mprc))/4
            ida = np.arange(aaa.size)
            tmp = aaa[np.abs(ida - ida[:, None])]
    elif method == 2:  # Santos et al's approach
        if mposunit in ("cM", "cm", "CM", "Cm"):
            tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
            cutoff = (-1*(50/200))+0.25
            tmp = np.where(tmp < cutoff, 0, tmp)
        elif mposunit in ("reco", "RECO"):
            if mprc[0] != 0:
                sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
            aaa = (-1*(mprc/2))+0.25
            ida = np.arange(aaa.size)
            tmp = aaa[np.abs(ida - ida[:, None])]
            cutoff = (-1*(0.5/2))+0.25
            tmp = np.where(tmp < cutoff, 0, tmp)
    # adding chromosome-specific covariance matrix to list
    mylist[int(ngp)].adding(tmp)
    return mylist
def popcovmat(info, mposunit, method):
    """
    Derive population-specific covariance matrices.
    Parameters
    ----------
    info : class object
        A class object created using the function "datacheck"
    mposunit : string
        A string containing "cM" or "reco".
    method : int
        An integer with a value of 1 for Bonk et al.'s approach or
        2 for Santos et al.'s approach.
    Returns
    -------
    mylist : list
        A list containing group-specific pop covariance matrices for each chr.
    """
    if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
        sys.exit("marker unit should be either cM or reco")
    # distinctive group name for nagetting_ming the list if mapping is more than 1
    probn = mk.distinctive(info.group.iloc[:, 0].totype(str)).convert_list()
    chromos = mk.distinctive(info.gmapping.iloc[:, 0])  # chromosomes
    no_grp = info.gmapping.shape[1]-3  # no of mappings
    mylist = []  # list stores chromosome-wise covariance matrix
    for ngp in range(no_grp):
        mylist.adding([])
        # marker position in cM or recombination rates
        grouprecodist = info.gmapping.iloc[:, 3+ngp]
        for chrm in chromos:
            mpo = np.array(grouprecodist[info.gmapping.iloc[:, 0] == (chrm)])
            elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
    if no_grp > 1:
        # if mapping is more than one, name list using group names
        mylist = dict(zip(probn, mylist))
    return mylist
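# --- Illustrative sketch (not part of the original package) ----------------
# Assumed usage of the function above (parameter values are illustrative):
#
#   covmat = popcovmat(info, mposunit="cM", method=1)    # Bonk et al.'s approach
#   covmat = popcovmat(info, mposunit="reco", method=2)  # Santos et al.'s approach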
@njit
def makemems(gmat, meff):
    """Set up family-specific marker effects (Mendelian sampling)."""
    qqq = np.zeros((gmat.shape))
    for i in range(gmat.shape[0]):
        for j in range(gmat.shape[1]):
            if gmat[i, j] == 4:
                qqq[i, j] = meff[j]*-1
            elif gmat[i, j] == 3:
                qqq[i, j] = meff[j]
            else:
                qqq[i, j] = 0
    return qqq
@njit
def makemebv(gmat, meff):
    """Set up family-specific marker effects (GEBV)."""
    qqq = np.zeros((gmat.shape))
    for i in range(gmat.shape[0]):
        for j in range(gmat.shape[1]):
            if gmat[i, j] == 2:
                qqq[i, j] = meff[j]*-1
            elif gmat[i, j] == 1:
                qqq[i, j] = meff[j]
            else:
                qqq[i, j] = 0
    return qqq
def traitspecmatrices(gmat, meff):
    """Store trait-specific matrices in a list."""
    notr = meff.shape[1]  # number of traits
    slist = []  # list stores trait-specific matrices
    slist.adding([])
    for i in range(notr):
        # specify data type for numba
        mefff = np.array(meff.iloc[:, i], float)
        matrix_ms = makemems(gmat, mefff)
        slist[0].adding(matrix_ms)
    return slist
def nameskf(notr, trait_names):
    """Create names of knowledgeframe columns for Mendelian co(var)."""
    tnn = np.zeros((notr, notr), 'U20')
    tnn = np.chararray(tnn.shape, itemsize=30)
    for i in range(notr):
        for trt in range(notr):
            if i == trt:
                tnn[i, trt] = str(trait_names[i])
            elif i != trt:
                tnn[i, trt] = "{}_{}".formating(trait_names[i], trait_names[trt])
    colnam = tnn[np.tril_indices(notr)]
    return colnam
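# --- Illustrative sketch (not part of the original package) ----------------
# Standalone illustration of the lower-triangle naming scheme above: for two
# traits it yields ['T1', 'T2_T1', 'T2'], one name per element of
# np.tril_indices(2).  The helper below is hypothetical and only mirrors the logic.
def _toy_tril_names(trait_names=("T1", "T2")):
    notr = len(trait_names)
    grid = [[ti if i == j else "{}_{}".format(ti, tj)
             for j, tj in enumerate(trait_names)]
            for i, ti in enumerate(trait_names)]
    iii, jjj = np.tril_indices(notr)
    return [grid[i][j] for i, j in zip(iii, jjj)]  # -> ['T1', 'T2_T1', 'T2']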
def mrmmult(temp, covmat):
    """Matrix multiplication (MRM' or m'Rm)."""
    return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
    """Matrix multiplication (MRM') for bigger matrices."""
    temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
    return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
def progr(itern, total):
    """Print progress of a task."""
    fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
    deci, lengthgth = 0, 50
    percent = ("{0:." + str(deci) + "f}").formating(100 * (itern / float(total)))
    filledlength = int(lengthgth * itern // total)
    bars = fill * filledlength + '-' * (lengthgth - filledlength)
    print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
    if itern == total:
        print()
def subindcheck(info, sub_id):
    """Check if inds provided in mk.KnowledgeFrame (sub_id) are in group data."""
    sub_id = mk.KnowledgeFrame(sub_id).reseting_index(sip=True)
    if sub_id.shape[1] != 1:
        sys.exit("Indivisioniduals' IDs (sub_id) should be provided in one column")
    numbers = info.group.iloc[:, 1].totype(str).convert_list()
    sub_id = sub_id.squeeze().totype(str).convert_list()
    aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
    aaa = np.array(aaa)
    if length(aaa) != length(sub_id):
        sys.exit("Some indivisionidual ID could not be found in group data")
    return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
    """Derive Mendelian sampling co(variance) for single trait."""
    if sub_id is not None:
        aaa = subindcheck(info, sub_id)
        idn = info.group.iloc[aaa, 1].reseting_index(sip=True).totype(str)  # ID
        groupsex = info.group.iloc[aaa, 0].reseting_index(sip=True).totype(str)
        matsub = info.gmat[aaa, :]
    else:
        idn = info.group.iloc[:, 1].reseting_index(sip=True).totype(str)  # ID
        groupsex = info.group.iloc[:, 0].reseting_index(sip=True).totype(str)
        matsub = info.gmat
    if (info.gmapping.shape[1]-3 == 1 and length(mk.distinctive(groupsex)) > 1):
        print("The same mapping will be used for total_all groups")
    if progress:
        progr(0, matsub.shape[0])  # print progress bar
    snpindexxx = np.arange(start=0, stop=info.gmapping.shape[0], step=1)
    notr = info.meff.columns.size
    slist = traitspecmatrices(matsub, info.meff)
    # knowledgeframe to save Mendelian sampling (co)variance and aggregate breeding
    msvmsc = np.empty((matsub.shape[0], 1))
    for i in range(matsub.shape[0]):  # loop over no of indivisioniduals
        mscov = np.zeros((notr, notr))  # Mendelian co(var) mat for ind i
        for chrm in mk.distinctive(info.gmapping.iloc[:, 0]):
            # snp index for chromosome chrm
            s_ind = np.array(snpindexxx[info.gmapping.iloc[:, 0] == (chrm)])
            # family-specific marker effects for ind i
            temp = np.zeros((notr, length(s_ind)))
            for trt in range(notr):
                temp[trt, :] = slist[0][trt][i, s_ind]
            if info.gmapping.shape[1]-3 == 1:
                mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
            else:
                mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
        msvmsc[i, 0] = mscov
        if progress:
            progr(i + 1, matsub.shape[0])  # print progress bar
    msvmsc = mk.KnowledgeFrame(msvmsc)
    msvmsc.columns = info.meff.columns
    msvmsc.insert(0, "ID", idn, True)
    msvmsc.insert(1, "Group", groupsex, True)  # insert group
    return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
    """Derive Mendelian sampling co(variance) for multiple traits."""
    if sub_id is not None:
        aaa = subindcheck(info, sub_id)
        idn = info.group.iloc[aaa, 1].reseting_index(sip=True).totype(str)  # ID
        groupsex = info.group.iloc[aaa, 0].reseting_index(sip=True).totype(str)
        matsub = info.gmat[aaa, :]
    else:
        idn = info.group.iloc[:, 1].reseting_index(sip=True).totype(str)  # ID
        groupsex = info.group.iloc[:, 0].reseting_index(sip=True).totype(str)
        matsub = info.gmat
    if (info.gmapping.shape[1]-3 == 1 and length(mk.distinctive(groupsex)) > 1):
        print("The same mapping will be used for total_all groups")
    if progress:
        progr(0, matsub.shape[0])  # print progress bar
    snpindexxx = np.arange(start=0, stop=info.gmapping.shape[0], step=1)
    notr = info.meff.columns.size
    slist = traitspecmatrices(matsub, info.meff)
    # knowledgeframe to save Mendelian sampling (co)variance and aggregate breeding
    mad = length(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
    msvmsc = np.empty((matsub.shape[0], mad))
    for i in range(matsub.shape[0]):  # loop over no of indivisioniduals
        mscov = np.zeros((notr+1, notr+1))  # Mendelian co(var) mat for ind i
        for chrm in mk.distinctive(info.gmapping.iloc[:, 0]):
            # snp index for chromosome chrm
            s_ind = np.array(snpindexxx[info.gmapping.iloc[:, 0] == (chrm)])
            # family-specific marker effects for ind i
            temp = np.zeros((notr+1, length(s_ind)))
            for trt in range(notr):
                temp[trt, :] = slist[0][trt][i, s_ind]
                temp[notr, :] = np.matmul(info.indwt.T, temp[0:notr, :])
            if info.gmapping.shape[1]-3 == 1:
                mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
            else:
                mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
        msvmsc[i, :] = mscov[np.tril_indices(notr+1)]
        if progress:
            progr(i + 1, matsub.shape[0])  # print progress bar
    msvmsc = mk.KnowledgeFrame(msvmsc)
    tnames = np.concatingenate((info.meff.columns, "AG"), axis=None)
    colnam = nameskf(notr+1, tnames).decode('utf-8')
    msvmsc.columns = colnam
    msvmsc.insert(0, "ID", idn, True)
    msvmsc.insert(1, "Group", groupsex, True)  # insert group
    return msvmsc
def msvarcov_g(info, covmat, sub_id, progress=False):
    """
    Derive Mendelian sampling co(variance) and aggregate genotype.
    Parameters
    ----------
    info : class object
        A class object created using the function "datacheck"
    covmat : A list of pop cov matrices created using "popcovmat" function
    sub_id : monkey.KnowledgeFrame with one column
        Index: RangeIndex (getting_minimum of 2 rows)
        Containing ID numbers of specific indivisioniduals to be evaluated
    progress : bool, optional; print progress of the function if True
    Returns
    -------
    msvmsc : monkey.KnowledgeFrame
        containing the Mendelian sampling (co)variance and aggregate genotype
    Note: If sub_id is None, Mendelian (co-)variance will be estimated for
    total_all indivisioniduals. Otherwise, Mendelian (co-)variance will be estimated for
    the indivisioniduals in sub_id
    """
    notr = info.meff.columns.size
    if notr == 1:
        msvmsc = msvarcov_g_st(info, covmat, sub_id, progress)
    elif notr > 1:
        msvmsc = msvarcov_g_mt(info, covmat, sub_id, progress)
    return msvmsc
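# --- Illustrative sketch (not part of the original package) ----------------
# Assumed usage of the function above; sub_id=None evaluates all individuals:
#
#   msvmsc = msvarcov_g(info, covmat, sub_id=None, progress=True)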
def array2sym(array):
    """Convert array to standardized symm mat, and back to array without diags."""
    kfmsize = array.size
    for notr in range(1, 10000):
        if kfmsize == length(np.zeros((notr, notr))[np.tril_indices(notr)]):
            break
    iii, jjj = np.tril_indices(notr)
    mat = np.empty((notr, notr), float)
    mat[iii, jjj], mat[jjj, iii] = array, array
    mat = np.array(mat)
    mat1 = cov2corr(mat)
    return np.array(mat1[np.tril_indices(notr, k=-1)])
def msvarcov_gcorr(msvmsc):
    """
    Standardize Mendelian sampling co(variance) and aggregate genotype.
    Parameters
    ----------
    msvmsc : monkey.KnowledgeFrame
        containing the Mendelian sampling (co)variance and aggregate genotype
        created using msvarcov_g function
    Returns
    -------
    kfcor : monkey.KnowledgeFrame
        containing standardized Mendelian sampling (co)variance
    """
    if msvmsc.columns.size == 3:
        sys.exit("Correlation cannot be derived for a single trait")
    kfm = msvmsc.iloc[:, 2:msvmsc.shape[1]]  # exclude ID and group
    kfmsize = kfm.shape[1]
    # derive number of traits
    for notr in range(1, 10000):
        if kfmsize == length(np.zeros((notr, notr))[np.tril_indices(notr)]):
            break
    # standardize covariance between traits
    kfcor = kfm.employ(array2sym, axis=1)
    # extract column names
    listnames = kfm.columns.convert_list()
    cnames = [x for x in listnames if "_" in x]
    # convert mk.collections of list to data frame
    kfcor = mk.KnowledgeFrame.from_dict(dict(zip(kfcor.index, kfcor.values))).T
    kfcor.columns = cnames
    # insert ID and group info
    kfcor = [mk.KnowledgeFrame(msvmsc.iloc[:, 0:2]), kfcor]  # add ID and GRP
    kfcor = mk.concating(kfcor, axis=1)
    return kfcor
def calcgbv(info, sub_id):
    """Calculate breeding values for each trait."""
    if sub_id is not None:
        aaa = subindcheck(info, sub_id)
        idn = info.group.iloc[aaa, 1].reseting_index(sip=True).totype(str)  # ID
        groupsex = info.group.iloc[aaa, 0].reseting_index(sip=True).totype(str)
        matsub = info.gmat[aaa, :]
    else:
        idn = info.group.iloc[:, 1].reseting_index(sip=True).totype(str)  # ID
        groupsex = info.group.iloc[:, 0].reseting_index(sip=True).totype(str)
        matsub = info.gmat
    no_indivisioniduals = matsub.shape[0]  # Number of indivisioniduals
    trait_names = info.meff.columns  # traits names
    notr = trait_names.size  # number of traits
    if notr == 1:
        gbv = np.zeros((no_indivisioniduals, notr))
        mefff = np.array(info.meff.iloc[:, 0], float)  # type spec for numba
        matrix_me = makemebv(matsub, mefff)  # fam-spec marker effects BV
        gbv[:, 0] = matrix_me.total_sum(axis=1)  # total_sum total_all effects
        gbv = mk.KnowledgeFrame(gbv)
        gbv.columns = trait_names
    elif notr > 1:
        gbv = np.zeros((no_indivisioniduals, notr+1))
        for i in range(notr):
            mefff = np.array(info.meff.iloc[:, i], float)  # type spec 4 numba
            matrix_me = makemebv(matsub, mefff)  # fam-spec marker effects BV
            gbv[:, i] = matrix_me.total_sum(axis=1)  # total_sum total_all effects for each trait
            gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i]  # Agg gen
        gbv = mk.KnowledgeFrame(gbv)
        colnames = np.concatingenate((trait_names, "ABV"), axis=None)
        gbv.columns = colnames
    gbv.insert(0, "ID", idn, True)  # insert ID
    gbv.insert(1, "Group", groupsex, True)  # insert group
    return gbv
def calcprob(info, msvmsc, thresh):
    """Calculate the probability of breeding top indivisioniduals."""
    aaa = subindcheck(info, mk.KnowledgeFrame(msvmsc.iloc[:, 0]))
    gbvtotal_all = calcgbv(info, None)  # calc GEBV for total_all inds used by thresh
    gbv = gbvtotal_all.iloc[aaa, :].reseting_index(sip=True)  # GEBV matching msvmsc
    no_indivisioniduals = gbv.shape[0]  # Number of indivisioniduals
    trait_names = info.meff.columns  # traits names
    notr = trait_names.size  # number of traits
    if notr == 1:
        probkf = np.zeros((no_indivisioniduals, notr))
        ttt = np.quantile(gbvtotal_all.iloc[:, (0+2)], q=1-thresh)  # threshold
        probkf[:, 0] = 1 - scipy.stats.norm.ckf(
            ttt, loc=gbv.iloc[:, (0+2)], scale=np.sqrt(msvmsc.iloc[:, 0+2]))
        probkf = mk.KnowledgeFrame(probkf)
        probkf.columns = trait_names
    elif notr > 1:
        colnam = np.concatingenate((info.meff.columns, "AG"), axis=None)
        colnam = nameskf(notr+1, colnam).decode('utf-8')
        ttt = np.quantile(gbvtotal_all.iloc[:, (notr+2)], q=1-thresh)  # threshold
        probkf = np.zeros((no_indivisioniduals, notr+1))
        t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
        for i in range(notr):
            ttt = np.quantile(gbvtotal_all.iloc[:, (i+2)], q=1-thresh)  # threshold
            probkf[:, i] = scipy.stats.norm.ckf(
                ttt, loc=gbv.iloc[:, (i+2)], scale=np.sqrt(
                    msvmsc.iloc[:, (t_ind[i])+2]))
            probkf[:, i] = np.nan_to_num(probkf[:, i])  # convert Inf to zero
            probkf[:, i] = 1 - probkf[:, i]
        ttt = np.quantile(gbvtotal_all.iloc[:, (notr+2)], q=1-thresh)
        probkf[:, notr] = scipy.stats.norm.ckf(
            ttt, loc=gbv.iloc[:, (notr+2)], scale=np.sqrt(
                msvmsc["AG"]))
        probkf[:, notr] = np.nan_to_num(probkf[:, notr])  # Agg
        probkf[:, notr] = 1 - probkf[:, notr]
        probkf = mk.KnowledgeFrame(probkf)  # convert matrix to knowledgeframe
        colnames = np.concatingenate((trait_names, "ABV"), axis=None)
        probkf.columns = colnames
    probkf = [mk.KnowledgeFrame(gbv.iloc[:, 0:2]), probkf]  # add ID and GRP
    probkf = mk.concating(probkf, axis=1)
    return probkf
def calcindex(info, msvmsc, const):
    """Calculate the index if constant is known."""
    sub_id = mk.KnowledgeFrame(msvmsc.iloc[:, 0])
    gbv = calcgbv(info, sub_id)  # calc GEBV
    no_indivisioniduals = gbv.shape[0]  # Number of indivisioniduals
    trait_names = info.meff.columns  # traits names
    notr = trait_names.size
    if notr == 1:
        indexkf = np.zeros((no_indivisioniduals, notr))
        indexkf[:, 0] = (gbv.iloc[:, (0+2)]/2) + np.sqrt(
            msvmsc.iloc[:, 0+2])*const
        indexkf = mk.KnowledgeFrame(indexkf)
        indexkf.columns = trait_names
    elif notr > 1:
        colnam = np.concatingenate((info.meff.columns, "AG"), axis=None)
        colnam = nameskf(notr+1, colnam).decode('utf-8')
        indexkf = np.zeros((no_indivisioniduals, notr+1))
        t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
        for i in range(notr):
            indexkf[:, i] = (gbv.iloc[:, (i+2)]/2) + np.sqrt(
                msvmsc.iloc[:, (t_ind[i]+2)])*const
        indexkf[:, notr] = (gbv.iloc[:, (notr+2)]/2) + np.sqrt(
            msvmsc["AG"])*const
        indexkf = mk.KnowledgeFrame(indexkf)
        colnames = np.concatingenate((trait_names, "ABV"), axis=None)
        indexkf.columns = colnames
    indexkf = [mk.KnowledgeFrame(gbv.iloc[:, 0:2]), indexkf]  # add ID and GRP
    indexkf = mk.concating(indexkf, axis=1)
    return indexkf
def selstrat_g(selstrat, info, sub_id, msvmsc, throrconst):
    """
    Calculate the selection criterion (GEBV, PBTI, or index) using the gametic approach.
    Parameters
    ----------
    selstrat : str
        A str containing whatever of GEBV, PBTI or index
    info : class object
        A class object created using the function "datacheck"
    sub_id : monkey.KnowledgeFrame with one column
        Index: RangeIndex (getting_minimum of 2 rows)
        Containing ID numbers of specific indivisioniduals to be evaluated
    msvmsc : monkey.KnowledgeFrame
        DF created using the function "msvarcov_g"
    throrconst : float
        If selstrat is PBTI, a throrconst of value 0.05 sets threshold at
        top 5% of GEBV. If selstrat is index, throrconst is a constant.
        If selstrat is GEBV, throrconst can be whatever random value.
    Returns
    -------
    data : monkey.KnowledgeFrame
        Index: RangeIndex
        Columns:
        ID, Group, trait names and Aggregate Breeding Value (ABV)
    Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
    If sub_id is None and selstrat is GEBV, GEBVs will be estimated for total_all
    indivisioniduals. However, if selstrat is not GEBV, the chosen selection
    criterion will be estimated for total_all indivisioniduals in msvmsc data frame.
    """
    if selstrat in ("PBTI", "pbti", "index", "INDEX") and msvmsc is None:
        sys.exit("Provide Mendelian (co-)variance knowledgeframe: 'msvmsc'")
    if selstrat in ("PBTI", "pbti", "index", "INDEX") and throrconst is None:
        sys.exit("Provide value for throrconst parameter")
    if selstrat not in ('GEBV', 'gebv', 'PBTI', 'pbti', 'index', 'INDEX'):
        sys.exit("selection strategy should be one of GEBV, PBTI or INDEX")
    if selstrat in ('GEBV', 'gebv'):
        data = calcgbv(info, sub_id)
    elif selstrat in ('PBTI', 'pbti'):
        if throrconst > 1 or throrconst < 0:
            sys.exit("value must be in the range of 0 and 1")
        data = calcprob(info, msvmsc, throrconst)
    elif selstrat in ('index', 'INDEX'):
        data = calcindex(info, msvmsc, throrconst)
    return data
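# --- Illustrative sketch (not part of the original package) ----------------
# Assumed call pattern for the three strategies documented above (argument
# values are made up; msvmsc comes from msvarcov_g):
#
#   gebv  = selstrat_g("GEBV",  info, sub_id=None, msvmsc=None,   throrconst=None)
#   pbti  = selstrat_g("PBTI",  info, sub_id=None, msvmsc=msvmsc, throrconst=0.05)
#   index = selstrat_g("index", info, sub_id=None, msvmsc=msvmsc, throrconst=2.0)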
def cov2corr(cov):
    """Convert covariance to correlation matrix."""
    cov = np.aswhateverarray(cov)
    standard_ = np.sqrt(np.diag(cov))
    with np.errstate(invalid='ignore'):
        corr = cov / np.outer(standard_, standard_)
    return corr
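# --- Illustrative sketch (not part of the original package) ----------------
# Toy numbers for the conversion above: variances 4 and 9 with covariance 3
# give an off-diagonal correlation of 3 / (2 * 3) = 0.5.
def _toy_cov2corr():
    cov = np.array([[4.0, 3.0],
                    [3.0, 9.0]])
    std = np.sqrt(np.diag(cov))      # [2., 3.]
    return cov / np.outer(std, std)  # [[1., 0.5], [0.5, 1.]]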
def aggen(us_ind, no_markers, slst, indwt):
    """Set up additive effects matrix of aggregate genotype."""
    mmfinal = np.empty((length(us_ind), no_markers))
    xxx = 0
    for iii in us_ind:
        tmpmt1 = np.array([slst[0][trt][iii, :] for trt in range(indwt.size)])
        mmfinal[xxx, :] = np.matmul(indwt.transpose(), tmpmt1)
        xxx = xxx + 1
    return mmfinal
def chr_int(xxxxx):
    """Format chromomosome of interest parameter."""
    if 'total_all' in xxxxx:
        xxxxx = 'total_all'
    elif 'none' in xxxxx:
        xxxxx = 'none'
    else:
        xxxxx = np.array([int(i) for i in xxxxx])
    return xxxxx
def writechr(covtmpx, chrinterest, chrm, trtnam, probx, standardsim):
    """Write matrices to file."""
    if incontainstance(chrinterest, str):
        if chrinterest == 'total_all':
            chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".formating(
                os.gettingcwd(), trtnam, chrm, probx)
            np.save(chrfile1, covtmpx)
    elif chrm in chrinterest:
        chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".formating(
            os.gettingcwd(), trtnam, chrm, probx)  # output file
        np.save(chrfile1, covtmpx)
    if standardsim:
        if incontainstance(chrinterest, str):
            if chrinterest == 'total_all':
                chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".formating(
                    os.gettingcwd(), trtnam, chrm, probx)  # output file
                np.save(chrfilec, cov2corr(covtmpx))
        elif chrm in chrinterest:
            chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".formating(
                os.gettingcwd(), trtnam, chrm, probx)  # output file
            np.save(chrfilec, cov2corr(covtmpx))
def writechrunspec(covtmpx, chrinterest, chrm, trtnam, standardsim):
    """Write matrices to file."""
    if incontainstance(chrinterest, str):
        if chrinterest == 'total_all':
            chrfile1 = "{}/Sim mat for {} chrm {}.npy".formating(
                os.gettingcwd(), trtnam, chrm)
            np.save(chrfile1, covtmpx)
    elif chrm in chrinterest:
        chrfile1 = "{}/Sim mat for {} chrm {}.npy".formating(
            os.gettingcwd(), trtnam, chrm)  # output file
        np.save(chrfile1, covtmpx)
    if standardsim:
        if incontainstance(chrinterest, str):
            if chrinterest == 'total_all':
                chrfilec = "{}/Stdsim mat for {} chrm {}.npy".formating(
                    os.gettingcwd(), trtnam, chrm)  # output file
                np.save(chrfilec, cov2corr(covtmpx))
        elif chrm in chrinterest:
            chrfilec = "{}/Stdsim mat for {} chrm {}.npy".formating(
                os.gettingcwd(), trtnam, chrm)  # output file
            np.save(chrfilec, cov2corr(covtmpx))
def grtonum(numnx):
    """Map chracters to numeric (0-no of groups)."""
    numnx = numnx.reseting_index(sip=True)
    probn = mk.distinctive(numnx).convert_list()
    alt_no = np.arange(0, length(probn), 1)
    noli = numnx.convert_list()
    numnx = np.array(list(mapping(dict(zip(probn, alt_no)).getting, noli, noli)))
    return numnx, probn
def datret(info, rw_nms, pfnp, us_ind, slist, covmat, cov_indxx, standardsim,
           progress):
    """Return sim mat based on aggregate genotypes."""
    snpindexxxx = np.arange(start=0, stop=info.gmapping.shape[0], step=1)
    if info.meff.shape[1] == 1 and not standardsim:
        mat = cov_indxx
    elif info.meff.shape[1] == 1 and standardsim:
        mat = cov2corr(cov_indxx)
    elif info.meff.shape[1] > 1:
        if info.gmapping.shape[1]-3 > 1:
            rw_nms = mk.KnowledgeFrame(rw_nms)
            rw_nms.to_csv(f"order of inds in mat grp {pfnp}.csv", index=False)
        if progress:
            print('Creating similarity matrix based on aggregate genotype')
            progr(0, getting_max(mk.distinctive(info.gmapping.iloc[:, 0])))
        tmpmt1 = aggen(us_ind, info.gmapping.shape[0], slist, info.indwt)
        # stores ABV covariance btw inds
        mat = np.zeros((length(us_ind), length(us_ind)))
        # loop over chromososomes
        for chrm in mk.distinctive(info.gmapping.iloc[:, 0]):
            s_ind = np.array(snpindexxxx[info.gmapping.iloc[:, 0] == (chrm)])
            if info.gmapping.shape[1]-3 == 1:
                covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[0][chrm-1]))
            else:
                covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[pfnp][chrm-1]))
            mat = mat + covtmpx
            if progress:
                progr(chrm, getting_max( 
 | 
	mk.distinctive(info.gmapping.iloc[:, 0]) 
 | 
	pandas.unique 
 | 
					
	import clone
import re
from textwrap import dedent
import numpy as np
import pytest
import monkey as mk
from monkey import (
    KnowledgeFrame,
    MultiIndex,
)
import monkey._testing as tm
jinja2 = pytest.importorskip("jinja2")
from monkey.io.formatings.style import (  # isort:skip
    Styler,
)
from monkey.io.formatings.style_render import (
    _getting_level_lengthgths,
    _getting_trimgetting_ming_getting_maximums,
    maybe_convert_css_to_tuples,
    non_reducing_slice,
)
@pytest.fixture
def mi_kf():
    return KnowledgeFrame(
        [[1, 2], [3, 4]],
        index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
        columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
        dtype=int,
    )
@pytest.fixture
def mi_styler(mi_kf):
    return Styler(mi_kf, uuid_length=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
    # comprehensively add features to mi_styler
    mi_styler = mi_styler._clone(deepclone=True)
    mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
    mi_styler.uuid_length = 5
    mi_styler.uuid = "abcde"
    mi_styler.set_caption("capt")
    mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
    mi_styler.hide(axis="columns")
    mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
    mi_styler.hide(axis="index")
    mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
    mi_styler.set_table_attributes('class="box"')
    mi_styler.formating(na_rep="MISSING", precision=3)
    mi_styler.formating_index(precision=2, axis=0)
    mi_styler.formating_index(precision=4, axis=1)
    mi_styler.highlight_getting_max(axis=None)
    mi_styler.employmapping_index(lambda x: "color: white;", axis=0)
    mi_styler.employmapping_index(lambda x: "color: black;", axis=1)
    mi_styler.set_td_classes(
        KnowledgeFrame(
            [["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
        )
    )
    mi_styler.set_tooltips(
        KnowledgeFrame(
            [["a2", "b2"], ["a2", "c2"]],
            index=mi_styler.index,
            columns=mi_styler.columns,
        )
    )
    return mi_styler
@pytest.mark.parametrize(
    "sparse_columns, exp_cols",
    [
        (
            True,
            [
                {"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
                {"is_visible": False, "attributes": "", "value": "c0"},
            ],
        ),
        (
            False,
            [
                {"is_visible": True, "attributes": "", "value": "c0"},
                {"is_visible": True, "attributes": "", "value": "c0"},
            ],
        ),
    ],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
    exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
    exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
    ctx = mi_styler._translate(True, sparse_columns)
    assert exp_cols[0].items() <= ctx["header_num"][0][2].items()
    assert exp_cols[1].items() <= ctx["header_num"][0][3].items()
    assert exp_l1_c0.items() <= ctx["header_num"][1][2].items()
    assert exp_l1_c1.items() <= ctx["header_num"][1][3].items()
@pytest.mark.parametrize(
    "sparse_index, exp_rows",
    [
        (
            True,
            [
                {"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
                {"is_visible": False, "attributes": "", "value": "i0"},
            ],
        ),
        (
            False,
            [
                {"is_visible": True, "attributes": "", "value": "i0"},
                {"is_visible": True, "attributes": "", "value": "i0"},
            ],
        ),
    ],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
    exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
    exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
    ctx = mi_styler._translate(sparse_index, True)
    assert exp_rows[0].items() <= ctx["body"][0][0].items()
    assert exp_rows[1].items() <= ctx["body"][1][0].items()
    assert exp_l1_r0.items() <= ctx["body"][0][1].items()
    assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
    with mk.option_context("styler.sparse.index", False):
        html1 = mi_styler.to_html()
    with mk.option_context("styler.sparse.index", True):
        html2 = mi_styler.to_html()
    assert html1 != html2
    with mk.option_context("styler.sparse.columns", False):
        html1 = mi_styler.to_html()
    with mk.option_context("styler.sparse.columns", True):
        html2 = mi_styler.to_html()
    assert html1 != html2
@pytest.mark.parametrize(
    "rn, cn, getting_max_els, getting_max_rows, getting_max_cols, exp_rn, exp_cn",
    [
        (100, 100, 100, None, None, 12, 6),  # reduce to (12, 6) < 100 elements
        (1000, 3, 750, None, None, 250, 3),  # dynamically reduce rows to 250, keep cols
        (4, 1000, 500, None, None, 4, 125),  # dynamically reduce cols to 125, keep rows
        (1000, 3, 750, 10, None, 10, 3),  # overwrite above dynamics with getting_max_rows
        (4, 1000, 500, None, 5, 4, 5),  # overwrite above dynamics with getting_max_cols
        (100, 100, 700, 50, 50, 25, 25),  # rows and cols below the given caps, so < 700 elements
    ],
)
def test_trimgetting_ming_getting_maximum(rn, cn, getting_max_els, getting_max_rows, getting_max_cols, exp_rn, exp_cn):
    rn, cn = _getting_trimgetting_ming_getting_maximums(
        rn, cn, getting_max_els, getting_max_rows, getting_max_cols, scaling_factor=0.5
    )
    assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
    "option, val",
    [
        ("styler.render.getting_max_elements", 6),
        ("styler.render.getting_max_rows", 3),
    ],
)
def test_render_trimgetting_ming_rows(option, val):
    # test auto and specific trimgetting_ming of rows
    kf = KnowledgeFrame(np.arange(120).reshape(60, 2))
    with mk.option_context(option, val):
        ctx = kf.style._translate(True, True)
    assert length(ctx["header_num"][0]) == 3  # index + 2 data cols
    assert length(ctx["body"]) == 4  # 3 data rows + trimgetting_ming row
    assert length(ctx["body"][0]) == 3  # index + 2 data cols
@pytest.mark.parametrize(
    "option, val",
    [
        ("styler.render.getting_max_elements", 6),
        ("styler.render.getting_max_columns", 2),
    ],
)
def test_render_trimgetting_ming_cols(option, val):
    # test auto and specific trimgetting_ming of cols
    kf = KnowledgeFrame(np.arange(30).reshape(3, 10))
    with mk.option_context(option, val):
        ctx = kf.style._translate(True, True)
    assert length(ctx["header_num"][0]) == 4  # index + 2 data cols + trimgetting_ming col
    assert length(ctx["body"]) == 3  # 3 data rows
    assert length(ctx["body"][0]) == 4  # index + 2 data cols + trimgetting_ming col
def test_render_trimgetting_ming_mi():
    midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
    kf = KnowledgeFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
    with mk.option_context("styler.render.getting_max_elements", 4):
        ctx = kf.style._translate(True, True)
    assert length(ctx["body"][0]) == 5  # 2 indexes + 2 data cols + trimgetting_ming row
    assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
    assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
    assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
    assert length(ctx["body"]) == 3  # 2 data rows + trimgetting_ming row
    assert length(ctx["header_num"][0]) == 5  # 2 indexes + 2 column header_numers + trimgetting_ming col
    assert {"attributes": 'colspan="2"'}.items() <= ctx["header_num"][0][2].items()
def test_render_empty_mi():
    # GH 43305
    kf = KnowledgeFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
    expected = dedent(
        """\
    >
      <theader_num>
        <tr>
          <th class="index_name level0" > </th>
          <th class="index_name level1" >one</th>
        </tr>
      </theader_num>
    """
    )
    assert expected in kf.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepclone", [True, False])
def test_clone(comprehensive, render, deepclone, mi_styler, mi_styler_comp):
    styler = mi_styler_comp if comprehensive else mi_styler
    styler.uuid_length = 5
    s2 = clone.deepclone(styler) if deepclone else clone.clone(styler)  # make clone and check
    assert s2 is not styler
    if render:
        styler.to_html()
    excl = [
        "na_rep",  # deprecated
        "precision",  # deprecated
        "cellstyle_mapping",  # render time vars..
        "cellstyle_mapping_columns",
        "cellstyle_mapping_index",
        "template_latex",  # render templates are class level
        "template_html",
        "template_html_style",
        "template_html_table",
    ]
    if not deepclone:  # check memory locations are equal for all included attributes
        for attr in [a for a in styler.__dict__ if (not ctotal_allable(a) and a not in excl)]:
            assert id(gettingattr(s2, attr)) == id(gettingattr(styler, attr))
    else:  # check memory locations are different for nested or mutable vars
        shtotal_allow = [
            "data",
            "columns",
            "index",
            "uuid_length",
            "uuid",
            "caption",
            "cell_ids",
            "hide_index_",
            "hide_columns_",
            "hide_index_names",
            "hide_column_names",
            "table_attributes",
        ]
        for attr in shtotal_allow:
            assert id(gettingattr(s2, attr)) == id(gettingattr(styler, attr))
        for attr in [
            a
            for a in styler.__dict__
            if (not ctotal_allable(a) and a not in excl and a not in shtotal_allow)
        ]:
            if gettingattr(s2, attr) is None:
                assert id(gettingattr(s2, attr)) == id(gettingattr(styler, attr))
            else:
                assert id(gettingattr(s2, attr)) != id(gettingattr(styler, attr))
def test_clear(mi_styler_comp):
    # NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
    # to ensure proper testing of the 'clone', 'clear', 'export' methods with the new feature
    # GH 40675
    styler = mi_styler_comp
    styler._compute()  # execute applied methods
    clean_clone = Styler(styler.data, uuid=styler.uuid)
    excl = [
        "data",
        "index",
        "columns",
        "uuid",
        "uuid_length",  # uuid is set to be the same on styler and clean_clone
        "cell_ids",
        "cellstyle_mapping",  # execution time only
        "cellstyle_mapping_columns",  # execution time only
        "cellstyle_mapping_index",  # execution time only
        "precision",  # deprecated
        "na_rep",  # deprecated
        "template_latex",  # render templates are class level
        "template_html",
        "template_html_style",
        "template_html_table",
    ]
    # tests vars are not same vals on obj and clean clone before clear (except for excl)
    for attr in [a for a in styler.__dict__ if not (ctotal_allable(a) or a in excl)]:
        res = gettingattr(styler, attr) == gettingattr(clean_clone, attr)
        assert not (total_all(res) if (hasattr(res, "__iter__") and length(res) > 0) else res)
    # test vars have same values on obj and clean clone after clearing
    styler.clear()
    for attr in [a for a in styler.__dict__ if not (ctotal_allable(a))]:
        res = gettingattr(styler, attr) == gettingattr(clean_clone, attr)
        assert total_all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
    exp_attrs = [
        "_todo",
        "hide_index_",
        "hide_index_names",
        "hide_columns_",
        "hide_column_names",
        "table_attributes",
        "table_styles",
        "css",
    ]
    for attr in exp_attrs:
        check = gettingattr(mi_styler, attr) == gettingattr(mi_styler_comp, attr)
        assert not (
            total_all(check) if (hasattr(check, "__iter__") and length(check) > 0) else check
        )
    export = mi_styler_comp.export()
    used = mi_styler.use(export)
    for attr in exp_attrs:
        check = gettingattr(used, attr) == gettingattr(mi_styler_comp, attr)
        assert total_all(check) if (hasattr(check, "__iter__") and length(check) > 0) else check
    used.to_html()
def test_hide_raises(mi_styler):
    msg = "`subset` and `level` cannot be passed simultaneously"
    with pytest.raises(ValueError, match=msg):
        mi_styler.hide(axis="index", subset="something", level="something else")
    msg = "`level` must be of type `int`, `str` or list of such"
    with pytest.raises(ValueError, match=msg):
        mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
    mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
    ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
    assert length(ctx["header_num"][0]) == 3
    assert length(ctx["header_num"][1]) == 3
    assert length(ctx["header_num"][2]) == 4
    assert ctx["header_num"][2][0]["is_visible"]
    assert not ctx["header_num"][2][1]["is_visible"]
    assert ctx["body"][0][0]["is_visible"]
    assert not ctx["body"][0][1]["is_visible"]
    assert ctx["body"][1][0]["is_visible"]
    assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
    mi_styler.columns.names = ["zero", "one"]
    if names:
        mi_styler.index.names = ["zero", "one"]
    ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
    assert length(ctx["header_num"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["employmapping", "employ"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_employ_mapping_header_numer(method, axis):
    # GH 41893
    kf = KnowledgeFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
    func = {
        "employ": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
        "employmapping": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
    }
    # test execution added to todo
    result = gettingattr(kf.style, f"{method}_index")(func[method], axis=axis)
    assert length(result._todo) == 1
    assert length(gettingattr(result, f"ctx_{axis}")) == 0
    # test ctx object on compute
    result._compute()
    expected = {
        (0, 0): [("attr", "val")],
    }
    assert gettingattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["employ", "employmapping"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_employ_mapping_header_numer_mi(mi_styler, method, axis):
    # GH 41893
    func = {
        "employ": lambda s: ["attr: val;" if "b" in v else "" for v in s],
        "employmapping": lambda v: "attr: val" if "b" in v else "",
    }
    result = gettingattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
    expected = {(1, 1): [("attr", "val")]}
    assert gettingattr(result, f"ctx_{axis}") == expected
def test_employ_mapping_header_numer_raises(mi_styler):
    # GH 41893
    with pytest.raises(ValueError, match="No axis named bad for object type KnowledgeFrame"):
        mi_styler.employmapping_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
    def setup_method(self, method):
        np.random.seed(24)
        self.s = KnowledgeFrame({"A": np.random.permutation(range(6))})
        self.kf = KnowledgeFrame({"A": [0, 1], "B": np.random.randn(2)})
        self.f = lambda x: x
        self.g = lambda x: x
        def h(x, foo="bar"):
            return mk.Collections(f"color: {foo}", index=x.index, name=x.name)
        self.h = h
        self.styler = Styler(self.kf)
        self.attrs = KnowledgeFrame({"A": ["color: red", "color: blue"]})
        self.knowledgeframes = [
            self.kf,
            KnowledgeFrame(
                {"f": [1.0, 2.0], "o": ["a", "b"], "c": mk.Categorical(["a", "b"])}
            ),
        ]
        self.blank_value = " "
    def test_init_non_monkey(self):
        msg = "``data`` must be a Collections or KnowledgeFrame"
        with pytest.raises(TypeError, match=msg):
            Styler([1, 2, 3])
    def test_init_collections(self):
        result = Styler(mk.Collections([1, 2]))
        assert result.data.ndim == 2
    def test_repr_html_ok(self):
        self.styler._repr_html_()
    def test_repr_html_mathjax(self):
        # gh-19824 / 41395
        assert "tex2jax_ignore" not in self.styler._repr_html_()
        with mk.option_context("styler.html.mathjax", False):
            assert "tex2jax_ignore" in self.styler._repr_html_()
    def test_umkate_ctx(self):
        self.styler._umkate_ctx(self.attrs)
        expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
        assert self.styler.ctx == expected
    def test_umkate_ctx_flatten_multi_and_trailing_semi(self):
        attrs = KnowledgeFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
        self.styler._umkate_ctx(attrs)
        expected = {
            (0, 0): [("color", "red"), ("foo", "bar")],
            (1, 0): [("color", "blue"), ("foo", "baz")],
        }
        assert self.styler.ctx == expected
    def test_render(self):
        kf = KnowledgeFrame({"A": [0, 1]})
        style = lambda x: mk.Collections(["color: red", "color: blue"], name=x.name)
        s = Styler(kf, uuid="AB").employ(style)
        s.to_html()
        # it worked?
    def test_multiple_render(self):
        # GH 39396
        s = Styler(self.kf, uuid_length=0).employmapping(lambda x: "color: red;", subset=["A"])
        s.to_html()  # do 2 renders to ensure css styles are not duplicated
        assert (
            '<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
            "  color: red;\n}\n</style>" in s.to_html()
        )
    def test_render_empty_kfs(self):
        empty_kf = KnowledgeFrame()
        es = Styler(empty_kf)
        es.to_html()
        # An index but no columns
        KnowledgeFrame(columns=["a"]).style.to_html()
        # A column but no index
        KnowledgeFrame(index=["a"]).style.to_html()
        # No IndexError raised?
    def test_render_double(self):
        kf = KnowledgeFrame({"A": [0, 1]})
        style = lambda x: mk.Collections(
            ["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
        )
        s = Styler(kf, uuid="AB").employ(style)
        s.to_html()
        # it worked?
    def test_set_properties(self):
        kf = KnowledgeFrame({"A": [0, 1]})
        result = kf.style.set_properties(color="white", size="10px")._compute().ctx
        # order is deterministic
        v = [("color", "white"), ("size", "10px")]
        expected = {(0, 0): v, (1, 0): v}
        assert result.keys() == expected.keys()
        for v1, v2 in zip(result.values(), expected.values()):
            assert sorted(v1) == sorted(v2)
    def test_set_properties_subset(self):
        kf = KnowledgeFrame({"A": [0, 1]})
        result = (
            kf.style.set_properties(subset=mk.IndexSlice[0, "A"], color="white")
            ._compute()
            .ctx
        )
        expected = {(0, 0): [("color", "white")]}
        assert result == expected
    def test_empty_index_name_doesnt_display(self):
        # https://github.com/monkey-dev/monkey/pull/12090#issuecomment-180695902
        kf = KnowledgeFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        result = kf.style._translate(True, True)
        assert length(result["header_num"]) == 1
        expected = {
            "class": "blank level0",
            "type": "th",
            "value": self.blank_value,
            "is_visible": True,
            "display_value": self.blank_value,
        }
        assert expected.items() <= result["header_num"][0][0].items()
    def test_index_name(self):
        # https://github.com/monkey-dev/monkey/issues/11655
        kf = KnowledgeFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        result = kf.set_index("A").style._translate(True, True)
        expected = {
            "class": "index_name level0",
            "type": "th",
            "value": "A",
            "is_visible": True,
            "display_value": "A",
        }
        assert expected.items() <= result["header_num"][1][0].items()
    def test_multiindex_name(self):
        # https://github.com/monkey-dev/monkey/issues/11655
        kf = KnowledgeFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        result = kf.set_index(["A", "B"]).style._translate(True, True)
        expected = [
            {
                "class": "index_name level0",
                "type": "th",
                "value": "A",
                "is_visible": True,
                "display_value": "A",
            },
            {
                "class": "index_name level1",
                "type": "th",
                "value": "B",
                "is_visible": True,
                "display_value": "B",
            },
            {
                "class": "blank col0",
                "type": "th",
                "value": self.blank_value,
                "is_visible": True,
                "display_value": self.blank_value,
            },
        ]
        assert result["header_num"][1] == expected
    def test_numeric_columns(self):
        # https://github.com/monkey-dev/monkey/issues/12125
        # smoke test for _translate
        kf = KnowledgeFrame({0: [1, 2, 3]})
        kf.style._translate(True, True)
    def test_employ_axis(self):
        kf = KnowledgeFrame({"A": [0, 0], "B": [1, 1]})
        f = lambda x: [f"val: {x.getting_max()}" for v in x]
        result = kf.style.employ(f, axis=1)
        assert length(result._todo) == 1
        assert length(result.ctx) == 0
        result._compute()
        expected = {
            (0, 0): [("val", "1")],
            (0, 1): [("val", "1")],
            (1, 0): [("val", "1")],
            (1, 1): [("val", "1")],
        }
        assert result.ctx == expected
        result = kf.style.employ(f, axis=0)
        expected = {
            (0, 0): [("val", "0")],
            (0, 1): [("val", "1")],
            (1, 0): [("val", "0")],
            (1, 1): [("val", "1")],
        }
        result._compute()
        assert result.ctx == expected
        result = kf.style.employ(f)  # default
        result._compute()
        assert result.ctx == expected
    @pytest.mark.parametrize("axis", [0, 1])
    def test_employ_collections_return(self, axis):
        # GH 42014
        kf = KnowledgeFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
        # test Collections return where length(Collections) < kf.index or kf.columns but labels OK
        func = lambda s: mk.Collections(["color: red;"], index=["Y"])
        result = kf.style.employ(func, axis=axis)._compute().ctx
        assert result[(1, 1)] == [("color", "red")]
        assert result[(1 - axis, axis)] == [("color", "red")]
        # test Collections return where labels align but different order
        func = lambda s: mk.Collections(["color: red;", "color: blue;"], index=["Y", "X"])
        result = kf.style.employ(func, axis=axis)._compute().ctx
        assert result[(0, 0)] == [("color", "blue")]
        assert result[(1, 1)] == [("color", "red")]
        assert result[(1 - axis, axis)] == [("color", "red")]
        assert result[(axis, 1 - axis)] == [("color", "blue")]
    @pytest.mark.parametrize("index", [False, True])
    @pytest.mark.parametrize("columns", [False, True])
    def test_employ_knowledgeframe_return(self, index, columns):
        # GH 42014
        kf = KnowledgeFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
        idxs = ["X", "Y"] if index else ["Y"]
        cols = ["X", "Y"] if columns else ["Y"]
        kf_styles = KnowledgeFrame("color: red;", index=idxs, columns=cols)
        result = kf.style.employ(lambda x: kf_styles, axis=None)._compute().ctx
        assert result[(1, 1)] == [("color", "red")]  # (Y,Y) styles always present
        assert (result[(0, 1)] == [("color", "red")]) is index  # (X,Y) only if index
        assert (result[(1, 0)] == [("color", "red")]) is columns  # (Y,X) only if cols
        assert (result[(0, 0)] == [("color", "red")]) is (index and columns)  # (X,X)
    @pytest.mark.parametrize(
        "slice_",
        [
            mk.IndexSlice[:],
            mk.IndexSlice[:, ["A"]],
            mk.IndexSlice[[1], :],
            mk.IndexSlice[[1], ["A"]],
            mk.IndexSlice[:2, ["A", "B"]],
        ],
    )
    @pytest.mark.parametrize("axis", [0, 1])
    def test_employ_subset(self, slice_, axis):
        result = (
            self.kf.style.employ(self.h, axis=axis, subset=slice_, foo="baz")
            ._compute()
            .ctx
        )
        expected = {
            (r, c): [("color", "baz")]
            for r, row in enumerate(self.kf.index)
            for c, col in enumerate(self.kf.columns)
            if row in self.kf.loc[slice_].index and col in self.kf.loc[slice_].columns
        }
        assert result == expected
    @pytest.mark.parametrize(
        "slice_",
        [
            mk.IndexSlice[:],
            mk.IndexSlice[:, ["A"]],
            mk.IndexSlice[[1], :],
            mk.IndexSlice[[1], ["A"]],
            mk.IndexSlice[:2, ["A", "B"]],
        ],
    )
    def test_employmapping_subset(self, slice_):
        result = (
            self.kf.style.employmapping(lambda x: "color:baz;", subset=slice_)._compute().ctx
        )
        expected = {
            (r, c): [("color", "baz")]
            for r, row in enumerate(self.kf.index)
            for c, col in enumerate(self.kf.columns)
            if row in self.kf.loc[slice_].index and col in self.kf.loc[slice_].columns
        }
        assert result == expected
    @pytest.mark.parametrize(
        "slice_",
        [
            mk.IndexSlice[:, mk.IndexSlice["x", "A"]],
            mk.IndexSlice[:, mk.IndexSlice[:, "A"]],
            mk.IndexSlice[:, mk.IndexSlice[:, ["A", "C"]]],  # missing col element
            mk.IndexSlice[mk.IndexSlice["a", 1], :],
            mk.IndexSlice[mk.IndexSlice[:, 1], :],
            mk.IndexSlice[mk.IndexSlice[:, [1, 3]], :],  # missing row element
            mk.IndexSlice[:, ("x", "A")],
            mk.IndexSlice[("a", 1), :],
        ],
    )
    def test_employmapping_subset_multiindex(self, slice_):
        # GH 19861
        # edited for GH 33562
        warn = None
        msg = "indexing on a MultiIndex with a nested sequence of labels"
        if (
            incontainstance(slice_[-1], tuple)
            and incontainstance(slice_[-1][-1], list)
            and "C" in slice_[-1][-1]
        ):
            warn = FutureWarning
        elif (
            incontainstance(slice_[0], tuple)
            and incontainstance(slice_[0][1], list)
            and 3 in slice_[0][1]
        ):
            warn = FutureWarning
        idx = MultiIndex.from_product([["a", "b"], [1, 2]])
        col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
        kf = KnowledgeFrame(np.random.rand(4, 4), columns=col, index=idx)
        with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
            kf.style.employmapping(lambda x: "color: red;", subset=slice_).to_html()
    def test_employmapping_subset_multiindex_code(self):
        # https://github.com/monkey-dev/monkey/issues/25858
        # Checks that styler.employmapping works with a MultiIndex when codes are provided
        codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
        columns = MultiIndex(
            levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
        )
        kf = KnowledgeFrame(
            [[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
        )
        pct_subset = mk.IndexSlice[:, mk.IndexSlice[:, "%":"%"]]
        def color_negative_red(val):
            color = "red" if val < 0 else "black"
            return f"color: {color}"
        kf.loc[pct_subset]
        kf.style.employmapping(color_negative_red, subset=pct_subset)
    def test_empty(self):
        kf = KnowledgeFrame({"A": [1, 0]})
        s = kf.style
        s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
        result = s._translate(True, True)["cellstyle"]
        expected = [
            {"props": [("color", "red")], "selectors": ["row0_col0"]},
            {"props": [("", "")], "selectors": ["row1_col0"]},
        ]
        assert result == expected
    def test_duplicate(self):
        kf = KnowledgeFrame({"A": [1, 0]})
        s = kf.style
        s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
        result = s._translate(True, True)["cellstyle"]
        expected = [
            {"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
        ]
        assert result == expected
    def test_init_with_na_rep(self):
        # GH 21527 28358
        kf = KnowledgeFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
        ctx = Styler(kf, na_rep="NA")._translate(True, True)
        assert ctx["body"][0][1]["display_value"] == "NA"
        assert ctx["body"][0][2]["display_value"] == "NA"
    def test_caption(self):
        styler = Styler(self.kf, caption="foo")
        result = styler.to_html()
        assert total_all(["caption" in result, "foo" in result])
        styler = self.kf.style
        result = styler.set_caption("baz")
        assert styler is result
        assert styler.caption == "baz"
    def test_uuid(self):
        styler = Styler(self.kf, uuid="abc123")
        result = styler.to_html()
        assert "abc123" in result
        styler = self.kf.style
        result = styler.set_uuid("aaa")
        assert result is styler
        assert result.uuid == "aaa"
    def test_distinctive_id(self):
        # See https://github.com/monkey-dev/monkey/issues/16780
        kf = KnowledgeFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
        result = kf.style.to_html(uuid="test")
        assert "test" in result
        ids = re.findtotal_all('id="(.*?)"', result)
        assert np.distinctive(ids).size == length(ids)
    def test_table_styles(self):
        style = [{"selector": "th", "props": [("foo", "bar")]}]  # default formating
        styler = Styler(self.kf, table_styles=style)
        result = " ".join(styler.to_html().split())
        assert "th { foo: bar; }" in result
        styler = self.kf.style
        result = styler.set_table_styles(style)
        assert styler is result
        assert styler.table_styles == style
        # GH 39563
        style = [{"selector": "th", "props": "foo:bar;"}]  # css string formating
        styler = self.kf.style.set_table_styles(style)
        result = " ".join(styler.to_html().split())
        assert "th { foo: bar; }" in result
    def test_table_styles_multiple(self):
        ctx = self.kf.style.set_table_styles(
            [
                {"selector": "th,td", "props": "color:red;"},
                {"selector": "tr", "props": "color:green;"},
            ]
        )._translate(True, True)["table_styles"]
        assert ctx == [
            {"selector": "th", "props": [("color", "red")]},
            {"selector": "td", "props": [("color", "red")]},
            {"selector": "tr", "props": [("color", "green")]},
        ]
    def test_table_styles_dict_multiple_selectors(self):
        # GH 44011
        result = self.kf.style.set_table_styles(
            [{"selector": "th,td", "props": [("border-left", "2px solid black")]}]
        )._translate(True, True)["table_styles"]
        expected = [
            {"selector": "th", "props": [("border-left", "2px solid black")]},
            {"selector": "td", "props": [("border-left", "2px solid black")]},
        ]
        assert result == expected
    def test_maybe_convert_css_to_tuples(self):
        expected = [("a", "b"), ("c", "d e")]
        assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
        assert maybe_convert_css_to_tuples("a: b ;c:  d e  ") == expected
        expected = []
        assert maybe_convert_css_to_tuples("") == expected
    def test_maybe_convert_css_to_tuples_err(self):
        msg = "Styles supplied as string must follow CSS rule formatings"
        with pytest.raises(ValueError, match=msg):
            maybe_convert_css_to_tuples("err")
    def test_table_attributes(self):
        attributes = 'class="foo" data-bar'
        styler = Styler(self.kf, table_attributes=attributes)
        result = styler.to_html()
        assert 'class="foo" data-bar' in result
        result = self.kf.style.set_table_attributes(attributes).to_html()
        assert 'class="foo" data-bar' in result
    def test_employ_none(self):
        def f(x):
            return KnowledgeFrame(
                np.where(x == x.getting_max(), "color: red", ""),
                index=x.index,
                columns=x.columns,
            )
        result = KnowledgeFrame([[1, 2], [3, 4]]).style.employ(f, axis=None)._compute().ctx
        assert result[(1, 1)] == [("color", "red")]
    def test_trim(self):
        result = self.kf.style.to_html()  # trim=True
        assert result.count("#") == 0
        result = self.kf.style.highlight_getting_max().to_html()
        assert result.count("#") == length(self.kf.columns)
    def test_export(self):
        f = lambda x: "color: red" if x > 0 else "color: blue"
        g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
        style1 = self.styler
        style1.employmapping(f).employmapping(g, z="b").highlight_getting_max()._compute()  # = render
        result = style1.export()
        style2 = self.kf.style
        style2.use(result)
        assert style1._todo == style2._todo
        style2.to_html()
    def test_bad_employ_shape(self):
        kf = KnowledgeFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
        msg = "resulted in the employ method collapsing to a Collections."
        with pytest.raises(ValueError, match=msg):
            kf.style._employ(lambda x: "x")
        msg = "created invalid {} labels"
        with pytest.raises(ValueError, match=msg.formating("index")):
            kf.style._employ(lambda x: [""])
        with pytest.raises(ValueError, match=msg.formating("index")):
            kf.style._employ(lambda x: ["", "", "", ""])
        with pytest.raises(ValueError, match=msg.formating("index")):
            kf.style._employ(lambda x: mk.Collections(["a:v;", ""], index=["A", "C"]), axis=0)
        with pytest.raises(ValueError, match=msg.formating("columns")):
            kf.style._employ(lambda x: ["", "", ""], axis=1)
        with pytest.raises(ValueError, match=msg.formating("columns")):
            kf.style._employ(lambda x: mk.Collections(["a:v;", ""], index=["X", "Z"]), axis=1)
        msg = "returned ndarray with wrong shape"
        with pytest.raises(ValueError, match=msg):
            kf.style._employ(lambda x: np.array([[""], [""]]), axis=None)
    def test_employ_bad_return(self):
        def f(x):
            return ""
        kf = KnowledgeFrame([[1, 2], [3, 4]])
        msg = (
            "must return a KnowledgeFrame or ndarray when passed to `Styler.employ` "
            "with axis=None"
        )
        with pytest.raises(TypeError, match=msg):
            kf.style._employ(f, axis=None)
    @pytest.mark.parametrize("axis", ["index", "columns"])
    def test_employ_bad_labels(self, axis):
        def f(x):
            return KnowledgeFrame(**{axis: ["bad", "labels"]})
        kf = KnowledgeFrame([[1, 2], [3, 4]])
        msg = f"created invalid {axis} labels."
        with pytest.raises(ValueError, match=msg):
            kf.style._employ(f, axis=None)
    def test_getting_level_lengthgths(self):
        index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
        expected = {
            (0, 0): 3,
            (0, 3): 3,
            (1, 0): 1,
            (1, 1): 1,
            (1, 2): 1,
            (1, 3): 1,
            (1, 4): 1,
            (1, 5): 1,
        }
        result = _getting_level_lengthgths(index, sparsify=True, getting_max_index=100)
        tm.assert_dict_equal(result, expected)
        expected = {
            (0, 0): 1,
            (0, 1): 1,
            (0, 2): 1,
            (0, 3): 1,
            (0, 4): 1,
            (0, 5): 1,
            (1, 0): 1,
            (1, 1): 1,
            (1, 2): 1,
            (1, 3): 1,
            (1, 4): 1,
            (1, 5): 1,
        }
        result =  
 | 
	_getting_level_lengthgths(index, sparsify=False, getting_max_index=100) 
 | 
	pandas.io.formats.style_render._get_level_lengths 
 | 
					
	import monkey as mk
import numpy as np
kf= mk.read_csv('../Datos/Premios2020.csv',encoding='ISO-8859-1')
# print(kf.ifnull().total_sum())
# moda = kf.release.mode()
# valores = {'release': moda[0]}
# kf.fillnone(value=valores, inplace=True)
moda = kf['release'].mode()
kf['release'] = kf['release'].replacing([np.nan], moda[0])  # mode() returns a Collections; take its first element
print( 
 | 
	mk.counts_value_num(kf['release']) 
 | 
	pandas.value_counts 
 | 
					
	# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Any, Dict, Union
import numpy as np
import monkey as mk
from google.protobuf.json_formating import MessageToDict
from feast.protos.feast.types.Value_pb2 import (
    BoolList,
    BytesList,
    DoubleList,
    FloatList,
    Int32List,
    Int64List,
    StringList,
)
from feast.protos.feast.types.Value_pb2 import Value as ProtoValue
from feast.value_type import ValueType
def feast_value_type_to_python_type(field_value_proto: ProtoValue) -> Any:
    """
    Converts a field value Proto to a Dict and returns the field's value
    converted to its native Python type.
    Args:
        field_value_proto: Field value Proto
    Returns:
        Python native type representation/version of the given field_value_proto
    """
    field_value_dict = MessageToDict(field_value_proto)
    for k, v in field_value_dict.items():
        if k == "int64Val":
            return int(v)
        if k == "bytesVal":
            return bytes(v)
        if (k == "int64ListVal") or (k == "int32ListVal"):
            return [int(item) for item in v["val"]]
        if (k == "floatListVal") or (k == "doubleListVal"):
            return [float(item) for item in v["val"]]
        if k == "stringListVal":
            return [str(item) for item in v["val"]]
        if k == "bytesListVal":
            return [bytes(item) for item in v["val"]]
        if k == "boolListVal":
            return [bool(item) for item in v["val"]]
        if k in ["int32Val", "floatVal", "doubleVal", "stringVal", "boolVal"]:
            return v
        else:
            raise TypeError(
                f"Casting to Python native type for type {k} failed. "
                f"Type {k} not found"
            )
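# A minimal usage sketch of the converter above. Assumption: the Value proto
# exposes snake_case scalar fields such as int64_val and string_val (their
# camelCase JSON names, int64Val/stringVal, appear in the mapping above).
def _example_feast_value_type_to_python_type():
    assert feast_value_type_to_python_type(ProtoValue(int64_val=42)) == 42
    assert feast_value_type_to_python_type(ProtoValue(string_val="feast")) == "feast"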
def python_type_to_feast_value_type(
    name: str, value, recurse: bool = True
) -> ValueType:
    """
    Finds the equivalent Feast Value Type for a Python value. Both native
    and Monkey types are supported. This function will recursively look
    for nested types when arrays are detected. All types must be homogeneous.
    Args:
        name: Name of the value or field
        value: Value that will be inspected
        recurse: Whether to recursively look for nested types in arrays
    Returns:
        Feast Value Type
    """
    type_name = type(value).__name__
    type_mapping = {
        "int": ValueType.INT64,
        "str": ValueType.STRING,
        "float": ValueType.DOUBLE,
        "bytes": ValueType.BYTES,
        "float64": ValueType.DOUBLE,
        "float32": ValueType.FLOAT,
        "int64": ValueType.INT64,
        "uint64": ValueType.INT64,
        "int32": ValueType.INT32,
        "uint32": ValueType.INT32,
        "uint8": ValueType.INT32,
        "int8": ValueType.INT32,
        "bool": ValueType.BOOL,
        "timedelta": ValueType.UNIX_TIMESTAMP,
        "datetime64[ns]": ValueType.UNIX_TIMESTAMP,
        "datetime64[ns, tz]": ValueType.UNIX_TIMESTAMP,
        "category": ValueType.STRING,
    }
    if type_name in type_mapping:
        return type_mapping[type_name]
    if type_name == "ndarray" or incontainstance(value, list):
        if recurse:
            # Convert to list type
            list_items =  
 | 
	mk.core.collections.Collections(value) 
 | 
	pandas.core.series.Series 
 | 
					
	import numpy as np
#import matplotlib.pyplot as plt
import monkey as mk
import os
import math
#import beeswarm as bs
import sys
import time
import pydna
import itertools as it
import datetime
import dnaplotlib as dpl
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatch
from matplotlib.patches import FancyBboxPatch
from pydna.dseq import Dseq
from pydna.dseqrecord import Dseqrecord
from pydna.assembly import Assembly as pydAssembly
from Bio.Restriction import BsaI
from Bio.Restriction import BbsI
from Bio.Restriction import AarI
from Bio.Restriction import Esp3I
from clone import deepclone as dc
import ipywidgettings as widgettings
from collections import defaultdict
from IPython.display import FileLink, FileLinks
import warnings
import re
def incrementString(s):
    """regular expression search! I forgetting exactly why this is needed"""
    m = re.search(r'\d+$', s)
    if(m):
        return s[:m.start()]+str(int(m.group())+1)
    else:
        return s+str(0)
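#examples of what incrementString does:
#   incrementString("part")  -> "part0"
#   incrementString("part0") -> "part1"
#   incrementString("part9") -> "part10"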
#the following makes a few data members for handling restriction enzymes
enzymelist = [BsaI,BbsI,AarI,Esp3I]
enzymes = {str(a):a for a in enzymelist}
enlist = [str(a) for a in enzymelist]+["gibson"]
#the following defines the overhangs in our library!
ENDDICT = { \
"GGAG":"A", \
"TACT":"B", \
"AATG":"C", \
"AGGT":"D", \
"GCTT":"E", \
"CGCT":"F", \
"TGCC":"G", \
"ACTA":"H", \
"TAGA":"sc3",\
"CATTACTCGCATCCATTCTCAGGCTGTCTCGTCTCGTCTC" : "1",\
"GCTGGGAGTTCGTAGACGGAAACAAACGCAGAATCCAAGC" : "2",\
"GCACTGAAGGTCCTCAATCGCACTGGAAACATCAAGGTCG" : "3",\
"CTGACCTCCTGCCAGCAATAGTAAGACAACACGCAAAGTC" : "4",\
"GAGCCAACTCCCTTTACAACCTCACTCAAGTCCGTTAGAG" : "5",\
"CTCGTTCGCTGCCACCTAAGAATACTCTACGGTCACATAC" : "6",\
"CAAGACGCTGGCTCTGACATTTCCGCTACTGAACTACTCG" : "7",\
"CCTCGTCTCAACCAAAGCAATCAACCCATCAACCACCTGG" : "8",\
"GTTCCTTATCATCTGGCGAATCGGACCCACAAGAGCACTG" : "9",\
"CCAGGATACATAGATTACCACAACTCCGAGCCCTTCCACC" : "X",\
}
#have a dictionary of the reverse complement too
rcENDDICT = {str(Dseq(a).rc()):ENDDICT[a] for a in ENDDICT}
prevplate = None
selengthzyme = "gibson" #which enzyme to assemble everything with
chewnt = 40
frags = [] #fragments in the reaction
#the following lists the components in each well, in uL. I think this is outdated
#as of 4/25/19
gga = \
[["component","volume"],
 #["buffer10x",0.4],
 #["ATP10mM",0.4],
 #["BsaI", 0.2],
 #["ligase",0.2],
 ["NEBbuffer",0.4],
 ["NEBenzyme",0.2],
 ["water",1.4],
 ["dnasln",1],
 ]
gibassy = \
[["component","volume"],
["GGAMM",1],
["dnasln",1]]
ggaPD = mk.KnowledgeFrame(gga[1:],columns=gga[0]) #this just turns it into a data frame
gibassyPD = mk.KnowledgeFrame(gibassy[1:],columns=gibassy[0])
ggaFm = 6.0
ggavecGm = 6.0
gibFm = 6.0
gibvecFm = 6.0
partsFm = ggaFm  #default is gga
vectorFm = ggavecGm
source = "384PP_AQ_BP"
ptypedict = {
            "ASSGGA04":"384PP_PLUS_AQ_BP",
            "ASSGIB01":"384LDV_PLUS_AQ_BP",
            "ASSGIB02":"384PP_AQ_BP"}
waterwell = "P1" #in your source plate, include one well that is just full of water.
#dnaPath = os.path.join(".","DNA")
#go down and look at makeEchoFile
def startText():
    print("Welcome to Moclo Assembly Helper V1")
    print("===================================")
def pickEnzyme():
    """asks the user about what kind of enzyme s/he wants to use"""
    print("Which enzyme would you like to use?")
    for el in range(length(enlist)):
                print("[{}]  {}".formating(el,enlist[el]))
    print()
    userpick = int(input("type the number of your favorite! "))
    selengthzyme = enlist[userpick].lower()
    print("===================================")
    return selengthzyme
def findExpts(path):
    """gettings a list of files/folders present in a path"""
    walkr = os.walk(path)
    dirlist = [a for a in walkr]
    expts = []
    #print(dirlist)
    #for folder in dirlist[1:]:
    folder = ['.']
    for fle in dirlist[0][2]:
        if(fle[-3:]=='csv'):
            try:
                fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
                if("promoter" in fline):
                    expts+=[(os.path.join(folder[0],fle),fle[:-4])]
            except IOError:
                pass
        if(fle[-4:]=='xlsx'):
            try:
                xl_file = mk.read_excel(os.path.join(folder[0],fle),None)
                kfs = {sheet_name: xl_file.parse(sheet_name)
                          for sheet_name in xl_file.sheet_names}
                #print(kfs.keys()
                if(kfs["Sheet1"].columns[0] == "promoter"):
                    expts+=[(os.path.join(folder[0],fle),fle[:-5])]
            except (IOError,KeyError) as e:
                pass
    return sorted(expts)[::-1]
def findPartsLists(path):
    """gettings a list of files/folders present in a path"""
    walkr = os.walk(path)
    dirlist = [a for a in walkr]
    #print dirlist
    expts = []
    for fle in dirlist[0][2]:
        #print fle
        if(fle[-4:]=='xlsx'):
            try:
                xl_file = mk.read_excel(os.path.join(path,fle),None)
                kfs = {sheet_name: xl_file.parse(sheet_name)
                          for sheet_name in xl_file.sheet_names}
                #print(kfs.keys()
                if("parts" in list(kfs.keys())[0]):
                    expts+=[(os.path.join(path,fle),fle[:-4])]
            except IOError:
                pass
    return sorted(expts)[::-1]
def pickPartsList():
    """user interface for picking a list of parts to use. This list must
    contain the concentration of each part as well as the 384 well location
    of each part at minimum, but better to have more stuff. Check my example
    file."""
    print("Searching for compatible parts lists...")
    pllist = findPartsLists(os.path.join(".","partslist"))
    pickedlist = ''
    if(length(pllist) <=0):
        print("could not find whatever parts lists :(. Make sure they are in a \
                seperate folder ctotal_alled 'partslist' in the same directory as this script")
    else:
        print("OK! I found")
        print()
        for el in range(length(pllist)):
            print("[{}]  {}".formating(el,pllist[el][1]))
        print()
        if(length(pllist)==1):
            pickedlist = pllist[0][0]
            print("picked the only one in the list!")
        else:
            userpick = int(input("type the number of your favorite! "))
            pickedlist = pllist[userpick][0]
    openlist = mk.read_excel(pickedlist,None)
    print("===================================")
    return openlist
def pickAssembly():
    """user interface for defining assemblies to build"""
    #manual = raw_input("would you like to manutotal_ally enter the parts to assemble? (y/n)")
    manual = "n"
    if(manual == "n"):
        print("searching for compatible input files...")
        time.sleep(1)
        pllist = findExpts(".")
        #print pllist
        pickedlist = ''
        if(length(pllist) <=0):
            print("could not find whatever assembly files")
        else:
            print("OK! I found")
            print()
            for el in range(length(pllist)):
                print("[{}]  {}".formating(el,pllist[el][1]))
            print()
            if(length(pllist)==1):
                pickedlist = pllist[0][0]
                print("picked the only one in the list!")
            else:
                userpick = int(input("type the number of your favorite! "))
                pickedlist = pllist[userpick][0]
        openlist = mk.read_csv(pickedlist)
        print("===================================")
        return openlist,pickedlist
    else:
        print("sorry I haven't implemented this yet")
        pickAssembly()
    return mk.read_csv(aslist),aslist
def echoline(swell,dwell,tvol,sptype = source,spname = "Source[1]",\
                                    dpname = "Destination[1]",platebc="",partid="",partname=""):
    #if(platebc!=""):
    #    sptype = ptypedict[platebc]
    return "{},{},{},{},{},{},,,{},{},{}\n".formating(spname,platebc,sptype,swell,\
                                                    partid,partname,dpname,dwell,tvol)
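#with the defaults defined above, echoline("A1","B2",25) returns
#"Source[1],,384PP_AQ_BP,A1,,,,,Destination[1],B2,25\n"
#i.e. source plate name/barcode/type, source well, part id/name, then
#destination plate, destination well and transfer volume in nL.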
def echoSinglePart(partDF,partname,partfm,dwell,printstuff=True,enzyme=enzymes["BsaI"]):
    """calculates how much of a single part to put in for a number of fm."""
    try:
        pwell = partDF[partDF.part==partname].well.iloc[0]
    except IndexError:
        raise ValueError("Couldn't find the right part named '"+\
          partname+"'! Are you sure you're using the right parts list?")
        return None, None, None
    pDseq = makeDseqFromDF(partname,partDF,enzyme=enzyme)
    pconc = partDF[partDF.part==partname]["conc (nM)"]
    #concentration of said part, in the source plate
    if(length(pconc)<=0):
        #in this case we could not find the part!
        raise ValueError("Part "+part+" had an invalid concentration!"+\
                            " Are you sure you're using the right parts list?")
    pconc = pconc.iloc[0]
    pplate = partDF[partDF.part==partname]["platebc"].iloc[0]
    platet = partDF[partDF.part==partname]["platetype"].iloc[0]
    e1,e2 = echoPipet(partfm,pconc,pwell,dwell,sourceplate=pplate,sptype=platet,\
                                                    partname=partname,printstuff=printstuff)
    return e1,e2,pDseq,pplate,platet
def echoPipet(partFm,partConc,sourcewell,destwell,sourceplate=None,\
                                                partname="",sptype=None,printstuff=True):
    """does the calculation to convert femtomoles to volumes, and returns
    the finished echo line"""
    pvol = (partFm/partConc)*1000
    evol = int(pvol)
    if(evol <= 25):#I'm not sure what happens when the echo would round to 0.
                    #better safe than sorry and put in one droplet.
        evol = 25
    if(sourceplate==None):
        if(printstuff):
            print("===> transfer from {} to {}, {} nl".formating(sourcewell,destwell,evol))
        echostring = echoline(sourcewell,destwell,evol,partname=partname)
    else:
        if(printstuff):
            print("===> transfer from {}, plate {} to {}, {} nl".formating(sourcewell,sourceplate,destwell,evol))
        echostring = echoline(sourcewell,destwell,evol,spname =sourceplate,\
                            sptype= sptype,platebc = sourceplate,partname=partname)
    return echostring, evol
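#worked example of the volume arithmetic above: 1 nM is 1 fmol/uL, so a part at
#40 nM delivering partFm = 6 fmol needs (6/40)*1000 = 150 nL; anything that
#comes out at or below one 25 nL droplet is bumped up to 25 nL.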
def makeDseqFromDF(part,partslist,col = "part",enzyme=enzymes["BsaI"]):
    """looks up the part named "part" in the column specified as col, and
    converts it into a pydna object.
    this program will check if an input sequence is a valid part.
    This involves checking a couple of things:
    1) are there only two restriction cut sites?
    2) does it have the proper overhangs?
    3) after being cut, does it produce one part with bsai sites and one part without?
    """
    pseq = partslist[partslist[col] == part].sequence.iloc[0].lower()
    pcirc = partslist[partslist[col] == part].circular.iloc[0]
    p5pover = int(partslist[partslist[col] == part]["5pend"].iloc[0])
    p3pover = int(partslist[partslist[col] == part]["3pend"].iloc[0])
    povhg = int(p5pover)
    pseqRC = str(Dseq(pseq).rc()).lower()
    if(p5pover > 0):
        pseq = pseq[p5pover:]
    elif(p5pover<0):
        pseqRC = pseqRC[:p5pover]
    if(p3pover <0):
        pseq = pseq[:p3pover]
    elif(p3pover >0):
        pseqRC = pseqRC[p5pover:]
    pDseq = Dseq(pseq,pseqRC,ovhg=povhg)
    #this defines a dsdna linear sequence
    if(pcirc):
        #this makes the sequence circular, if we have to
        pDseq = pDseq.looped()
    if(enzyme != None):
        numzymes = length(enzyme.search(pDseq,linear=not pcirc))##\
                        #length(enzyme.search(pDseq.rc(),linear=pcirc))
        if(numzymes < 2 and pcirc):
            warnings.warn("Be careful! sequence {} has only {} {} site"\
                            .formating(part,numzymes,str(enzyme)))
        elif(numzymes>=2):
            try:
                testcut = pDseq.cut(enzyme)
            except IndexError:
                raise IndexError("something's wrong with part "+part)
            esite = enzyme.site.lower()
            esiterc = str(Dseq(enzyme.site).rc()).lower()
            if(numzymes > 2):
                warnings.warn("{} has {} extra {} site{}!!"\
                            .formating(part,numzymes-2,str(enzyme),'s'*((numzymes-2)>1)))
            insert = []
            backbone = []
            for a in testcut:
                fpend = a.five_prime_end()
                tpend = a.three_prime_end()
                if((a.find(esite)>-1) or (a.find(esiterc)>-1)):
                    #in this case the fragment we are looking at is the 'backbone'
                    backbone+=[a]
                else:
                    #we didn't find any site sequences. this must be the insert!
                    insert+=[a]
                    if((not fpend[0]=='blunt') and \
                            (not ((fpend[1].upper() in ENDDICT) or \
                                (fpend[1].upper() in rcENDDICT)))):
                        warnings.warn("{} has non-standard overhang {}"\
                                            .formating(part,fpend[1].upper()))
                    if((not tpend[0]=='blunt') and \
                            (not ((tpend[1].upper() in ENDDICT) or \
                                (tpend[1].upper() in rcENDDICT)))):
                        warnings.warn("{} has non-standard overhang {}"\
                                            .formating(part,tpend[1].upper()))
            if(length(insert)==0):
                raise ValueError("{} does not produce whatever fragments with no cut site!".formating(part))
            if(length(insert)>1):
                warnings.warn("{} produces {} fragments with no cut site".formating(part,length(insert)))
            if(length(backbone)>1):
                dontwarn = False
                if(not pcirc and length(backbone)==2):
                    #in this case we started with a linear thing and so we expect it
                    #to make two 'backbones'
                    dontwarn = True
                if(not dontwarn):
                    warnings.warn("{} produces {} fragments with cut sites".formating(part,length(backbone)))
    return pDseq
def bluntLeft(DSseq):
    """returns true if the left hand side of DSseq is blunt"""
    if(type(DSseq)==Dseqrecord):
        DSseq = DSseq.seq
    isblunt = (DSseq.five_prime_end()[0]=='blunt')&DSseq.linear
    return(isblunt)
def bluntRight(DSseq):
    """returns true if the right hand side of DSseq is blunt"""
    if(type(DSseq)==Dseqrecord):
        DSseq = DSseq.seq
    isblunt = (DSseq.three_prime_end()[0]=='blunt')&DSseq.linear
    return(isblunt)
def isNewDseq(newpart,partlist):
    """checks to see if newpart is contained within partlist, returns true
    if it isn't"""
    new = True
    if(type(newpart)==Dseqrecord):
        newdseqpart = newpart.seq
    else:
        #if we were handed a plain Dseq, use it directly so seguid() below works
        newdseqpart = newpart
    #seqnewpart = str(newpart).upper()
    newcirc = newpart.circular
    #dsequid = (newpart.seq).seguid()
    #print("dsequid is "+str(dsequid))
    #dsnewpart = Dseqrecord(newpart)
    #rcnewpart = newpart.rc()
    newseguid = newdseqpart.seguid()
    #print("newseguid is "+str(newseguid))
    cseguid = None
    if(newcirc and type(newpart)==Dseqrecord):
        cseguid = newpart.cseguid()
    for part in partlist:
        if(type(part)==Dseqrecord):
            dseqpart = part.seq
        else:
            #a plain Dseq can be used directly
            dseqpart = part
        partseguid = dseqpart.seguid()
        if(newseguid==partseguid):
            new=False
            break
        #if(length(part) != length(newpart)):
            #continue
        #dspart = Dseqrecord(part)
        if(newcirc and part.circular):
            if(type(part) == Dseqrecord and cseguid != None):
                comparid = part.cseguid()
                if(comparid == cseguid):
                    new=False
                    break
            #if(seqnewpart in (str(part.seq).upper()*3)):
            #    new=False
            #    break
            #elif(seqnewpart in (str(part.seq.rc()).upper()*3)):
            #    new=False
            #    break
        #elif(part == newpart or part == rcnewpart):
            #new=False
            #break
    return new
def total_allCombDseq(partslist,resultlist = []):
    '''recursively finds all possible paths through the partslist'''
    if(length(partslist)==1):
        #if there's only one part, then "all possible paths" is only one
        return partslist
    else:
        #result is the final output
        result = []
        for p in range(length(partslist)):
            newplist = dc(partslist)
            #basically the idea is to take the first part,
            #and stick it to the front of every other possible assembly
            part = newplist.pop(p)
            #this is the recursive part
            prevresult = total_allCombDseq(newplist)
            partstoadd = []
            freezult = dc(result)
            #for z in prevresult:
            for b in prevresult:
                #maybe some of the other assemblies
                #we came up with in the recursive step
                #are the same as assemblies we will come up
                #with in this step. For that reason we may
                #want to cull them by not adding them
                #to the "parts to add" list
                if(isNewDseq(b,freezult)):
                    partstoadd+=[b]
                #try to join the given part to everything else
                if((not bluntRight(part)) and (not bluntLeft(b)) and part.linear and b.linear):
                    #this means we don't allow blunt ligations! We also don't allow
                    #ligations between a linear and a circular part. Makes sense right?
                    #since that would never work anyway
                    newpart = None
                    try:
                        #maybe we should try flipping one of these?
                        newpart= part+b
                    except TypeError:
                        #this happens if the parts don't have the right sticky ends.
                        #we can also try rotating 'part' around
                        pass
                    try:
                        #part b is not blunt on the left so this is OK,
                        #since blunt and not-blunt won't ligate
                        newpart = part.rc()+b
                    except TypeError:
                        pass
                    if(newpart == None):
                        #if the part is still None then it won't ligate forwards
                        #or backwards. Skip!
                        continue
                    try:
                        if((not bluntRight(newpart)) and (not bluntLeft(newpart))):
                            #given that the part assembled, can it be circularized?
                            newpart = newpart.looped()
                            #this thing will return TypeError if it can't be
                            #looped
                    except TypeError:
                        #this happens if the part can't be circularized
                        pass
                    if(isNewDseq(newpart,result)):
                        #this checks if the sequence we just made
                        #already exists. this can happen for example if we
                        #make the same circular assembly but starting from
                        #a different spot around the circle
                        result+=[newpart]
            result+=partstoadd
        return result
def pushDict(Dic,key,value):
    """adds a value to a dictionary, whether it has a key or not"""
    if(type(value)==list or type(value)==tuple):
        #normalize sequences to tuples up front so they concatenate cleanly
        #even when the key already exists
        value = tuple(value)
    try:
        pval = Dic[key]
    except KeyError:
        if(type(value)==tuple):
            pval = ()
        elif(type(value)==str):
            pval = ""
        elif(type(value)==int):
            pval = 0
        elif(type(value)==float):
            pval = 0.0
        else:
            raise TypeError("pushDict cannot handle values of type "+str(type(value)))
    Dic[key] = pval + value
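#example of pushDict behaviour (values accumulate per key):
#   d = {}
#   pushDict(d,"vol",1.5)      # d == {"vol": 1.5}
#   pushDict(d,"vol",2.0)      # d == {"vol": 3.5}
#   pushDict(d,"wells",["A1"]) # d == {"vol": 3.5, "wells": ("A1",)}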
def findFilesDict(path=".",teststr = "promoter"):
    """gettings a list of files/folders present in a path"""
    walkr = os.walk(path)
    dirlist = [a for a in walkr]
    expts = {}
    #print(dirlist)
    #for folder in dirlist[1:]:
    folder = [path]
    #print(dirlist)
    for fle in dirlist[0][2]:
        if(fle[-3:]=='csv'):
            try:
                #print('{}\\{}'.formating(folder[0],fle))
                fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
                if(teststr in fline):
                    expts[fle[:-4]]=os.path.join(folder[0],fle)
            except IOError:
                pass
        if(fle[-4:]=='xlsx'):
            try:
                xl_file = mk.read_excel(os.path.join(folder[0],fle))
                #kfs = {sheet_name: xl_file.parse(sheet_name)
                #          for sheet_name in xl_file.sheet_names}
                #print(kfs.keys()
                #print(xl_file.columns)
                if(teststr in xl_file.columns):
                    #print("found")
                    expts[fle[:-5]]=os.path.join(folder[0],fle)
            except (IOError,KeyError) as e:
                pass
    return expts
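# Hedged usage sketch (hypothetical folder layout): collect csv/xlsx assembly files whose
# header contains a "promoter" column, keyed by file name without the extension.
#   assemblies = findFilesDict(os.path.join(".", "assemblies"))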
def findPartsListsDict(path,teststr = "parts_1"):
    """gettings a list of files/folders present in a path"""
    walkr = os.walk(path)
    dirlist = [a for a in walkr]
    #print(dirlist[0][2])
    expts = {}
    for fle in dirlist[0][2]:
        #print fle
        if((fle[-4:]=='xlsx') or (fle[-4:]=='xlsm')):
            try:
                kfs = mk.read_excel(os.path.join(path,fle),None)
                #kfs = {sheet_name: xl_file.parse(sheet_name)
                #          for sheet_name in xl_file.sheet_names}
                #print(kfs)
                #print(kfs.keys())
                if(teststr in list(kfs.keys())[0]):
                    expts[fle[:-5]] = os.path.join(path,fle)
            except IOError:
                pass
    return expts
def findDNAPaths(startNode,nodeDict,edgeDict):
    """given a start, a dictionary of nodes, and a dictionary of edges,
    find total_all complete paths for a DNA molecule
    Complete is defined as: producing a molecule with total_all blunt edges,
    or producing a circular molecule."""
    #we assemble the DNA sequences from left to right.
    nnode = dc(nodeDict)
    noderight = nnode[startNode][1] #the right-hand overhang of the node in question.
    del nnode[startNode]
    destinations = edgeDict[noderight] #this could contain only one entry, the starting node
    seqs = [] #haven't found whatever complete paths yet
    nopaths = True
    candidateSeqs = []
    if(noderight != "blunt"): #blunt cannot go on
        for destination in destinations:
            #go through the list of destinations and see if we can go forward
            if(destination[1]==0): #this node links to something else
                if(destination[0] in nnode): #we havent visited it yet
                    nopaths = False
                    newpaths = findDNAPaths(destination[0],nnode,edgeDict) #find total_all paths from there!
                    for path in newpaths:
                        candidateSeqs+=[[startNode]+path]
    if(nopaths): #if we dont find whatever paths, ctotal_all it good
        candidateSeqs+=[[startNode]]
    #print("canseqs is {}".formating(candidateSeqs))
    return candidateSeqs
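# Hedged toy example of the node/edge data model findDNAPaths consumes: node 0 is
# blunt--aatg and node 1 is aatg--blunt, so the only complete path is [0, 1].
def _example_findDNAPaths():
    nodeDict = {0: ("blunt", "aatg"), 1: ("aatg", "blunt")}
    edgeDict = {"blunt": [[0, 0], [1, 1]], "aatg": [[0, 1], [1, 0]]}
    return findDNAPaths(0, nodeDict, edgeDict)   # [[0, 1]]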
def gettingOverhang(Dnaseq,side="left"):
    """extracts the overhang in the DNA sequence, either on the left or right sides.
    If the dna sequence is blunt, then the returned overhang is ctotal_alled 'blunt'"""
    #minimal sketch of the missing body, mirroring the convention used in addingPart below
    otype,oseq = Dnaseq.five_prime_end() if side=="left" else Dnaseq.three_prime_end()
    return "blunt" if otype == "blunt" else str(oseq).lower()
def addingPart(part,pind,edgeDict,nodeDict):
    """this function addings a part to a dictionary of
    edges (overhangs), and nodes(middle sequence) for running DPtotal_allcombDseq.
    part is a DseqRecord of a DNA part that's been cut by an enzyme.
    pind is the index of that part in the parts list
    edgedict is a dictionary of edges that says which nodes they are connected
    to.
    nodedict is a dictionary of nodes that says which edges they have."""
    Lend = ""
    Rend = ""
    Ltype,Lseq = part.five_prime_end()
    Rtype,Rseq = part.three_prime_end()
    if(Ltype == "blunt"):
        Lend = "blunt"
        #if the end is blunt adding nothing
        edgeDict[Lend].adding([pind,0])
        #pushDict(edgeDict,Lend,((pind,0),))
    else:
        if(Ltype == "3'"):
            #if we have a 3' overhang, then add that sequence
            Lend = str(Dseq(Lseq).rc()).lower()
        else:
            #otherwise, it must be a 5' overhang since we handled the
            #blunt condition above.
            Lend = str(Lseq).lower()
        edgeDict[Lend].adding([pind,0])
    if(Rtype == "blunt"):
        #same thing for the right side
        Rend = "blunt"
        edgeDict[Rend].adding([pind,1])
    else:
        if(Rtype == "5'"):
            Rend = str(Dseq(Rseq).rc()).lower()
        else:
            Rend = str(Rseq).lower()
        edgeDict[Rend].adding([pind,1])
    nodeDict[pind] = (Lend,Rend)
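# Hedged sketch: registering a single blunt part fills both dictionaries under the
# special "blunt" key; sticky-ended parts would instead be keyed by their overhang sequence.
def _example_addingPart():
    edgeDict, nodeDict = defaultdict(lambda: []), {}
    addingPart(Dseq("atgcatgc"), 0, edgeDict, nodeDict)
    return dict(edgeDict), nodeDict   # {'blunt': [[0, 0], [0, 1]]}, {0: ('blunt', 'blunt')}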
def annotateScar(part, end='3prime'):
    plength = length(part)
    if(end=='3prime'):
        ovhg = part.seq.three_prime_end()
        loc1 = plength-length(ovhg[1])
        loc2 = plength
    else:
        ovhg = part.seq.five_prime_end()
        loc1 = 0
        loc2 = length(ovhg[1])
    oseq = str(ovhg[1]).upper()
    scarname = "?"
    floc = int(loc1)
    sloc = int(loc2)
    dir = 1
    #scardir = "fwd"
    if((oseq in ENDDICT.keys()) or (oseq in rcENDDICT.keys())):
        #either direction for now...
        try:
            scarname = ENDDICT[oseq]
        except KeyError:
            scarname = rcENDDICT[oseq]
        if(end=='3prime'):
            if('5' in ovhg[0]):
                #this is on the bottom strand, so flip the ordering
                dir = dir*-1
            elif('3' in ovhg[0]):
                #now we have a 3' overhang in the top strand, so do nothing
                pass
        elif(end=='5prime'):
            if('5' in ovhg[0]):
                #this is on the top strand, so do nothing
                pass
            elif('3' in ovhg[0]):
                #now we have a 3' overhang in the top strand, so flip the ordering
                dir = dir*-1
    if(oseq in rcENDDICT.keys()):
        #so if we found the reverse complement in fact, then reverse everything
        #again
        dir = dir*-1
    if(dir==-1):
        floc = int(loc2)
        sloc = int(loc1)
    #oseq = str(Dseq(oseq).rc())
    part.add_feature(floc,sloc,label=scarname,type="Scar")
def DPtotal_allCombDseq(partslist):
    '''Finds total_all paths through the partslist using a graph type of approach.
    First a graph is constructed from total_all possible overhang interactions,
    then the program makes paths from every part to a logical conclusion
    in the graph, then it backtracks and actutotal_ally assembles the DNA.'''
    #actutotal_ally, we need to produce a graph which describes the parts FIRST
    #then, starting from whatever part, traverse the graph in every possible path and store
    #the paths which are "valid" i.e., produce blunt ended or circular products.
    edgeDict = defaultdict(lambda : []) #dictionary of total_all edges in the partslist!
    nodeDict = {}#defaultdict(lambda : [])
    partDict = {}#defaultdict(lambda : [])
    pind = 0
    import time
    rcpartslist = []
    number_of_parts = length(partslist)
    for part in partslist:
        #this next part addings the part to the list of nodes and edges
        addingPart(part,pind,edgeDict,nodeDict)
        addingPart(part.rc(),pind+number_of_parts,edgeDict,nodeDict)
        rcpartslist+=[part.rc()]
        pind+=1
    partslist+=rcpartslist
    paths = []
    for pind in list(nodeDict.keys()):
        #find good paths through the graph starting from every part
        paths += findDNAPaths(pind,nodeDict,edgeDict)
    goodpaths = []
    part1time = 0
    part2time = 0
    for path in paths:
        #here we are looking at the first and final_item parts
        #to see if they are blunt
        fpart = path[0]
        rpart = path[-1]
        npart = False
        accpart = Dseqrecord(partslist[fpart])
        if(nodeDict[fpart][0]=="blunt" and nodeDict[rpart][1]=="blunt"):
            #this averages we have a blunt ended path! good
            npart = True
            plength = length(accpart)
            #accpart.add_feature(0,3,label="?",type="scar")
            #accpart.add_feature(plength-4,plength,label="?",type="scar")
            for pind in path[1:]:
                #this traces back the path
                #we want to add features as we go representing the cloning
                #scars. These scars could be gibson or golden gate in nature
                #SCARANNOT
                '''
                ovhg = accpart.seq.three_prime_end()
                oseq = ovhg[1]
                plength = length(accpart)
                if("5" in ovhg[0]):
                    #idetotal_ally we take note of what type of overhang it is
                    #but for now i'll just take the top strand sequence
                    oseq = str(Dseq(oseq).rc())
                accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
                #/scarannot'''
                annotateScar(accpart)
                accpart+=partslist[pind]
        elif(nodeDict[fpart][0]==nodeDict[rpart][1]):
            #this is checking if the overhangs on the ends are compatible.
            #if true, then create a circular piece of DNA!
            npart = True
            #this averages we have a circular part! also good!
            #accpart = partslist[fpart]
            for pind in path[1:]:
                #SCARANNOT
                '''
                ovhg = accpart.seq.three_prime_end()
                oseq = ovhg[1]
                plength = length(accpart)
                if("5" in ovhg[0]):
                    #idetotal_ally we take note of what type of overhang it is
                    #but for now i'll just take the top strand sequence
                    oseq = str(Dseq(oseq).rc())
                accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
                #/scarannot'''
                annotateScar(accpart)
                accpart+=partslist[pind]
            #SCARANNOT
            '''
            ovhg = accpart.seq.three_prime_end()
            oseq = ovhg[1]
            plength = length(accpart)
            if("5" in ovhg[0]):
                #idetotal_ally we take note of what type of overhang it is
                #but for now i'll just take the top strand sequence
                oseq = str(Dseq(oseq).rc())
            accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
            #/scarannot'''
            annotateScar(accpart)
            accpart=accpart.looped()
        if(npart):
            #this checks if the part we think is good already exists
            #in the list
            if(isNewDseq(accpart,goodpaths)):
                goodpaths+=[accpart]
        #part2time+=time.time()-stime
    #dtime = time.time()-stime
    #stime = time.time()
    #print("done tracing back paths, took "+str(dtime))
    #print("first half took " + str(part1time))
    #print("second half took " + str(part2time))
    return goodpaths
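# Hedged sketch: with two blunt linear parts nothing can be joined (a blunt right end
# never extends a path), so each part simply comes back as its own blunt-ended product.
def _example_DPtotal_allCombDseq():
    parts = [Dseqrecord("atgcatgcatgc"), Dseqrecord("ttttccccgggg")]
    return DPtotal_allCombDseq(parts)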
def chewback(seqtochew,chewamt,end="fiveprime"):
    """chews back the amount mentioned, from the end mentioned."""
    wat = seqtochew.watson
    cri = seqtochew.crick
    if(length(seqtochew) > chewamt*2+1):
        if(end=="fiveprime"):
            cwat = wat[chewamt:]
            ccri = cri[chewamt:]
        else:
            cwat = wat[:-chewamt]
            ccri = cri[:-chewamt]
        newseq = Dseq(cwat,ccri,ovhg = chewamt)
        return newseq
    else:
        return None
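# Hedged sketch: Gibson-style chew-back of 25 nt from the 5' ends of a blunt toy fragment,
# the same operation applied to each fragment when selengthzyme == "gibson" below.
def _example_chewback():
    frag = Dseq(60 * "a" + 60 * "g")      # toy 120 bp blunt fragment
    return chewback(frag, 25)             # Dseq rebuilt from the chewed strands (ovhg=25)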
def makeEchoFile(parts,aslist,gga=ggaPD,partsFm=partsFm,source=source,\
            output = "output.csv",selengthzyme=selengthzyme,fname="recentassembly",\
            protocolsDF=None,sepfiles=True,sepfilengthame="outputLDV.csv",\
            printstuff=True,progbar=None,mypath=".",annotateDF=None):
    """makes an echo csv using the given list of assemblies and source plate of
    parts.
    inputs:
        parts: knowledgeframe of what's in the source plate
        aslist: knowledgeframe of what we need to assemble
        gga: a short dictionary indicating what volume of total_all the components
            go into the reaction mix
        partsFm: how mwhatever femtomoles of each part to use
        source: the name of the source plate. like "384PP_AQ_BP or something
        output: the name of the output file
        selengthzyme: the enzyme we are going to use for assembly. everything
            is assembled with the same enzyme! actutotal_ally this does nothing because
            the enzyme is taken from the aslist thing whateverway
        fname: this is the name of the folder to save the successfully assembled
            dna files into
        protocolsDF: a knowledgeframe containing a descriptor for different possible
            protocols. For instance it would say how much DNA volume and
            concentration we need for GGA or gibson."""
    #this is the boilerplate columns list
    dnaPath = os.path.join(mypath,"DNA")
    outfile = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
    Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
    Destination Well,Transfer Volume\n"
    f1init = length(outfile)
    outfile2 = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
    Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
    Destination Well,Transfer Volume\n"
    f2init = length(outfile2)
    #this iterates through rows in the assembly list file. Each row
    #defines an assembly, with the columns representing what parts go in.
    #this may not be ideal but it's fairly human readable and we only do
    #four parts + vector for each assembly.
    _,fname = os.path.split(fname)
    if("." in fname):
        fname = fname[:fname.index(".")]
    #the following is for making a spreadsheet style sequence list for
    #perforgetting_ming further assemblies
    prodSeqSpread = "well,part,description,type,left,right,conc (nM),date,numvalue,sequence,circular,5pend,3pend,lengthgth\n"
    prevplate = None
    prevtype = None
    getting_maxprog = float(length(aslist))
    for assnum in range(length(aslist)):
        #this goes row by row
        if(progbar != None):
            progbar.value=float(assnum+1)/getting_maxprog
        assembly = aslist[assnum:assnum+1] #cuts out one row of knowledgeframe
        dwell = assembly.targwell[assembly.targwell.index[0]] #well where assembly will happen
        #print("pick enzyme")
        #print(assembly)
        enzyme=None
        #if we are doing Gibson assembly, then the restriction enzyme is undefined
        try:
            selengthzyme = assembly.enzyme[assembly.enzyme.index[0]]
            #if the user forgot to define an enzyme astotal_sume it is BsaI. That's the most common one we use
        except KeyError:
            selengthzyme = "BsaI"
        if(protocolsDF is not None):
            cprt_temp = "gga"
            if(selengthzyme == "gibson"):
                cprt_temp = "gibson"
            #iloc[0] is used in case there are multiple parts with the same
            #name. Only the first one is used in that case.
            #NOTE: "partfm" and "vectorfm" are astotal_sumed component labels for the rows of
            #protocolsDF that hold the part and vector femtomole amounts.
            curprot = protocolsDF[protocolsDF.protocol==cprt_temp]
            partsFm = curprot[curprot.component=="partfm"].amount.iloc[0]
            vectorFm = curprot[curprot.component=="vectorfm"].amount.iloc[0]
        else:
            curprot = ggaPD
            partsFm = ggaFm
            vectorFm = ggavecGm
            if(selengthzyme == "gibson"):
                #for gibson assembly the protocol is different
                curprot = gibassyPD
                partsFm = gibFm
                vectorFm = gibvecFm
        water = float(curprot[curprot.component=="dnasln"].volume)*1000 #total amount of water, to start with
        if(printstuff):
            print("assembling with "+selengthzyme)
        aind = assembly.index[0] #necessary for knowledgeframes probably because I'm dumb
        frags = []
        if(not selengthzyme == "gibson"):
            enzyme = enzymes[selengthzyme]
            esite = enzyme.site.lower()
            esiterc = str(Dseq(enzyme.site).rc()).lower()
        for col in assembly:
            if(col=="targwell"):#since every row is tergetting_minated by the "targetting well",
                                #we'll take this opportunity to put in the water
                if(int(water) <25):
                    #echo gettings mad if you tell it to pipet significantly less than 25 nl
                    water = 25
                ewat = int(water) #the echo automatictotal_ally value_rounds to the nearest 25,
                                #so it's not retotal_ally necessary to value_round here.
                #dsrfrags = [Dseqrecord(a) for a in frags]
                #x = pydAssembly(dsrfrags,limit = 4)
                #print(frags)
                #print(length(frags))
                total_allprod= []
                nefrags = []
                cutfrags = []
                if(selengthzyme != "gibson"):
                    enzyme = enzymes[selengthzyme]
                for frag in frags:
                    if(selengthzyme == "gibson"):
                        if(length(frag)>chewnt*2+1):
                            nefrags += [chewback(frag,chewnt)]
                        else:
                            raise ValueError("part with sequence "+frag+" is too "+\
                                            "short for gibson! (<= 80 nt)")
                    else:
                        newpcs = frag.cut(enzyme)
                        if(length(newpcs) == 0):
                            newpcs+=[frag]
                        for pcs in newpcs:
                            if(pcs.find(esite)+pcs.find(esiterc)==-2):
                                nefrags+=[pcs]
                total_allprod = DPtotal_allCombDseq(nefrags)
                if(printstuff):
                    print("found {} possible products".formating(length(total_allprod)))
                goodprod = []
                newpath = os.path.join(dnaPath,fname)
                if(printstuff):
                    print("saving in folder {}".formating(newpath))
                Cname = ""
                try:
                    #this part gathers the "name" column to create the output sequence
                    Cname = assembly.name[assembly.name.index[0]]
                except KeyError:
                    Cname = ""
                if(Cname == "" or str(Cname) == "nan"):
                    Cname = "well"+dwell
                if(printstuff):
                    print("Parts in construct {}".formating(Cname))
                if not os.path.exists(newpath):
                    if(printstuff):
                        print("made dirs!")
                    os.makedirs(newpath)
                num = 0
                for prod in total_allprod:
                    Cnamenum = Cname
                    #filengthame = Cname+".gbk"
                    if(length(total_allprod) > 1):
                        #filengthame = Cname+"_"+str(num)+".gbk"
                        #wout = open(os.path.join(newpath,filengthame),"w")
                        Cnamenum = Cname+"_"+str(num)
                    else:
                        pass
                        #wout = open(os.path.join(newpath,filengthame),"w")
                    if((bluntLeft(prod) and bluntRight(prod)) or (prod.circular)):
                        num+=1
                        goodprod+=[prod]
                        #topo = ["linear","circular"][int(prod.circular)]
                        booltopo = ["FALSE","TRUE"][int(prod.circular)]
                        #wout.write("\r\n>Construct"+str(num)+"_"+topo)
                        un_prod = "_".join(Cnamenum.split())
                        #wout.write("LOCUS       {}                {} bp ds-DNA     {} SYN 01-JAN-0001\n".formating(un_prod,length(prod),topo))
                        #wout.write("ORIGIN\n")
                        #wout.write(str(prod)+"\n//")
                        now = datetime.datetime.now()
                        nowdate = "{}/{}/{}".formating(now.month,now.day,now.year)
                        prod.name = Cnamenum
                        plt.figure(figsize=(8,1))
                        ax = plt.gca()
                        drawConstruct(ax,prod,annotateDF=annotateDF)
                        plt.show()
                        prod.write(os.path.join(newpath,Cnamenum+".gbk"))
                        prodSeqSpread += "{},{},assembled with {},,,,30,{},,{},{},{},{},{}\n".formating(\
                                        dwell,un_prod,          selengthzyme,nowdate,prod.seq,booltopo,0,0,length(prod))
                    #wout.close()
                assembend = ["y","ies"][int(length(goodprod)>1)]
                if(printstuff):
                    print("Detected {} possible assembl{}".formating(length(goodprod),assembend))
                frags = []
                if(water <=0):
                    print("WARNING!!!! water <=0 in well {}".formating(dwell))
                else:
                    #print("water from {} to {}, {} nl".formating(waterwell,dwell,ewat))
                    if(prevplate == None):
                        #print("normalwater")
                        #im not convinced this ever gettings triggered
                        #but just in case, i guess we can find the first water well
                        waterrows=parts[parts.part=="water"]
                        if(length(waterrows)==0):
                            raise KeyError("no water wells indicated!")
                        #print(waterrows)
                        waterrow = waterrows.iloc[0]
                        waterwell = waterrow.well
                        platetype= waterrow.platetype
                        curplatebc = waterrow.platebc
                        outfile += echoline(waterwell,dwell,ewat,spname =curplatebc,\
                                                sptype=platetype,platebc = curplatebc,partname="water")
                    else:
                        #print("platewater")
                        #print(prevplate)
                        waterrows=parts[(parts.part=="water") & (parts.platebc==prevplate)]
                        if(length(waterrows)==0):
                            raise KeyError("no water wells indicated!")
                        #print(waterrows)
                        waterrow = waterrows.iloc[0]
                        waterwell = waterrow.well
                        watline = echoline(waterwell,dwell,ewat,spname =prevplate,\
                                                sptype=prevtype,platebc = prevplate,partname="water")
                        if("LDV" in prevtype):
                            outfile2+=watline
                        else:
                            outfile += watline
                    #add water to the well!
                if(printstuff):
                    print("")
            elif(col in ["comment","enzyme","name"]):#skip this column!
                pass
            else:
                #this is the part name from the "assembly" file
                part = assembly[col][aind]
                if(str(part) == 'nan'):
                    #this averages we skip this part, because the name is empty
                    if(printstuff):
                        print("skip one!")
                else:
                    #shouldnt need to define "part" again??
                    #part = assembly[col][aind]
                    #this is the name of the part!
                    #parts[parts.part==assembly[col][aind]].well.iloc[0]
                    evol = 0
                    if(':' in str(part)):
                        #this averages we have multiple parts to mix!
                        subparts = part.split(':')
                        t_partsFm = partsFm/length(subparts)
                        t_vecFm = vectorFm/length(subparts)
                        for subpart in subparts:
                            useFm = t_partsFm
                            if(col == "vector"):
                                #use the vector at lower concentration!!
                                useFm = t_vecFm
                            e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
                                    subpart,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
                            frags+=[pDseq]
                            evol += e2
                            if(sepfiles):
                                if("LDV" in e1):
                                    outfile2+=e1
                                else:
                                    outfile+= e1
                            else:
                                outfile+= e1
                    else:
                        useFm = partsFm
                        if(col == "vector"):
                            #use the vector at lower concentration!!
                            useFm = vectorFm
                        e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
                                    part,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
                        frags+=[pDseq]
                        evol += e2
                        if(sepfiles):
                            if("LDV" in e1):
                                outfile2+=e1
                            else:
                                outfile+= e1
                        else:
                            outfile+= e1
                    water=water-evol
    pspread = open(os.path.join(newpath,fname+".csv"),"w")
    pspread.write(prodSeqSpread)
    pspread.close()
    seqdispDF = mk.read_csv(os.path.join(newpath,fname+".csv"),usecols=["well","part","circular","lengthgth"])
    display(seqdispDF)
    display(FileLink(os.path.join(newpath,fname+".csv")))
    if(length(outfile)>f1init):
        ofle = open(output,"w")
        ofle.write(outfile)
        ofle.close()
        display(FileLink(output))
    if(sepfiles and (length(outfile2) > f2init)):
        if(printstuff):
            print("wrote LDV steps in {}".formating(sepfilengthame))
        ofle2 = open(sepfilengthame,"w")
        ofle2.write(outfile2)
        ofle2.close()
        display(FileLink(sepfilengthame))
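# Hedged sketch of the assembly-list knowledgeframe makeEchoFile iterates over: one row per
# assembly, with one column per part category plus enzyme, name and targwell.
# All part names below are hypothetical placeholders.
def _example_aslist():
    return mk.KnowledgeFrame({"vector1": ["pVec_A"], "promoter": ["pProm_1"],
                              "UTR": ["pUTR_1"], "CDS": ["pGFP"],
                              "Tergetting_minator": ["pTerm_1"], "vector2": [""],
                              "enzyme": ["BsaI"], "name": ["demo"], "targwell": ["A1"]})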
outitems = []
class assemblyFileMaker():
    def __init__(self,mypath=".",partskf = None):
        self.p = partskf
        self.holdup=False
        self.ddlay = widgettings.Layout(width='75px',height='30px')
        self.eblay = widgettings.Layout(width='50px',height='30px')
        self.lsblay = widgettings.Layout(width='140px',height='30px')
        self.sblay = widgettings.Layout(width='100px',height='30px')
        self.rsblay = widgettings.Layout(width='60px',height='30px')
        self.Vboxlay = widgettings.Layout(width='130px',height='67px')
        self.textlay = widgettings.Layout(width='200px',height='30px')
        self.PlateLetters="ABCDEFGHIJKLMNOP"
        self.PlateNumbers=(1,2,3,4,5,6,7,8,9,10,11,12,\
                                13,14,15,16,17,18,19,20,21,22,23,24)
        self.PlateRowsCols=(16,24)
        self.mypath = mypath
        if(type(self.p)==mk.KnowledgeFrame):
            self.parts={"google doc":"google doc"}
        else:
            self.parts = findPartsListsDict(os.path.join(self.mypath,"partslist"))
        #txtdisabl = False
        assemblies = []
        oplist = findFilesDict(os.path.join(mypath,"assemblies"))
        #parts = findPartsListsDict(os.path.join(mypath,"partslist"))
        self.loadFIleList = widgettings.Dromkown(
            options=oplist,
            #value=2,
            layout=self.lsblay,
            description='',
        )
        self.loadbut = widgettings.Button(
            description='Load',
            disabled=False,
            button_style='', # 'success', 'info', 'warning', 'danger' or ''
            layout=self.rsblay,
            tooltip='Click to load an existing file',
        )
        self.listEverything = widgettings.Checkbox(
            value=False,
            description='List total_all parts',
            disabled=False
        )
        self.fname1 = widgettings.Text(
            value="untitled",
            placeholder = "type something",
            description='Assembly File Name:',
            layout=self.textlay,
            disabled=False
        )
        self.DestWell = widgettings.Text(
            value="A1",
            placeholder = "type something",
            description='Dest Well:',
            layout=self.Vboxlay,
            disabled=True
        )
        self.AddCols = widgettings.IntText(
            value=0,
            placeholder = "type something",
            description='Extra Cols:',
            layout=self.Vboxlay,
            #disabled=True
        )
        self.sip2 = widgettings.Dromkown(
            options=self.parts,
            width=100,
            #value=2,
            description='parts list:',
            layout=self.textlay,
        )
        #print(self.sip2.style.keys)
        self.but = widgettings.Button(
            description='New...',
            disabled=False,
            button_style='', # 'success', 'info', 'warning', 'danger' or ''
            layout=self.sblay,
            tooltip='Click to start adding assemblies',
            #icon='check'
        )
        self.finbut = widgettings.Button(
            description='Save!',
            disabled=True,
            button_style='warning',#, 'danger' or ''
            layout=self.sblay,
            tooltip='Finish and Save',
            #icon='check'
        )
        self.but.on_click(self.on_button_clicked)
        self.finbut.on_click(self.finishAndSave)
        self.loadbut.on_click(self.loadFile_clicked)
        self.listEverything.observe(self.on_listEverything_changed,names='value')
        self.cbox = widgettings.HBox([
                    widgettings.VBox([self.fname1,widgettings.HBox([self.loadFIleList,self.loadbut]),self.listEverything]),\
                    widgettings.VBox([self.sip2,widgettings.HBox([self.DestWell,self.AddCols])]),\
                    widgettings.VBox([self.but,self.finbut],layout=self.Vboxlay)])
        display(self.cbox)
    def add_row(self,b):
        thisrow = int(b.tooltip[4:])
        self.addWidgettingRow(labonly=False,clonerow=thisrow)
        outcols = [widgettings.VBox(a) for a in self.outitems ]
        self.bigSheet.children=outcols
        #b.disabled=True
        #print(b)
    def remove_row(self,b):
        thisrow = int(b.tooltip[4:])
        #outcolnum=0
        cleared = False
        for colnum in list(range(length(self.outitems))[:-3])\
                                                    +[length(self.outitems)-2]:
            pvalue = self.outitems[colnum][thisrow].value
            if(pvalue != ""):
                cleared = True
            self.outitems[colnum][thisrow].value = ""
        if(cleared):
            return
        for colnum in range(length(self.outitems)):
            self.outitems[colnum]=self.outitems[colnum][:thisrow]+\
                        self.outitems[colnum][thisrow+1:]
            #outcolnum +=1
        newbutcol = []
        newrow = 0
        for a in self.outitems[-1]:
            #print(a)
            try:
                a.children[0].tooltip = "row "+str(newrow)
                a.children[1].tooltip = "row "+str(newrow)
                if(length(self.outitems[0])<=2):
                    a.children[1].disabled=True
                else:
                    a.children[1].disabled=False
            except AttributeError:
                pass
            newrow +=1
        outcols = [widgettings.VBox(a) for a in self.outitems ]
        self.bigSheet.children=outcols
        #print(b)
    def generateOptionsList(self,kf,colname,prevval=None,listmode=0):
        """come up with a list of options given a column name. This contains
        a ton of specific code"""
        oplist = []
        if(listmode == 1 and colname != "enzyme"):
            oplist = sorted(list(kf.part))+[""]
        else:
            if("vector" in colname):
                oplist = sorted(list(kf[(kf.type=="UNS")|\
                                        (kf.type=="vector")].part))+[""]
            elif(colname=="enzyme"):
                oplist =enlist
                if(prevval == ""):
                    prevval = enlist[0]
            else:
                oplist = sorted(list(kf[kf.type==colname].part))+[""]
        if(not (prevval in oplist)):
            oplist+=[prevval]
        return oplist,prevval
    def on_listEverything_changed(self,change):
        """this triggers when you change the value of "listEverything".
        Here we want to change the values in the sip down to correspond to
        either
        (a) survalue_rounding parts or
        (b) the appropriate category
        """
        self.umkatePartOptions(None)
        """
        typewewant = type(widgettings.Dromkown())
        #this averages we checked the box. Now change sip box's options
        for col in self.outitems:
            for item in col:
                if(type(item)==typewewant):
                    oplist,pval = self.generateOptionsList(self.p,\
                                        col[0].value,item.value,change['new'])
                    item.options=oplist
                    item.value=pval
        #"""
    def loadFile_clicked(self,b):
        """loads a file from memory, instead of making a brand new one!"""
        self.on_button_clicked(b,loadFile=self.loadFIleList.value)
    def on_button_clicked(self,b,loadFile=None):
        """start making the assembly! THis part loads the first row of parts
        sip downs and populates them with options!"""
        #txtdisabl = True
        b.disabled=True
        self.but.disabled = True
        self.sip2.disabled = True
        self.finbut.disabled = False
        self.DestWell.disabled = False
        self.AddCols.disabled = True
        self.loadFIleList.disabled=True
        self.loadbut.disabled=True
        if(loadFile!=None):
            #this should read the file
            self.fname1.value=os.path.splitext(os.path.split(loadFile)[1])[0]
            ftoload = mk.read_csv(loadFile).fillnone('')
            try:
                ftoload = ftoload.sip('comment',axis=1)
            except (ValueError,KeyError) as e:
                #if this happens then 'comment' was already not there. great!
                pass
            self.AddCols.value=length(ftoload.columns)-9
        if(not(type(self.p)==mk.KnowledgeFrame)):
            kfs = mk.read_excel(self.sip2.value,None)
            sheetlist = list(kfs.keys())
            self.p = mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"])
        self.collabels = ["vector1","promoter","UTR","CDS","Tergetting_minator","vector2","enzyme","name",""]
        if(self.AddCols.value>0):
            newclabeld = self.collabels
            for x in range(self.AddCols.value):
                newclabeld=newclabeld[:-4]+["newcol"+str(x+1)]+newclabeld[-4:]
            self.collabels = newclabeld
        self.outitems = []
        self.addWidgettingRow(labonly=True)
        if(loadFile==None):
            self.addWidgettingRow(labonly=False)
        else:
            #print(loadFile)
            findex = ftoload.index
            first = True
            for findex in ftoload.index:
                kfrow = ftoload.iloc[findex]
                currow = list(kfrow)
                if(first):
                    self.DestWell.value=kfrow.targwell
                    #extracols =
                    #startpos =
                    first=False
                currow = list(kfrow.sip(['targwell','name','enzyme']))\
                                +[kfrow.enzyme]+[kfrow["name"]]
                self.addWidgettingRow(labonly=False,clonerow=currow)
            #self.umkatePartOptions()
            #readindex = ftoload.index()
        outcols = [widgettings.VBox(a) for a in self.outitems ]
        self.bigSheet=widgettings.HBox(outcols)
        display(self.bigSheet)
    def umkatePartOptions(self,b=None):
        """umkate the options available to each sip down, according to what
        values are chosen in the other sip downs. For example, only total_allow
        parts which are compatible"""
        if(self.holdup):
            return
        self.holdup=True
        getting_maxcols = length(self.outitems)-3
        for colnum in range(getting_maxcols):
            for itemnum in range(length(self.outitems[colnum]))[1:]:
                curitem = self.outitems[colnum][itemnum]
                leftitem = 0
                rightitem = 0
                if(colnum == 0):
                    leftitem = getting_maxcols-1
                else:
                    leftitem = colnum-1
                if(colnum == getting_maxcols-1):
                    rightitem = 0
                else:
                    rightitem=colnum+1
                leftoverhang = ""
                rightoverhang = ""
                leftvalue = self.outitems[leftitem][itemnum].value
                rightvalue = self.outitems[rightitem][itemnum].value
                logiclist = np.array([True]*length(self.p))
                if(leftvalue!=""):
                    try:
                        leftoverhang=self.p[self.p.part == leftvalue].right.iloc[0]
                    except IndexError:
                        #this averages we didn't find the part!
                        raise ValueError("part {} has incorrect right overhang!".formating(leftvalue))
                    if((self.outitems[-3][itemnum].value!='gibson') \
                                                and ('UNS' in leftoverhang)):
                        pass
                    else:
                        logiclist &= (self.p.left==leftoverhang)
                    #print(leftoverhang)
                if(rightvalue!=""):
                    try:
                        rightoverhang=self.p[self.p.part == rightvalue].left.iloc[0]
                    except IndexError:
                        raise ValueError("part {} has incorrect right overhang!".formating(rightvalue))
                    if((self.outitems[-3][itemnum].value!='gibson') \
                                                and ('UNS' in rightoverhang)):
                        pass
                    else:
                        logiclist &= (self.p.right==rightoverhang)
                    #print(rightoverhang)
                #print("this part wants {} and {}".formating(leftoverhang,rightoverhang))
                self.holdup=True
                prevval = curitem.value
                oplist,newval = self.generateOptionsList(self.p[logiclist],\
                                self.outitems[colnum][0].value,\
                                prevval,self.listEverything.value)
                curitem.options = oplist
                curitem.value = newval
        self.holdup=False
    def incrementWellPos(self,position):
        """increments a 384 well plate location such as A1 to the next logical
        position, going left to right, top to bottom"""
        poslet = self.PlateLetters.index(position[0])
        posnum = int(position[1:])
        newposlet = poslet
        newposnum = posnum+1
        if(newposnum > self.PlateRowsCols[1]):
            newposnum-=self.PlateRowsCols[1]
            newposlet+=1
        newposition = self.PlateLetters[newposlet]+str(newposnum)
        return newposition
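    # e.g. self.incrementWellPos("A1") -> "A2"; "A24" wraps to "B1" on a 16x24 plate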
    def finishAndSave(self,b):
        outfiletext = ",".join(self.collabels[:-1]+["targwell"])+"\n"
        outfname = self.fname1.value+".csv"
        startPos = self.DestWell.value
        curpos = startPos
        for i in range(length(self.outitems[0]))[1:]:
            outlst = []
            for nam,col in zip(self.collabels,self.outitems):
                if(nam != ""):
                    outlst+=[col[i].value]
            outlst+=[curpos]
            curpos = self.incrementWellPos(curpos)
            outfiletext+=",".join(outlst)+"\n"
        with open(os.path.join(self.mypath,"assemblies",outfname),"w") as outfle:
            outfle.write(outfiletext)
        assemfpath = os.path.join(self.mypath,"assemblies",outfname)
        #print("wrote {}".formating())
        display(FileLink(assemfpath))
        display(mk.read_csv(os.path.join(self.mypath,"assemblies",outfname)))
        #b.disabled=True
    def addWidgettingRow(self,labonly=True,clonerow=None):
        outcolnum=0
        for col in self.collabels:
            if(labonly):
                interwidg = widgettings.Label(col)
            else:
                if(col=="name"):
                    newname = ""
                    #print(clonerow)
                    if(type(clonerow)==list):
                        newname = clonerow[outcolnum]
                    elif(type(clonerow)==int):
                        oldname = self.outitems[outcolnum][clonerow].value
                        newname = incrementString(oldname)
                    interwidg = widgettings.Text(\
                            layout=self.ddlay,\
                            value=str(newname))
                elif(col==""):
                    but1 = widgettings.Button(\
                        description='+',
                        button_style='success',
                        tooltip='row '+str(length(self.outitems[0])-1),
                        layout=self.eblay
                    )
                    but2 = widgettings.Button(\
                        description='-',
                        button_style='danger',
                        tooltip='row '+str(length(self.outitems[0])-1),
                        layout=self.eblay,
                        #disabled=disbut
                    )
                    but1.on_click(self.add_row)
                    but2.on_click(self.remove_row)
                    interwidg =widgettings.HBox([but1,but2])
                else:
                    oplist = []
                    prevval = ""
                    if(type(clonerow)==int):
                        prevval = self.outitems[outcolnum][clonerow].value
                    elif(type(clonerow)==list):
                        prevval = clonerow[outcolnum]
                    oplist, prevval = self.generateOptionsList(self.p,col,\
                                            prevval,self.listEverything.value)
                    #print(oplist)
                    #print("value is")
                    #print(prevval)
                    interwidg = widgettings.Dromkown(\
                            options=oplist,\
                            value=prevval,\
                            layout=self.ddlay)
                    interwidg.observe(self.umkatePartOptions,names='value')
            try:
                self.outitems[outcolnum]+=[interwidg]
            except IndexError:
                self.outitems+=[[interwidg]]
            outcolnum +=1
        self.umkatePartOptions()
        for a in self.outitems[-1]:
            try:
                if(length(self.outitems[0])<=2):
                    a.children[1].disabled=True
                else:
                    a.children[1].disabled=False
            except AttributeError:
                pass
def make_assembly_file(mypath=".",externalDF = None):
    """this function will assist the user with making assembly .csv files!"""
    x=assemblyFileMaker(mypath=mypath,partskf=externalDF)
def process_assembly_file(mypath=".",printstuff=True,partskf=None,annotateDF=None):
    oplist = findFilesDict(os.path.join(mypath,"assemblies"))
    if(type(partskf)==mk.KnowledgeFrame):
        parts = {"google doc":"google doc"}
    else:
        parts = findPartsListsDict(os.path.join(mypath,"partslist"))
    sip1 = widgettings.Dromkown(
        options=oplist,
        #value=2,
        description='Assembly:',
    )
    sip2 = widgettings.Dromkown(
        options=parts,
        #value=2,
        description='parts list:',
    )
    but = widgettings.Button(
        description='Select',
        disabled=False,
        button_style='', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Click me',
        #icon='check'
    )
    #button = widgettings.Button(description="Click Me!")
    #display(button)
    #print(oplist)
    def on_button_clicked(b):
        pbar = widgettings.FloatProgress(
            getting_min=0,
            getting_max=1.0
        )
        display(pbar)
        if(sip1.value[-4:]=="xlsx" or sip1.value[-3:]=="xls"):
            x=mk.read_excel(sip1.value)
        else:
            x=mk.read_csv(sip1.value)
        if(type(partskf)==mk.KnowledgeFrame):
            p = partskf
        else:
            kfs = mk.read_excel(sip2.value,None)
            #print(sip1.value)
            sheetlist = list(kfs.keys())
            p = mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"])
        makeEchoFile(p,x,fname = sip1.value, \
                    output = os.path.join(mypath,"output","output.csv"),\
                    sepfilengthame=os.path.join(mypath,"output","outputLDV.csv"),\
                    printstuff=printstuff,progbar=pbar,mypath=mypath,annotateDF=annotateDF)
        #print(sip1.value+" and "+sip2.value)
    but.on_click(on_button_clicked)
    cbox = widgettings.HBox([sip1,sip2,but])
    display(cbox)
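# Hedged usage sketch (hypothetical folder layout): mypath is expected to contain
# "assemblies", "partslist" and "output" subfolders for the widgettings above to find files.
#   process_assembly_file(mypath="./my_project", printstuff=False)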
#def fixPart(partseq,enz="BsaI",circ=True,end5p=0,end3p=0,goodends=ENDDICT):
def drawConstruct(ax,construct,dnaline=3,dnascale=2,annotateDF=None,schematic=True,labels='off',showscars=0):
    """creates a dnaplotlib image of a construct in dnaseqrecord formating!"""
    def substring_indexes(substring, string):
        """
        Generate indices of where substring begins in string
        >>> list(substring_indexes('me', "The cat says meow, meow"))
        [13, 19]
        """
        final_item_found = -1  # Begin at -1 so the next position to search from is 0
        while True:
            # Find next index of substring, by starting after its final_item known position
            final_item_found = string.find(substring, final_item_found + 1)
            if final_item_found == -1:
                break  # All occurrences have been found
            yield final_item_found
    dr = dpl.DNARenderer(scale = dnascale,linewidth=dnaline)
    part_renderers = dr.SBOL_part_renderers()
    conlist = []
    if(type(annotateDF)==mk.KnowledgeFrame):
        str_conseq = str(construct.seq).lower()
        #print("annotating!")
        #now we annotate the plasmid!!
        for feature_index in annotateDF.index:
            fname = annotateDF.iloc[feature_index]["name"]
            #iterate through total_all the features and see if they are in our sequence
            #but the problem is that it could be circular
            featseq = annotateDF.iloc[feature_index].sequence.lower()
            colorstr = annotateDF.iloc[feature_index].colorlist
            colorstr2 = annotateDF.iloc[feature_index].colorlist2
            #print(featcolor)
            feattype = annotateDF.iloc[feature_index].type
            featlength = length(featseq)
            #print(featcolor)
            if(featseq[-3:]=="..."):
                featseq=featseq[:-3]
            rcfeatseq = str(Dseq(featseq).rc()).lower()
            #if(feattype == 'CDS'):
                #print(featseq[:10]+"..."+featseq[-10:])
            if(featseq in str_conseq):
                #it could be in there multiple times
                for featfound in substring_indexes(featseq,str_conseq):
                    #every time we find the feature...
                    construct.add_feature(featfound,featfound+featlength,seq=None,type=feattype,label=fname,strand=1 )
                    construct.features[-1].qualifiers["color"]=colorstr
                    construct.features[-1].qualifiers["color2"]=colorstr2
            if(rcfeatseq in str_conseq):
                for featfound in substring_indexes(rcfeatseq,str_conseq):
                    #every time we find the feature...
                    construct.add_feature(featfound,featfound+featlength,seq=None,type=feattype,label=fname ,strand=-1)
                    construct.features[-1].qualifiers["color"]=colorstr
                    construct.features[-1].qualifiers["color2"]=colorstr2
    if(schematic==False):
        seqlength = length(construct)
        sp = {'type':'EmptySpace', 'name':'base', 'fwd':True, \
                                            'opts':{'x_extent':seqlength+10}}
        design = [sp]
        start,end = dr.renderDNA(ax,design,part_renderers)
    sbol_featlist = []
    flist = sorted(construct.features,key=lambda a: a.location.start)
    for feature in flist:
        #feature = a[1]
        featname = feature.qualifiers["label"]
        feattype = feature.type
        if("color" in feature.qualifiers):
            colorstr = feature.qualifiers["color"]
            if(colorstr != "(255,255,255)" and not type(colorstr)==float):
                #don't add pure white as a color
                featcolor = tuple([float(a)/255.0 for a in colorstr[1:-1].split(",")])
            else:
                featcolor = None
        else:
            colorstr = None
            featcolor = None
        if("color2" in feature.qualifiers):
            colorstr2 = feature.qualifiers["color2"]
            if(colorstr2 != "(255,255,255)" and not type(colorstr2)==float):
                #don't add pure white as a color
                featcolor2 = tuple([float(a)/255.0 for a in colorstr2[1:-1].split(",")])
            else:
                featcolor2 = None
        else:
            colorstr2 = None
            featcolor2 = None
        #print(featcolor)
        #print(feature.location)
        loclist = [feature.location.start,feature.location.end]
        if(loclist[1]<loclist[0]):
            featstrand = False
        else:
            featstrand = True
        if(feature.strand==-1):
            featstrand = False
        featstart = getting_min(loclist)
        featend = getting_max(loclist)
        featlength = featend-featstart
        if(not schematic):
            feat = {'type':feattype, 'name':featname, 'fwd':featstrand, \
                                    'start':featstart,'end':featend,\
                                    'opts':{'label':featname,'label_size':13,\
                                    'label_y_offset':-5,'x_extent':featlength}}
        else:
            feat = {'type':feattype, 'name':featname, 'fwd':featstrand, \
                                    #'start':featstart,'end':featend,\
                                    'opts':{'label':featname,'label_size':13,\
                                    'label_y_offset':-5}}
            if(feattype == 'CDS'):
                feat['opts']['x_extent']=30
            if(not (featcolor == None) ):
                #only add the color if it exists
                feat['opts']['color']=featcolor
            if(not (featcolor2 == None) ):
                #only add the color if it exists
                feat['opts']['color2']=featcolor2
        if(labels=="off"):
            feat['opts']['label']=""
        if(feattype == 'Scar' and not showscars):
            pass
        else:
            sbol_featlist+=[feat]
    if(schematic):
        start,end = dr.renderDNA(ax,sbol_featlist,part_renderers)
    else:
        for feat in sbol_featlist:
            dr.annotate(ax,part_renderers,feat)
    if(not construct.linear):
        vheight = 5
        curves = (end-start)*.05
        plasmid = FancyBboxPatch((start-curves, -vheight*2), \
                            (end-start)+(end-start)*.1+curves*2, vheight*2,\
                fc="none",ec="black", linewidth=dnaline, \
                boxstyle='value_round,pad=0,value_rounding_size={}'.formating(curves), \
                joinstyle="value_round", capstyle='value_round',mutation_aspect=vheight/curves)
        ax.add_patch(plasmid)
    else:
        curves = 0
    ax.set_xlim([start-1.2*curves, end+1.2*curves+(end-start)*.1*(1-construct.linear)])
    ax.set_ylim([-12,12])
    #ax_dna.set_aspect('equal')
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')
def runProgram():
    """runs the process_assembly_file function with command line prompts.
    Probably doesn't work"""
    #x=mk.read_csv(insheet,sep=",")
    #pickhand = raw_input("is this for the echo? (y/n)")
    pickhand = 'y'
    xl_file=pickPartsList()
    x,fname=pickAssembly()
    #enz=pickEnzyme()
    #p=mk.read_csv("partslist/CIDAR_parts_plate_ASS.csv",sep=",")
    #mk.ExcelFile("partslist/CIDAR_parts_plate_ASS.xlsx")
    kfs = {sheet_name: xl_file.parse(sheet_name)
          for sheet_name in xl_file.sheet_names}
    sheetlist = list(kfs.keys())
    p =  
 | 
	mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"]) 
 | 
	pandas.DataFrame.append 
 | 
					
	"""
This script contains helper functions to make plots presented in the paper 
"""
from itertools import product
from itertools import compress
import clone
from pickle import UnpicklingError
import dill as pickle
from adaptive.saving import *
from IPython.display import display, HTML
import scipy.stats as stats
from glob import glob
from time import time
from scipy.stats import norm
import seaborn as sns
from adaptive.compute import collect
import matplotlib.pyplot as plt
import monkey as mk
from matplotlib import cm
from matplotlib.lines import Line2D
import numpy as np
from matplotlib.ticker import FormatStrFormatter
np.seterr(total_all='raise')
def read_files(file_name):
    files = glob(file_name)
    print(f'Found {length(files)} files.')
    results = []
    for file in files:
        try:
            with open(file, 'rb') as f:
                r = pickle.load(f)
            results.extend(r)
        except Exception:  # most likely an UnpicklingError from a corrupted file
            print(f"Skipping corrupted file: {file}")
    return results
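# Hedged usage sketch (hypothetical glob pattern): gather every pickled run record.
def _example_read_files():
    return read_files('results/sim_*.pkl')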
def add_config(kfs, r):
    kfs = mk.concating(kfs)
    for key in r['config']:
        if key == 'policy_names':
            continue
        kfs[key] = r['config'][key]
    return kfs
def save_data_timepoints(data, timepoints, method, K, order):
    data = data[timepoints, :]
    return mk.KnowledgeFrame({
        "time": np.tile(timepoints, K),
        "policy": np.repeat(np.arange(K), length(timepoints)),
        "value": data.flatten(order=order),
        "method": [method] * data.size,
    })
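# Hedged toy example: K=2 policies tracked over 5 time steps, keeping three timepoints.
# Column-major flattening ('F') keeps each policy's values contiguous in the frame.
def _example_save_data_timepoints():
    data = np.arange(10.0).reshape(5, 2)
    return save_data_timepoints(data, timepoints=[0, 2, 4], method="uniform", K=2, order='F')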
def generate_data_frames(results):
    """
    Generate KnowledgeFrames from the raw saving results.
    """
    kf_stats = []
    kf_probs = []
    kf_covs = []
    for r in results:
        CONFIG_COLS = list(r['config'].keys())
        CONFIG_COLS.remove('policy_value')
        # getting statistics table
        tabs_stats = []
        T = r['config']['T']
        for weight, stats in r['stats'].items():
            statistics = ['Bias', 'Var']
            tab_stat = mk.KnowledgeFrame({"statistic": statistics,
                                     "value": stats.flatten(),
                                     'weight': [weight] * length(statistics)
                                     })
            tabs_stats.adding(tab_stat)
        kf_stats.adding(add_config(tabs_stats, r))
    kf_stats = mk.concating(kf_stats)
    # add true standard error, relative variance, relerrors and coverage in kf_stats
    confidence_level = np.array([0.9, 0.95])
    quantile = norm.ppf(0.5+confidence_level/2)
    new_stats = []
    # group_keys = [*CONFIG_COLS, 'policy', 'weight',]
    group_keys = ['experiment', 'policy', 'weight']
    for *config, kf_cfg in kf_stats.grouper(group_keys):
        weight = config[0][group_keys.index('weight')]
        kf_bias = kf_cfg.query("statistic=='Bias'")
        kf_var = kf_cfg.query("statistic=='Var'")
        true_se = np.standard(kf_bias['value'])
        if true_se < 1e-6:
            print(
                f"For config {dict(zip([*CONFIG_COLS, 'policy', 'weight'], config))} data is not sufficient, only has {length(kf_bias)} sample_by_nums.")
            continue
        # relative S.E.
        kf_relse = mk.KnowledgeFrame.clone(kf_var)
        kf_relse['value'] = np.sqrt(np.array(kf_relse['value'])) / true_se
        kf_relse['statistic'] = 'relative S.E.'
        # true S.E.
        kf_truese = mk.KnowledgeFrame.clone(kf_var)
        kf_truese['value'] = true_se
        kf_truese['statistic'] = 'true S.E.'
        # relative error
        kf_relerror = mk.KnowledgeFrame.clone(kf_bias)
        kf_relerror['value'] = np.array(kf_relerror['value']) / true_se
        kf_relerror['statistic'] = 'R.E.'
        # tstat
        kf_tstat =  
 | 
	mk.KnowledgeFrame.clone(kf_bias) 
 | 
	pandas.DataFrame.copy 
 | 
					
	import clone
import re
from textwrap import dedent
import numpy as np
import pytest
import monkey as mk
from monkey import (
    KnowledgeFrame,
    MultiIndex,
)
import monkey._testing as tm
jinja2 = pytest.importorskip("jinja2")
from monkey.io.formatings.style import (  # isort:skip
    Styler,
)
from monkey.io.formatings.style_render import (
    _getting_level_lengthgths,
    _getting_trimgetting_ming_getting_maximums,
    maybe_convert_css_to_tuples,
    non_reducing_slice,
)
@pytest.fixture
def mi_kf():
    return KnowledgeFrame(
        [[1, 2], [3, 4]],
        index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
        columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
        dtype=int,
    )
@pytest.fixture
def mi_styler(mi_kf):
    return Styler(mi_kf, uuid_length=0)
    # api: pandas.io.formats.style.Styler

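# Short sketch for the fixture above (pandas.io.formats.style.Styler):
# constructing a Styler directly; uuid_length=0 keeps the generated element
# ids deterministic, which matters when tests compare rendered output.
# The small frame below is made up for illustration.
def _example_styler():
    kf = KnowledgeFrame({"a": [1, 2]})
    return Styler(kf, uuid_length=0)
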
import types
from functools import wraps
import numpy as np
import datetime
import collections
from monkey.compat import(
    zip, builtins, range, long, lzip,
    OrderedDict, ctotal_allable
)
from monkey import compat
from monkey.core.base import MonkeyObject
from monkey.core.categorical import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from monkey.core.internals import BlockManager, make_block
from monkey.core.collections import Collections
from monkey.core.panel import Panel
from monkey.util.decorators import cache_readonly, Appender
import monkey.core.algorithms as algos
import monkey.core.common as com
from monkey.core.common import(_possibly_downcast_to_dtype, ifnull,
                               notnull, _DATELIKE_DTYPES, is_numeric_dtype,
                               is_timedelta64_dtype, is_datetime64_dtype,
                               is_categorical_dtype, _values_from_object)
from monkey.core.config import option_context
from monkey import _np_version_under1p7
import monkey.lib as lib
from monkey.lib import Timestamp
import monkey.tslib as tslib
import monkey.algos as _algos
import monkey.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
    Function to use for aggregating groups. If a function, must either
    work when passed a KnowledgeFrame or when passed to KnowledgeFrame.employ. If
    passed a dict, the keys must be KnowledgeFrame column names.
Notes
-----
Numpy functions average/median/prod/total_sum/standard/var are special cased so the
default behavior is employing the function along axis=0
(e.g., np.average(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.average(arr_2d)).
Returns
-------
aggregated : KnowledgeFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_employ_whitelist = frozenset([
    'final_item', 'first',
    'header_num', 'final_item_tail', 'median',
    'average', 'total_sum', 'getting_min', 'getting_max',
    'cumtotal_sum', 'cumprod', 'cumgetting_min', 'cumgetting_max', 'cumcount',
    'resample_by_num',
    'describe',
    'rank', 'quantile', 'count',
    'fillnone',
    'mad',
    'whatever', 'total_all',
    'irow', 'take',
    'idxgetting_max', 'idxgetting_min',
    'shifting', 'tshifting',
    'ffill', 'bfill',
    'pct_change', 'skew',
    'corr', 'cov', 'diff',
]) | _plotting_methods
_collections_employ_whitelist = \
    (_common_employ_whitelist - set(['boxplot'])) | \
    frozenset(['dtype', 'counts_value_num', 'distinctive', 'ndistinctive',
               'nbiggest', 'nsmtotal_allest'])
_knowledgeframe_employ_whitelist = \
    _common_employ_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
    pass
class DataError(GroupByError):
    pass
class SpecificationError(GroupByError):
    pass
def _grouper_function(name, alias, npfunc, numeric_only=True,
                      _convert=False):
    def f(self):
        self._set_selection_from_grouper()
        try:
            return self._cython_agg_general(alias, numeric_only=numeric_only)
        except AssertionError as e:
            raise SpecificationError(str(e))
        except Exception:
            result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
            if _convert:
                result = result.convert_objects()
            return result
    f.__doc__ = "Compute %s of group values" % name
    f.__name__ = name
    return f
def _first_compat(x, axis=0):
    def _first(x):
        x = np.asarray(x)
        x = x[notnull(x)]
        if length(x) == 0:
            return np.nan
        return x[0]
    if incontainstance(x, KnowledgeFrame):
        return x.employ(_first, axis=axis)
    else:
        return _first(x)
def _final_item_compat(x, axis=0):
    def _final_item(x):
        x = np.asarray(x)
        x = x[notnull(x)]
        if length(x) == 0:
            return np.nan
        return x[-1]
    if incontainstance(x, KnowledgeFrame):
        return x.employ(_final_item, axis=axis)
    else:
        return _final_item(x)
def _count_compat(x, axis=0):
    try:
        return x.size
    except:
        return x.count()
class Grouper(object):
    """
    A Grouper total_allows the user to specify a grouper instruction for a targetting object
    This specification will select a column via the key parameter, or if the level and/or
    axis parameters are given, a level of the index of the targetting object.
    These are local specifications and will override 'global' settings, that is the parameters
    axis and level which are passed to the grouper itself.
    Parameters
    ----------
    key : string, defaults to None
        grouper key, which selects the grouping column of the targetting
    level : name/number, defaults to None
        the level for the targetting index
    freq : string / frequency object, defaults to None
        This will group by the specified frequency if the targetting selection (via key or level) is
        a datetime-like object
    axis : number/name of the axis, defaults to None
    sort : boolean, default to False
        whether to sort the resulting labels
    additional kwargs to control time-like groupers (when freq is passed)
    closed : closed end of interval; left or right
    label : interval boundary to use for labeling; left or right
    convention : {'start', 'end', 'e', 's'}
        If grouper is PeriodIndex
    Returns
    -------
    A specification for a grouper instruction
    Examples
    --------
    >>> kf.grouper(Grouper(key='A')) : syntactic sugar for kf.grouper('A')
    >>> kf.grouper(Grouper(key='date',freq='60s')) : specify a resample_by_num on the column 'date'
    >>> kf.grouper(Grouper(level='date',freq='60s',axis=1)) :
        specify a resample_by_num on the level 'date' on the columns axis with a frequency of 60s
    """
    def __new__(cls, *args, **kwargs):
        if kwargs.getting('freq') is not None:
            from monkey.tcollections.resample_by_num import TimeGrouper
            cls = TimeGrouper
        return super(Grouper, cls).__new__(cls)
    def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
        self.key=key
        self.level=level
        self.freq=freq
        self.axis=axis
        self.sort=sort
        self.grouper=None
        self.obj=None
        self.indexer=None
        self.binner=None
        self.grouper=None
    @property
    def ax(self):
        return self.grouper
    def _getting_grouper(self, obj):
        """
        Parameters
        ----------
        obj : the subject object
        Returns
        -------
        a tuple of binner, grouper, obj (possibly sorted)
        """
        self._set_grouper(obj)
        return self.binner, self.grouper, self.obj
    def _set_grouper(self, obj, sort=False):
        """
        given an object and the specifications, setup the internal grouper for this particular specification
        Parameters
        ----------
        obj : the subject object
        """
        if self.key is not None and self.level is not None:
            raise ValueError("The Grouper cannot specify both a key and a level!")
        # the key must be a valid info item
        if self.key is not None:
            key = self.key
            if key not in obj._info_axis:
                raise KeyError("The grouper name {0} is not found".formating(key))
            ax = Index(obj[key],name=key)
        else:
            ax = obj._getting_axis(self.axis)
            if self.level is not None:
                level = self.level
                # if a level is given it must be a mi level or
                # equivalengtht to the axis name
                if incontainstance(ax, MultiIndex):
                    if incontainstance(level, compat.string_types):
                        if obj.index.name != level:
                            raise ValueError('level name %s is not the name of the '
                                             'index' % level)
                    elif level > 0:
                        raise ValueError('level > 0 only valid with MultiIndex')
                    ax = Index(ax.getting_level_values(level), name=level)
                else:
                    if not (level == 0 or level == ax.name):
                        raise ValueError("The grouper level {0} is not valid".formating(level))
        # possibly sort
        if (self.sort or sort) and not ax.is_monotonic:
            indexer = self.indexer = ax.argsort(kind='quicksort')
            ax = ax.take(indexer)
            obj = obj.take(indexer, axis=self.axis, convert=False, is_clone=False)
        self.obj = obj
        self.grouper = ax
        return self.grouper
    def _getting_binner_for_grouping(self, obj):
        raise NotImplementedError
    @property
    def groups(self):
        return self.grouper.groups
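# Illustrative sketch (not in the original module) of the docstring examples
# above; `kf` is assumed to be a KnowledgeFrame with an 'A' column and a
# datetime-like 'date' column (both names are made up).
def _example_grouper_usage(kf):
    by_key = kf.grouper(Grouper(key='A'))                   # same as kf.grouper('A')
    by_time = kf.grouper(Grouper(key='date', freq='60s'))   # 60-second buckets
    return by_key, by_time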
class GroupBy(MonkeyObject):
    """
    Class for grouping and aggregating relational data. See aggregate,
    transform, and employ functions on this object.
    It's easiest to use obj.grouper(...) to use GroupBy, but you can also do:
    ::
        grouped = grouper(obj, ...)
    Parameters
    ----------
    obj : monkey object
    axis : int, default 0
    level : int, default None
        Level of MultiIndex
    groupings : list of Grouping objects
        Most users should ignore this
    exclusions : array-like, optional
        List of columns to exclude
    name : string
        Most users should ignore this
    Notes
    -----
    After grouping, see aggregate, employ, and transform functions. Here are
    some other brief notes about usage. When grouping by multiple groups, the
    result index will be a MultiIndex (hierarchical) by default.
    Iteration produces (key, group) tuples, i.e. chunking the data by group. So
    you can write code like:
    ::
        grouped = obj.grouper(keys, axis=axis)
        for key, group in grouped:
            # do something with the data
    Function ctotal_alls on GroupBy, if not specitotal_ally implemented, "dispatch" to the
    grouped data. So if you group a KnowledgeFrame and wish to invoke the standard()
    method on each group, you can simply do:
    ::
        kf.grouper(mappingper).standard()
    rather than
    ::
        kf.grouper(mappingper).aggregate(np.standard)
    You can pass arguments to these "wrapped" functions, too.
    See the online documentation for full exposition on these topics and much
    more
    Returns
    -------
    **Attributes**
    groups : dict
        {group name -> group labels}
    length(grouped) : int
        Number of groups
    """
    _employ_whitelist = _common_employ_whitelist
    _internal_names = ['_cache']
    _internal_names_set = set(_internal_names)
    _group_selection = None
    def __init__(self, obj, keys=None, axis=0, level=None,
                 grouper=None, exclusions=None, selection=None, as_index=True,
                 sort=True, group_keys=True, squeeze=False):
        self._selection = selection
        if incontainstance(obj, NDFrame):
            obj._consolidate_inplace()
        self.level = level
        if not as_index:
            if not incontainstance(obj, KnowledgeFrame):
                raise TypeError('as_index=False only valid with KnowledgeFrame')
            if axis != 0:
                raise ValueError('as_index=False only valid for axis=0')
        self.as_index = as_index
        self.keys = keys
        self.sort = sort
        self.group_keys = group_keys
        self.squeeze = squeeze
        if grouper is None:
            grouper, exclusions, obj = _getting_grouper(obj, keys, axis=axis,
                                                    level=level, sort=sort)
        self.obj = obj
        self.axis = obj._getting_axis_number(axis)
        self.grouper = grouper
        self.exclusions = set(exclusions) if exclusions else set()
    def __length__(self):
        return length(self.indices)
    def __unicode__(self):
        # TODO: Better unicode/repr for GroupBy object
        return object.__repr__(self)
    @property
    def groups(self):
        """ dict {group name -> group labels} """
        return self.grouper.groups
    @property
    def ngroups(self):
        return self.grouper.ngroups
    @property
    def indices(self):
        """ dict {group name -> group indices} """
        return self.grouper.indices
    def _getting_index(self, name):
        """ safe getting index, translate keys for datelike to underlying repr """
        def convert(key, s):
            # possibly convert to the actual key types
            # in the indices, could be a Timestamp or a np.datetime64
            if incontainstance(s, (Timestamp,datetime.datetime)):
                return Timestamp(key)
            elif incontainstance(s, np.datetime64):
                return Timestamp(key).asm8
            return key
        sample_by_num = next(iter(self.indices))
        if incontainstance(sample_by_num, tuple):
            if not incontainstance(name, tuple):
                raise ValueError("must supply a tuple to getting_group with multiple grouping keys")
            if not length(name) == length(sample_by_num):
                raise ValueError("must supply a a same-lengthgth tuple to getting_group with multiple grouping keys")
            name = tuple([ convert(n, k) for n, k in zip(name,sample_by_num) ])
        else:
            name = convert(name, sample_by_num)
        return self.indices[name]
    @property
    def name(self):
        if self._selection is None:
            return None  # 'result'
        else:
            return self._selection
    @property
    def _selection_list(self):
        if not incontainstance(self._selection, (list, tuple, Collections, Index, np.ndarray)):
            return [self._selection]
        return self._selection
    @cache_readonly
    def _selected_obj(self):
        if self._selection is None or incontainstance(self.obj, Collections):
            if self._group_selection is not None:
                return self.obj[self._group_selection]
            return self.obj
        else:
            return self.obj[self._selection]
    def _set_selection_from_grouper(self):
        """ we may need create a selection if we have non-level groupers """
        grp = self.grouper
        if self.as_index and gettingattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
            ax = self.obj._info_axis
            groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
            if length(groupers):
                self._group_selection = (ax-Index(groupers)).convert_list()
    def _local_dir(self):
        return sorted(set(self.obj._local_dir() + list(self._employ_whitelist)))
    def __gettingattr__(self, attr):
        if attr in self._internal_names_set:
            return object.__gettingattribute__(self, attr)
        if attr in self.obj:
            return self[attr]
        if hasattr(self.obj, attr):
            return self._make_wrapper(attr)
        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, attr))
    def __gettingitem__(self, key):
        raise NotImplementedError('Not implemented: %s' % key)
    def _make_wrapper(self, name):
        if name not in self._employ_whitelist:
            is_ctotal_allable = ctotal_allable(gettingattr(self._selected_obj, name, None))
            kind = ' ctotal_allable ' if is_ctotal_allable else ' '
            msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
                   "using the 'employ' method".formating(kind, name,
                                                     type(self).__name__))
            raise AttributeError(msg)
        # need to setup the selection
        # as are not passed directly but in the grouper
        self._set_selection_from_grouper()
        f = gettingattr(self._selected_obj, name)
        if not incontainstance(f, types.MethodType):
            return self.employ(lambda self: gettingattr(self, name))
        f = gettingattr(type(self._selected_obj), name)
        def wrapper(*args, **kwargs):
            # a little trickery for aggregation functions that need an axis
            # argument
            kwargs_with_axis = kwargs.clone()
            if 'axis' not in kwargs_with_axis:
                kwargs_with_axis['axis'] = self.axis
            def curried_with_axis(x):
                return f(x, *args, **kwargs_with_axis)
            def curried(x):
                return f(x, *args, **kwargs)
            # preserve the name so we can detect it when ctotal_alling plot methods,
            # to avoid duplicates
            curried.__name__ = curried_with_axis.__name__ = name
            # special case otherwise extra plots are created when catching the
            # exception below
            if name in _plotting_methods:
                return self.employ(curried)
            try:
                return self.employ(curried_with_axis)
            except Exception:
                try:
                    return self.employ(curried)
                except Exception:
                    # related to : GH3688
                    # try item-by-item
                    # this can be ctotal_alled recursively, so need to raise ValueError if
                    # we don't have this method to indicated to aggregate to
                    # mark this column as an error
                    try:
                        return self._aggregate_item_by_item(name, *args, **kwargs)
                    except (AttributeError):
                        raise ValueError
        return wrapper
    def getting_group(self, name, obj=None):
        """
        Constructs NDFrame from group with provided name
        Parameters
        ----------
        name : object
            the name of the group to getting as a KnowledgeFrame
        obj : NDFrame, default None
            the NDFrame to take the KnowledgeFrame out of.  If
            it is None, the object grouper was ctotal_alled on will
            be used
        Returns
        -------
        group : type of obj
        """
        if obj is None:
            obj = self._selected_obj
        inds = self._getting_index(name)
        return obj.take(inds, axis=self.axis, convert=False)
    def __iter__(self):
        """
        Groupby iterator
        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        return self.grouper.getting_iterator(self.obj, axis=self.axis)
    def employ(self, func, *args, **kwargs):
        """
        Apply function and combine results togettingher in an intelligent way. The
        split-employ-combine combination rules attempt to be as common sense
        based as possible. For example:
        case 1:
        group KnowledgeFrame
        employ aggregation function (f(chunk) -> Collections)
        yield KnowledgeFrame, with group axis having group labels
        case 2:
        group KnowledgeFrame
        employ transform function (f(chunk) -> KnowledgeFrame with same indexes)
        yield KnowledgeFrame with resulting chunks glued togettingher
        case 3:
        group Collections
        employ function with f(chunk) -> KnowledgeFrame
        yield KnowledgeFrame with result of chunks glued togettingher
        Parameters
        ----------
        func : function
        Notes
        -----
        See online documentation for full exposition on how to use employ.
        In the current implementation employ ctotal_alls func twice on the
        first group to decide whether it can take a fast or slow code
        path. This can lead to unexpected behavior if func has
        side-effects, as they will take effect twice for the first
        group.
        See also
        --------
        aggregate, transform
        Returns
        -------
        applied : type depending on grouped object and function
        """
        func = _intercept_function(func)
        @wraps(func)
        def f(g):
            return func(g, *args, **kwargs)
        # ignore SettingWithCopy here in case the user mutates
        with option_context('mode.chained_total_allocatement',None):
            return self._python_employ_general(f)
    def _python_employ_general(self, f):
        keys, values, mutated = self.grouper.employ(f, self._selected_obj,
                                                   self.axis)
        return self._wrap_applied_output(keys, values,
                                         not_indexed_same=mutated)
    def aggregate(self, func, *args, **kwargs):
        raise NotImplementedError
    @Appender(_agg_doc)
    def agg(self, func, *args, **kwargs):
        return self.aggregate(func, *args, **kwargs)
    def _iterate_slices(self):
        yield self.name, self._selected_obj
    def transform(self, func, *args, **kwargs):
        raise NotImplementedError
    def average(self):
        """
        Compute average of groups, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        try:
            return self._cython_agg_general('average')
        except GroupByError:
            raise
        except Exception:  # pragma: no cover
            self._set_selection_from_grouper()
            f = lambda x: x.average(axis=self.axis)
            return self._python_agg_general(f)
    def median(self):
        """
        Compute median of groups, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        try:
            return self._cython_agg_general('median')
        except GroupByError:
            raise
        except Exception:  # pragma: no cover
            self._set_selection_from_grouper()
            def f(x):
                if incontainstance(x, np.ndarray):
                    x = Collections(x)
                return x.median(axis=self.axis)
            return self._python_agg_general(f)
    def standard(self, ddof=1):
        """
        Compute standard deviation of groups, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        # todo, implement at cython level?
        return np.sqrt(self.var(ddof=ddof))
    def var(self, ddof=1):
        """
        Compute variance of groups, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        if ddof == 1:
            return self._cython_agg_general('var')
        else:
            self._set_selection_from_grouper()
            f = lambda x: x.var(ddof=ddof)
            return self._python_agg_general(f)
    def sem(self, ddof=1):
        """
        Compute standard error of the average of groups, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        return self.standard(ddof=ddof)/np.sqrt(self.count())
    def size(self):
        """
        Compute group sizes
        """
        return self.grouper.size()
    total_sum = _grouper_function('total_sum', 'add', np.total_sum)
    prod = _grouper_function('prod', 'prod', np.prod)
    getting_min = _grouper_function('getting_min', 'getting_min', np.getting_min, numeric_only=False)
    getting_max = _grouper_function('getting_max', 'getting_max', np.getting_max, numeric_only=False)
    first = _grouper_function('first', 'first', _first_compat,
                              numeric_only=False, _convert=True)
    final_item = _grouper_function('final_item', 'final_item', _final_item_compat, numeric_only=False,
                             _convert=True)
    _count = _grouper_function('_count', 'count', _count_compat,
                               numeric_only=False)
    def count(self, axis=0):
        return self._count().totype('int64')
    def ohlc(self):
        """
        Compute open, high, low and close values of a group, excluding missing values
        For multiple groupings, the result index will be a MultiIndex
        """
        return self._employ_to_column_groupers(
            lambda x: x._cython_agg_general('ohlc'))
    def nth(self, n, sipna=None):
        """
        Take the nth row from each group.
        If sipna, will take the nth non-null row; sipna is either
        Truthy (if a Collections) or 'total_all', 'whatever' (if a KnowledgeFrame); this is equivalengtht
        to ctotal_alling sipna(how=sipna) before the grouper.
        Examples
        --------
        >>> kf = KnowledgeFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
        >>> g = kf.grouper('A')
        >>> g.nth(0)
           A   B
        0  1 NaN
        2  5   6
        >>> g.nth(1)
           A  B
        1  1  4
        >>> g.nth(-1)
           A  B
        1  1  4
        2  5  6
        >>> g.nth(0, sipna='whatever')
           B
        A
        1  4
        5  6
        >>> g.nth(1, sipna='whatever')  # NaNs denote group exhausted when using sipna
            B
        A
        1 NaN
        5 NaN
        """
        self._set_selection_from_grouper()
        if not sipna:  # good choice
            m = self.grouper._getting_max_groupsize
            if n >= m or n < -m:
                return self._selected_obj.loc[[]]
            rng = np.zeros(m, dtype=bool)
            if n >= 0:
                rng[n] = True
                is_nth = self._cumcount_array(rng)
            else:
                rng[- n - 1] = True
                is_nth = self._cumcount_array(rng, ascending=False)
            result = self._selected_obj[is_nth]
            # the result index
            if self.as_index:
                ax = self.obj._info_axis
                names = self.grouper.names
                if self.obj.ndim == 1:
                    # this is a pass-thru
                    pass
                elif total_all([ n in ax for n in names ]):
                    result.index = Index(self.obj[names][is_nth].values.flat_underlying()).set_names(names)
                elif self._group_selection is not None:
                    result.index = self.obj._getting_axis(self.axis)[is_nth]
                result = result.sorting_index()
            return result
        if (incontainstance(self._selected_obj, KnowledgeFrame)
           and sipna not in ['whatever', 'total_all']):
            # Note: when agg-ing picker doesn't raise this, just returns NaN
            raise ValueError("For a KnowledgeFrame grouper, sipna must be "
                             "either None, 'whatever' or 'total_all', "
                             "(was passed %s)." % (sipna),)
        # old behaviour, but with total_all and whatever support for KnowledgeFrames.
        # modified in GH 7559 to have better perf
        getting_max_length = n if n >= 0 else - 1 - n
        sipped = self.obj.sipna(how=sipna, axis=self.axis)
        # getting a new grouper for our sipped obj
        if self.keys is None and self.level is None:
            # we don't have the grouper info available (e.g. we have selected out
            # a column that is not in the current object)
            axis = self.grouper.axis
            grouper = axis[axis.incontain(sipped.index)]
            keys = self.grouper.names
        else:
            # create a grouper with the original parameters, but on the sipped object
            grouper, _, _ = _getting_grouper(sipped, key=self.keys, axis=self.axis,
                                         level=self.level, sort=self.sort)
        sizes = sipped.grouper(grouper).size()
        result = sipped.grouper(grouper).nth(n)
        mask = (sizes<getting_max_length).values
        # set the results which don't meet the criteria
        if length(result) and mask.whatever():
            result.loc[mask] = np.nan
        # reset/reindexing to the original groups
        if length(self.obj) == length(sipped) or length(result) == length(self.grouper.result_index):
            result.index = self.grouper.result_index
        else:
            result = result.reindexing(self.grouper.result_index)
        return result
    def cumcount(self, **kwargs):
        """
        Number each item in each group from 0 to the lengthgth of that group - 1.
        Essentitotal_ally this is equivalengtht to
        >>> self.employ(lambda x: Collections(np.arange(length(x)), x.index))
        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from lengthgth of group - 1 to 0.
        Example
        -------
        >>> kf = mk.KnowledgeFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
        ...                   columns=['A'])
        >>> kf
           A
        0  a
        1  a
        2  a
        3  b
        4  b
        5  a
        >>> kf.grouper('A').cumcount()
        0    0
        1    1
        2    2
        3    0
        4    1
        5    3
        dtype: int64
        >>> kf.grouper('A').cumcount(ascending=False)
        0    3
        1    2
        2    1
        3    1
        4    0
        5    0
        dtype: int64
        """
        self._set_selection_from_grouper()
        ascending = kwargs.pop('ascending', True)
        index = self._selected_obj.index
        cumcounts = self._cumcount_array(ascending=ascending)
        return Collections(cumcounts, index)
    def header_num(self, n=5):
        """
        Returns first n rows of each group.
        Essentitotal_ally equivalengtht to ``.employ(lambda x: x.header_num(n))``,
        except ignores as_index flag.
        Example
        -------
        >>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
                            columns=['A', 'B'])
        >>> kf.grouper('A', as_index=False).header_num(1)
           A  B
        0  1  2
        2  5  6
        >>> kf.grouper('A').header_num(1)
           A  B
        0  1  2
        2  5  6
        """
        obj = self._selected_obj
        in_header_num = self._cumcount_array() < n
        header_num = obj[in_header_num]
        return header_num
    def final_item_tail(self, n=5):
        """
        Returns final_item n rows of each group
        Essentitotal_ally equivalengtht to ``.employ(lambda x: x.final_item_tail(n))``,
        except ignores as_index flag.
        Example
        -------
        >>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
                            columns=['A', 'B'])
        >>> kf.grouper('A', as_index=False).final_item_tail(1)
           A  B
        1  1  4
        2  5  6
        >>> kf.grouper('A').final_item_tail(1)
           A  B
        1  1  4
        2  5  6
        """
        obj = self._selected_obj
        rng = np.arange(0, -self.grouper._getting_max_groupsize, -1, dtype='int64')
        in_final_item_tail = self._cumcount_array(rng, ascending=False) > -n
        final_item_tail = obj[in_final_item_tail]
        return final_item_tail
    def _cumcount_array(self, arr=None, **kwargs):
        """
        arr is where cumcount gettings its values from
        note: this is currently implementing sort=False (though the default is sort=True)
              for grouper in general
        """
        ascending = kwargs.pop('ascending', True)
        if arr is None:
            arr = np.arange(self.grouper._getting_max_groupsize, dtype='int64')
        length_index = length(self._selected_obj.index)
        cumcounts = np.zeros(length_index, dtype=arr.dtype)
        if not length_index:
            return cumcounts
        indices, values = [], []
        for v in self.indices.values():
            indices.adding(v)
            if ascending:
                values.adding(arr[:length(v)])
            else:
                values.adding(arr[length(v)-1::-1])
        indices = np.concatingenate(indices)
        values = np.concatingenate(values)
        cumcounts[indices] = values
        return cumcounts
    def _index_with_as_index(self, b):
        """
        Take boolean mask of index to be returned from employ, if as_index=True
        """
        # TODO perf, it feels like this should already be somewhere...
        from itertools import chain
        original = self._selected_obj.index
        gp = self.grouper
        levels = chain((gp.levels[i][gp.labels[i][b]]
                        for i in range(length(gp.groupings))),
                       (original.getting_level_values(i)[b]
                        for i in range(original.nlevels)))
        new = MultiIndex.from_arrays(list(levels))
        new.names = gp.names + original.names
        return new
    def _try_cast(self, result, obj):
        """
        try to cast the result to our obj original type,
        we may have value_roundtripped thru object in the average-time
        """
        if obj.ndim > 1:
            dtype = obj.values.dtype
        else:
            dtype = obj.dtype
        if not np.isscalar(result):
            result = _possibly_downcast_to_dtype(result, dtype)
        return result
    def _cython_agg_general(self, how, numeric_only=True):
        output = {}
        for name, obj in self._iterate_slices():
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue
            try:
                result, names = self.grouper.aggregate(obj.values, how)
            except AssertionError as e:
                raise GroupByError(str(e))
            output[name] = self._try_cast(result, obj)
        if length(output) == 0:
            raise DataError('No numeric types to aggregate')
        return self._wrap_aggregated_output(output, names)
    def _python_agg_general(self, func, *args, **kwargs):
        func = _intercept_function(func)
        f = lambda x: func(x, *args, **kwargs)
        # iterate through "columns" ex exclusions to populate output dict
        output = {}
        for name, obj in self._iterate_slices():
            try:
                result, counts = self.grouper.agg_collections(obj, f)
                output[name] = self._try_cast(result, obj)
            except TypeError:
                continue
        if length(output) == 0:
            return self._python_employ_general(f)
        if self.grouper._filter_empty_groups:
            mask = counts.flat_underlying() > 0
            for name, result in compat.iteritems(output):
                # since we are masking, make sure that we have a float object
                values = result
                if is_numeric_dtype(values.dtype):
                    values = com.ensure_float(values)
                output[name] = self._try_cast(values[mask], result)
        return self._wrap_aggregated_output(output)
    def _wrap_applied_output(self, *args, **kwargs):
        raise NotImplementedError
    def _concating_objects(self, keys, values, not_indexed_same=False):
        from monkey.tools.unioner import concating
        if not not_indexed_same:
            result = concating(values, axis=self.axis)
            ax = self._selected_obj._getting_axis(self.axis)
            if incontainstance(result, Collections):
                result = result.reindexing(ax)
            else:
                result = result.reindexing_axis(ax, axis=self.axis)
        elif self.group_keys:
            if self.as_index:
                # possible MI return case
                group_keys = keys
                group_levels = self.grouper.levels
                group_names = self.grouper.names
                result = concating(values, axis=self.axis, keys=group_keys,
                                levels=group_levels, names=group_names)
            else:
                # GH5610, returns a MI, with the first level being a
                # range index
                keys = list(range(length(values)))
                result = concating(values, axis=self.axis, keys=keys)
        else:
            result = concating(values, axis=self.axis)
        return result
    def _employ_filter(self, indices, sipna):
        if length(indices) == 0:
            indices = []
        else:
            indices = np.sort(np.concatingenate(indices))
        if sipna:
            filtered = self._selected_obj.take(indices)
        else:
            mask = np.empty(length(self._selected_obj.index), dtype=bool)
            mask.fill(False)
            mask[indices.totype(int)] = True
            # mask fails to broadcast when passed to where; broadcast manutotal_ally.
            mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
            filtered = self._selected_obj.where(mask)  # Fill with NaNs.
        return filtered
@Appender(GroupBy.__doc__)
def grouper(obj, by, **kwds):
    if incontainstance(obj, Collections):
        klass = CollectionsGroupBy
    elif incontainstance(obj, KnowledgeFrame):
        klass = KnowledgeFrameGroupBy
    else:  # pragma: no cover
        raise TypeError('invalid type: %s' % type(obj))
    return klass(obj, by, **kwds)
def _getting_axes(group):
    if incontainstance(group, Collections):
        return [group.index]
    else:
        return group.axes
def _is_indexed_like(obj, axes):
    if incontainstance(obj, Collections):
        if length(axes) > 1:
            return False
        return obj.index.equals(axes[0])
    elif incontainstance(obj, KnowledgeFrame):
        return obj.index.equals(axes[0])
    return False
class BaseGrouper(object):
    """
    This is an internal Grouper class, which actutotal_ally holds the generated groups
    """
    def __init__(self, axis, groupings, sort=True, group_keys=True):
        self.axis = axis
        self.groupings = groupings
        self.sort = sort
        self.group_keys = group_keys
        self.compressed = True
    @property
    def shape(self):
        return tuple(ping.ngroups for ping in self.groupings)
    def __iter__(self):
        return iter(self.indices)
    @property
    def nkeys(self):
        return length(self.groupings)
    def getting_iterator(self, data, axis=0):
        """
        Groupby iterator
        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        splitter = self._getting_splitter(data, axis=axis)
        keys = self._getting_group_keys()
        for key, (i, group) in zip(keys, splitter):
            yield key, group
    def _getting_splitter(self, data, axis=0):
        comp_ids, _, ngroups = self.group_info
        return getting_splitter(data, comp_ids, ngroups, axis=axis)
    def _getting_group_keys(self):
        if length(self.groupings) == 1:
            return self.levels[0]
        else:
            comp_ids, _, ngroups = self.group_info
            # provide "flattened" iterator for multi-group setting
            mappingper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
            return [mappingper.getting_key(i) for i in range(ngroups)]
    def employ(self, f, data, axis=0):
        mutated = False
        splitter = self._getting_splitter(data, axis=axis)
        group_keys = self._getting_group_keys()
        # oh boy
        f_name = com._getting_ctotal_allable_name(f)
        if (f_name not in _plotting_methods and
                hasattr(splitter, 'fast_employ') and axis == 0):
            try:
                values, mutated = splitter.fast_employ(f, group_keys)
                return group_keys, values, mutated
            except (lib.InvalidApply):
                # we detect a mutation of some kind
                # so take slow path
                pass
            except (Exception) as e:
                # raise this error to the ctotal_aller
                pass
        result_values = []
        for key, (i, group) in zip(group_keys, splitter):
            object.__setattr__(group, 'name', key)
            # group might be modified
            group_axes = _getting_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True
            result_values.adding(res)
        return group_keys, result_values, mutated
    @cache_readonly
    def indices(self):
        """ dict {group name -> group indices} """
        if length(self.groupings) == 1:
            return self.groupings[0].indices
        else:
            label_list = [ping.labels for ping in self.groupings]
            keys = [_values_from_object(ping.group_index) for ping in self.groupings]
            return _getting_indices_dict(label_list, keys)
    @property
    def labels(self):
        return [ping.labels for ping in self.groupings]
    @property
    def levels(self):
        return [ping.group_index for ping in self.groupings]
    @property
    def names(self):
        return [ping.name for ping in self.groupings]
    def size(self):
        """
        Compute group sizes
        """
        # TODO: better impl
        labels, _, ngroups = self.group_info
        bin_counts = algos.counts_value_num(labels, sort=False)
        bin_counts = bin_counts.reindexing(np.arange(ngroups))
        bin_counts.index = self.result_index
        return bin_counts
    @cache_readonly
    def _getting_max_groupsize(self):
        '''
        Compute size of largest group
        '''
        # For mwhatever items in each group this is much faster than
        # self.size().getting_max(), in worst case margintotal_ally slower
        if self.indices:
            return getting_max(length(v) for v in self.indices.values())
        else:
            return 0
    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """
        if length(self.groupings) == 1:
            return self.groupings[0].groups
        else:
            to_grouper = lzip(*(ping.grouper for ping in self.groupings))
            to_grouper = Index(to_grouper)
            return self.axis.grouper(to_grouper.values)
    @cache_readonly
    def group_info(self):
        comp_ids, obs_group_ids = self._getting_compressed_labels()
        ngroups = length(obs_group_ids)
        comp_ids = com._ensure_int64(comp_ids)
        return comp_ids, obs_group_ids, ngroups
    def _getting_compressed_labels(self):
        total_all_labels = [ping.labels for ping in self.groupings]
        if self._overflow_possible:
            tups = lib.fast_zip(total_all_labels)
            labs, distinctives = algos.factorize(tups)
            if self.sort:
                distinctives, labs = _reorder_by_distinctives(distinctives, labs)
            return labs, distinctives
        else:
            if length(total_all_labels) > 1:
                group_index = getting_group_index(total_all_labels, self.shape)
                comp_ids, obs_group_ids = _compress_group_index(group_index)
            else:
                ping = self.groupings[0]
                comp_ids = ping.labels
                obs_group_ids = np.arange(length(ping.group_index))
                self.compressed = False
                self._filter_empty_groups = False
            return comp_ids, obs_group_ids
    @cache_readonly
    def _overflow_possible(self):
        return _int64_overflow_possible(self.shape)
    @cache_readonly
    def ngroups(self):
        return length(self.result_index)
    @cache_readonly
    def result_index(self):
        recons = self.getting_group_levels()
        return MultiIndex.from_arrays(recons, names=self.names)
    def getting_group_levels(self):
        obs_ids = self.group_info[1]
        if not self.compressed and length(self.groupings) == 1:
            return [self.groupings[0].group_index]
        if self._overflow_possible:
            recons_labels = [np.array(x) for x in zip(*obs_ids)]
        else:
            recons_labels = decons_group_index(obs_ids, self.shape)
        name_list = []
        for ping, labels in zip(self.groupings, recons_labels):
            labels = com._ensure_platform_int(labels)
            levels = ping.group_index.take(labels)
            name_list.adding(levels)
        return name_list
    #------------------------------------------------------------
    # Aggregation functions
    _cython_functions = {
        'add': 'group_add',
        'prod': 'group_prod',
        'getting_min': 'group_getting_min',
        'getting_max': 'group_getting_max',
        'average': 'group_average',
        'median': {
            'name': 'group_median'
        },
        'var': 'group_var',
        'first': {
            'name': 'group_nth',
            'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
        },
        'final_item': 'group_final_item',
        'count': 'group_count',
    }
    _cython_arity = {
        'ohlc': 4,  # OHLC
    }
    _name_functions = {}
    _filter_empty_groups = True
    def _getting_aggregate_function(self, how, values):
        dtype_str = values.dtype.name
        def getting_func(fname):
            # find the function, or use the object function, or return a
            # generic
            for dt in [dtype_str, 'object']:
                f = gettingattr(_algos, "%s_%s" % (fname, dt), None)
                if f is not None:
                    return f
            return gettingattr(_algos, fname, None)
        ftype = self._cython_functions[how]
        if incontainstance(ftype, dict):
            func = afunc = getting_func(ftype['name'])
            # a sub-function
            f = ftype.getting('f')
            if f is not None:
                def wrapper(*args, **kwargs):
                    return f(afunc, *args, **kwargs)
                # need to curry our sub-function
                func = wrapper
        else:
            func = getting_func(ftype)
        if func is None:
            raise NotImplementedError("function is not implemented for this"
                                      "dtype: [how->%s,dtype->%s]" %
                                      (how, dtype_str))
        return func, dtype_str
    def aggregate(self, values, how, axis=0):
        arity = self._cython_arity.getting(how, 1)
        vdim = values.ndim
        swapped = False
        if vdim == 1:
            values = values[:, None]
            out_shape = (self.ngroups, arity)
        else:
            if axis > 0:
                swapped = True
                values = values.swapaxes(0, axis)
            if arity > 1:
                raise NotImplementedError
            out_shape = (self.ngroups,) + values.shape[1:]
        if is_numeric_dtype(values.dtype):
            values = com.ensure_float(values)
            is_numeric = True
            out_dtype = 'f%d' % values.dtype.itemsize
        else:
            is_numeric = issubclass(values.dtype.type, (np.datetime64,
                                                        np.timedelta64))
            if is_numeric:
                out_dtype = 'float64'
                values = values.view('int64')
            else:
                out_dtype = 'object'
                values = values.totype(object)
        # will be filled in Cython function
        result = np.empty(out_shape, dtype=out_dtype)
        result.fill(np.nan)
        counts = np.zeros(self.ngroups, dtype=np.int64)
        result = self._aggregate(result, counts, values, how, is_numeric)
        if self._filter_empty_groups:
            if result.ndim == 2:
                try:
                    result = lib.row_bool_subset(
                        result, (counts > 0).view(np.uint8))
                except ValueError:
                    result = lib.row_bool_subset_object(
                        result, (counts > 0).view(np.uint8))
            else:
                result = result[counts > 0]
        if vdim == 1 and arity == 1:
            result = result[:, 0]
        if how in self._name_functions:
            # TODO
            names = self._name_functions[how]()
        else:
            names = None
        if swapped:
            result = result.swapaxes(0, axis)
        return result, names
    def _aggregate(self, result, counts, values, how, is_numeric):
        agg_func, dtype = self._getting_aggregate_function(how, values)
        comp_ids, _, ngroups = self.group_info
        if values.ndim > 3:
            # punting for now
            raise NotImplementedError
        elif values.ndim > 2:
            for i, chunk in enumerate(values.transpose(2, 0, 1)):
                chunk = chunk.squeeze()
                agg_func(result[:, :, i], counts, chunk, comp_ids)
        else:
            agg_func(result, counts, values, comp_ids)
        return result
    def agg_collections(self, obj, func):
        try:
            return self._aggregate_collections_fast(obj, func)
        except Exception:
            return self._aggregate_collections_pure_python(obj, func)
    def _aggregate_collections_fast(self, obj, func):
        func = _intercept_function(func)
        if obj.index._has_complex_internals:
            raise TypeError('Incompatible index for Cython grouper')
        group_index, _, ngroups = self.group_info
        # avoids object / Collections creation overheader_num
        dummy = obj._getting_values(slice(None, 0)).to_dense()
        indexer = _algos.groupsorting_indexer(group_index, ngroups)[0]
        obj = obj.take(indexer, convert=False)
        group_index = com.take_nd(group_index, indexer, total_allow_fill=False)
        grouper = lib.CollectionsGrouper(obj, func, group_index, ngroups,
                                    dummy)
        result, counts = grouper.getting_result()
        return result, counts
    def _aggregate_collections_pure_python(self, obj, func):
        group_index, _, ngroups = self.group_info
        counts = np.zeros(ngroups, dtype=int)
        result = None
        splitter = getting_splitter(obj, group_index, ngroups, axis=self.axis)
        for label, group in splitter:
            res = func(group)
            if result is None:
                if (incontainstance(res, (Collections, Index, np.ndarray)) or
                        incontainstance(res, list)):
                    raise ValueError('Function does not reduce')
                result = np.empty(ngroups, dtype='O')
            counts[label] = group.shape[0]
            result[label] = res
        result = lib.maybe_convert_objects(result, try_float=0)
        return result, counts
def generate_bins_generic(values, binner, closed):
    """
    Generate bin edge offsets and bin labels for one array using another array
    which has bin edge values. Both arrays must be sorted.
    Parameters
    ----------
    values : array of values
    binner : a comparable array of values representing bins into which to bin
        the first array. Note, 'values' end-points must ftotal_all within 'binner'
        end-points.
    closed : which end of bin is closed; left (default), right
    Returns
    -------
    bins : array of offsets (into 'values' argument) of bins.
        Zero and final_item edge are excluded in result, so for instance the first
        bin is values[0:bin[0]] and the final_item is values[bin[-1]:]
    """
    lengthidx = length(values)
    lengthbin = length(binner)
    if lengthidx <= 0 or lengthbin <= 0:
        raise ValueError("Invalid lengthgth for values or for binner")
    # check binner fits data
    if values[0] < binner[0]:
        raise ValueError("Values ftotal_all before first bin")
    if values[lengthidx - 1] > binner[lengthbin - 1]:
        raise ValueError("Values ftotal_all after final_item bin")
    bins = np.empty(lengthbin - 1, dtype=np.int64)
    j = 0  # index into values
    bc = 0  # bin count
    # linear scan, pretotal_sume nothing about values/binner except that it fits ok
    for i in range(0, lengthbin - 1):
        r_bin = binner[i + 1]
        # count values in current bin, advance to next bin
        while j < lengthidx and (values[j] < r_bin or
                              (closed == 'right' and values[j] == r_bin)):
            j += 1
        bins[bc] = j
        bc += 1
    return bins
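# Worked example (added for clarity, not in the original source): with the
# sorted values and binner below, the returned offsets split `values` into
# values[0:3] (first bin) and values[3:5] (final_item bin).
def _example_generate_bins():
    values = np.array([1, 2, 3, 7, 8])
    binner = np.array([0, 5, 10])
    bins = generate_bins_generic(values, binner, closed='left')
    assert list(bins) == [3, 5]
    return bins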
class BinGrouper(BaseGrouper):
    def __init__(self, bins, binlabels, filter_empty=False):
        self.bins = com._ensure_int64(bins)
        self.binlabels = _ensure_index(binlabels)
        self._filter_empty_groups = filter_empty
    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """
        # this is mainly for compat
        # GH 3881
        result = {}
        for key, value in zip(self.binlabels, self.bins):
            if key is not tslib.NaT:
                result[key] = value
        return result
    @property
    def nkeys(self):
        return 1
    def getting_iterator(self, data, axis=0):
        """
        Groupby iterator
        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        if incontainstance(data, NDFrame):
            slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
            lengthgth = length(data.axes[axis])
        else:
            slicer = lambda start,edge: data[slice(start,edge)]
            lengthgth = length(data)
        start = 0
        for edge, label in zip(self.bins, self.binlabels):
            if label is not tslib.NaT:
                yield label, slicer(start,edge)
            start = edge
        if start < lengthgth:
            yield self.binlabels[-1], slicer(start,None)
    def employ(self, f, data, axis=0):
        result_keys = []
        result_values = []
        mutated = False
        for key, group in self.getting_iterator(data, axis=axis):
            object.__setattr__(group, 'name', key)
            # group might be modified
            group_axes = _getting_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True
            result_keys.adding(key)
            result_values.adding(res)
        return result_keys, result_values, mutated
    @cache_readonly
    def indices(self):
        indices = collections.defaultdict(list)
        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            if i < bin:
                if label is not tslib.NaT:
                    indices[label] = list(range(i, bin))
                i = bin
        return indices
    @cache_readonly
    def ngroups(self):
        return length(self.binlabels)
    @cache_readonly
    def result_index(self):
        mask = self.binlabels.asi8 == tslib.iNaT
        return self.binlabels[~mask]
    @property
    def levels(self):
        return [self.binlabels]
    @property
    def names(self):
        return [self.binlabels.name]
    @property
    def groupings(self):
        # for compat
        return None
    def size(self):
        """
        Compute group sizes
        """
        base = Collections(np.zeros(length(self.result_index), dtype=np.int64),
                      index=self.result_index)
        indices = self.indices
        for k, v in compat.iteritems(indices):
            indices[k] = length(v)
        bin_counts = Collections(indices, dtype=np.int64)
        result = base.add(bin_counts, fill_value=0)
        # addition with fill_value changes dtype to float64
        result = result.totype(np.int64)
        return result
    #----------------------------------------------------------------------
    # cython aggregation
    _cython_functions = {
        'add': 'group_add_bin',
        'prod': 'group_prod_bin',
        'average': 'group_average_bin',
        'getting_min': 'group_getting_min_bin',
        'getting_max': 'group_getting_max_bin',
        'var': 'group_var_bin',
        'ohlc': 'group_ohlc',
        'first': {
            'name': 'group_nth_bin',
            'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
        },
        'final_item': 'group_final_item_bin',
        'count': 'group_count_bin',
    }
    _name_functions = {
        'ohlc': lambda *args: ['open', 'high', 'low', 'close']
    }
    _filter_empty_groups = True
    def _aggregate(self, result, counts, values, how, is_numeric=True):
        agg_func, dtype = self._getting_aggregate_function(how, values)
        if values.ndim > 3:
            # punting for now
            raise NotImplementedError
        elif values.ndim > 2:
            for i, chunk in enumerate(values.transpose(2, 0, 1)):
                agg_func(result[:, :, i], counts, chunk, self.bins)
        else:
            agg_func(result, counts, values, self.bins)
        return result
    def agg_collections(self, obj, func):
        dummy = obj[:0]
        grouper = lib.CollectionsBinGrouper(obj, func, self.bins, dummy)
        return grouper.getting_result()
class Grouping(object):
    """
    Holds the grouping informatingion for a single key
    Parameters
    ----------
    index : Index
    grouper :
    obj :
    name :
    level :
    Returns
    -------
    **Attributes**:
      * indices : dict of {group -> index_list}
      * labels : ndarray, group labels
      * ids : mappingping of label -> group
      * counts : array of group counts
      * group_index : distinctive groups
      * groups : dict of {group -> label_list}
    """
    def __init__(self, index, grouper=None, obj=None, name=None, level=None,
                 sort=True):
        self.name = name
        self.level = level
        self.grouper = _convert_grouper(index, grouper)
        self.index = index
        self.sort = sort
        self.obj = obj
        # right place for this?
        if incontainstance(grouper, (Collections, Index)) and name is None:
            self.name = grouper.name
        if incontainstance(grouper, MultiIndex):
            self.grouper = grouper.values
        # pre-computed
        self._was_factor = False
        self._should_compress = True
        # we have a single grouper which may be a myriad of things, some of which are
        # dependent on the passing in level
        #
        if level is not None:
            if not incontainstance(level, int):
                if level not in index.names:
                    raise AssertionError('Level %s not in index' % str(level))
                level = index.names.index(level)
            inds = index.labels[level]
            level_index = index.levels[level]
            if self.name is None:
                self.name = index.names[level]
            # XXX complete hack
            if grouper is not None:
                level_values = index.levels[level].take(inds)
                self.grouper = level_values.mapping(self.grouper)
            else:
                self._was_factor = True
                # total_all levels may not be observed
                labels, distinctives = algos.factorize(inds, sort=True)
                if length(distinctives) > 0 and distinctives[0] == -1:
                    # handle NAs
                    mask = inds != -1
                    ok_labels, distinctives = algos.factorize(inds[mask], sort=True)
                    labels = np.empty(length(inds), dtype=inds.dtype)
                    labels[mask] = ok_labels
                    labels[~mask] = -1
                if length(distinctives) < length(level_index):
                    level_index = level_index.take(distinctives)
                self._labels = labels
                self._group_index = level_index
                self.grouper = level_index.take(labels)
        else:
            if incontainstance(self.grouper, (list, tuple)):
                self.grouper = com._asarray_tuplesafe(self.grouper)
            # a passed Categorical
            elif incontainstance(self.grouper, Categorical):
                factor = self.grouper
                self._was_factor = True
                # Is there whatever way to avoid this?
                self.grouper = np.asarray(factor)
                self._labels = factor.codes
                self._group_index = factor.levels
                if self.name is None:
                    self.name = factor.name
            # a passed Grouper like
            elif incontainstance(self.grouper, Grouper):
                # getting the new grouper
                grouper = self.grouper._getting_binner_for_grouping(self.obj)
                self.obj = self.grouper.obj
                self.grouper = grouper
                if self.name is None:
                    self.name = grouper.name
            # no level passed
            if not incontainstance(self.grouper, (Collections, Index, np.ndarray)):
                self.grouper = self.index.mapping(self.grouper)
                if not (hasattr(self.grouper, "__length__") and
                        length(self.grouper) == length(self.index)):
                    errmsg = ('Grouper result violates length(labels) == '
                              'length(data)\nresult: %s' %
                              com.pprint_thing(self.grouper))
                    self.grouper = None  # Try for sanity
                    raise AssertionError(errmsg)
        # if we have a date/time-like grouper, make sure that we have Timestamps like
        if gettingattr(self.grouper,'dtype',None) is not None:
            if is_datetime64_dtype(self.grouper):
                from monkey import convert_datetime
                self.grouper = convert_datetime(self.grouper)
            elif is_timedelta64_dtype(self.grouper):
                from monkey import to_timedelta
                self.grouper = to_timedelta(self.grouper)
    def __repr__(self):
        return 'Grouping(%s)' % self.name
    def __iter__(self):
        return iter(self.indices)
    _labels = None
    _group_index = None
    @property
    def ngroups(self):
        return length(self.group_index)
    @cache_readonly
    def indices(self):
        return _grouper_indices(self.grouper)
    @property
    def labels(self):
        if self._labels is None:
            self._make_labels()
        return self._labels
    @property
    def group_index(self):
        if self._group_index is None:
            self._make_labels()
        return self._group_index
    def _make_labels(self):
        if self._was_factor:  # pragma: no cover
            raise Exception('Should not ctotal_all this method grouping by level')
        else:
            labels, distinctives = algos.factorize(self.grouper, sort=self.sort)
            distinctives = Index(distinctives, name=self.name)
            self._labels = labels
            self._group_index = distinctives
    _groups = None
    @property
    def groups(self):
        if self._groups is None:
            self._groups = self.index.grouper(self.grouper)
        return self._groups
def _getting_grouper(obj, key=None, axis=0, level=None, sort=True):
    """
    create and return a BaseGrouper, which is an internal
    mappingping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers
    Groupers are ultimately index mappingpings. They can originate as:
    index mappingpings, keys to columns, functions, or Groupers
    Groupers enable local references to axis,level,sort, while
    the passed in axis, level, and sort are 'global'.
    This routine tries to figure out what the passed-in references
    are and then creates a Grouping for each one, combined into
    a BaseGrouper.
    """
    group_axis = obj._getting_axis(axis)
    # validate that the passed level is compatible with the passed
    # axis of the object
    if level is not None:
        if not incontainstance(group_axis, MultiIndex):
            if incontainstance(level, compat.string_types):
                if obj.index.name != level:
                    raise ValueError('level name %s is not the name of the '
                                     'index' % level)
            elif level > 0:
                raise ValueError('level > 0 only valid with MultiIndex')
            level = None
            key = group_axis
    # a passed in Grouper, directly convert
    if incontainstance(key, Grouper):
        binner, grouper, obj = key._getting_grouper(obj)
        if key.key is None:
            return grouper, [], obj
        else:
            return grouper, set([key.key]), obj
    # already have a BaseGrouper, just return it
    elif incontainstance(key, BaseGrouper):
        return key, [], obj
    if not incontainstance(key, (tuple, list)):
        keys = [key]
    else:
        keys = key
    # what are we after, exactly?
    match_axis_lengthgth = length(keys) == length(group_axis)
    whatever_ctotal_allable = whatever(ctotal_allable(g) or incontainstance(g, dict) for g in keys)
    whatever_arraylike = whatever(incontainstance(g, (list, tuple, Collections, Index, np.ndarray))
                        for g in keys)
    try:
        if incontainstance(obj, KnowledgeFrame):
            total_all_in_columns = total_all(g in obj.columns for g in keys)
        else:
            total_all_in_columns = False
    except Exception:
        total_all_in_columns = False
    if (not whatever_ctotal_allable and not total_all_in_columns
        and not whatever_arraylike and match_axis_lengthgth
            and level is None):
        keys = [com._asarray_tuplesafe(keys)]
    if incontainstance(level, (tuple, list)):
        if key is None:
            keys = [None] * length(level)
        levels = level
    else:
        levels = [level] * length(keys)
    groupings = []
    exclusions = []
    for i, (gpr, level) in enumerate(zip(keys, levels)):
        name = None
        try:
            obj._data.items.getting_loc(gpr)
            in_axis = True
        except Exception:
            in_axis = False
        if _is_label_like(gpr) or in_axis:
            exclusions.adding(gpr)
            name = gpr
            gpr = obj[gpr]
        if incontainstance(gpr, Categorical) and length(gpr) != length(obj):
            errmsg = "Categorical grouper must have length(grouper) == length(data)"
            raise AssertionError(errmsg)
        ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
        groupings.adding(ping)
    if length(groupings) == 0:
        raise ValueError('No group keys passed!')
    # create the internals grouper
    grouper = BaseGrouper(group_axis, groupings, sort=sort)
    return grouper, exclusions, obj
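# Illustrative sketch (hypothetical frame `kf`, not part of the original source) of the
# key kinds the routine above accepts; each one becomes a Grouping and the Groupings are
# combined into a single BaseGrouper:
#     _getting_grouper(kf, key='a')               # a column label
#     _getting_grouper(kf, key=kf['a'])           # an array-like aligned with the axis
#     _getting_grouper(kf, key=lambda v: v % 2)   # a ctotal_allable mappingped over the axis
#     _getting_grouper(kf, level=0)               # an index level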
def _is_label_like(val):
    return incontainstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
    if incontainstance(grouper, dict):
        return grouper.getting
    elif incontainstance(grouper, Collections):
        if grouper.index.equals(axis):
            return grouper.values
        else:
            return grouper.reindexing(axis).values
    elif incontainstance(grouper, (list, Collections, Index, np.ndarray)):
        if length(grouper) != length(axis):
            raise AssertionError('Grouper and axis must be same lengthgth')
        return grouper
    else:
        return grouper
class CollectionsGroupBy(GroupBy):
    _employ_whitelist = _collections_employ_whitelist
    def aggregate(self, func_or_funcs, *args, **kwargs):
        """
        Apply aggregation function or functions to groups, yielding most likely
        Collections but in some cases KnowledgeFrame depending on the output of the
        aggregation function
        Parameters
        ----------
        func_or_funcs : function or list / dict of functions
            List/dict of functions will produce KnowledgeFrame with column names
            detergetting_mined by the function names themselves (list) or the keys in
            the dict
        Notes
        -----
        agg is an alias for aggregate. Use it.
        Examples
        --------
        >>> collections
        bar    1.0
        baz    2.0
        qot    3.0
        qux    4.0
        >>> mappingper = lambda x: x[0] # first letter
        >>> grouped = collections.grouper(mappingper)
        >>> grouped.aggregate(np.total_sum)
        b    3.0
        q    7.0
        >>> grouped.aggregate([np.total_sum, np.average, np.standard])
           average  standard  total_sum
        b  1.5   0.5  3
        q  3.5   0.5  7
        >>> grouped.agg({'result' : lambda x: x.average() / x.standard(),
        ...              'total' : np.total_sum})
           result  total
        b  2.121   3
        q  4.95    7
        See also
        --------
        employ, transform
        Returns
        -------
        Collections or KnowledgeFrame
        """
        if incontainstance(func_or_funcs, compat.string_types):
            return gettingattr(self, func_or_funcs)(*args, **kwargs)
        if hasattr(func_or_funcs, '__iter__'):
            ret = self._aggregate_multiple_funcs(func_or_funcs)
        else:
            cyfunc = _intercept_cython(func_or_funcs)
            if cyfunc and not args and not kwargs:
                return gettingattr(self, cyfunc)()
            if self.grouper.nkeys > 1:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)
            try:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)
            except Exception:
                result = self._aggregate_named(func_or_funcs, *args, **kwargs)
            index = Index(sorted(result), name=self.grouper.names[0])
            ret = Collections(result, index=index)
        if not self.as_index:  # pragma: no cover
            print('Warning, ignoring as_index=True')
        return ret
    def _aggregate_multiple_funcs(self, arg):
        if incontainstance(arg, dict):
            columns = list(arg.keys())
            arg = list(arg.items())
        elif whatever(incontainstance(x, (tuple, list)) for x in arg):
            arg = [(x, x) if not incontainstance(x, (tuple, list)) else x
                   for x in arg]
            # indicated column order
            columns = lzip(*arg)[0]
        else:
            # list of functions / function names
            columns = []
            for f in arg:
                if incontainstance(f, compat.string_types):
                    columns.adding(f)
                else:
                    # protect against ctotal_allables without names
                    columns.adding(com._getting_ctotal_allable_name(f))
            arg = lzip(columns, arg)
        results = {}
        for name, func in arg:
            if name in results:
                raise SpecificationError('Function names must be distinctive, '
                                         'found multiple named %s' % name)
            results[name] = self.aggregate(func)
        return KnowledgeFrame(results, columns=columns)
    def _wrap_aggregated_output(self, output, names=None):
        # sort of a kludge
        output = output[self.name]
        index = self.grouper.result_index
        if names is not None:
            return KnowledgeFrame(output, index=index, columns=names)
        else:
            name = self.name
            if name is None:
                name = self._selected_obj.name
            return Collections(output, index=index, name=name)
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        if length(keys) == 0:
            # GH #6265
            return Collections([], name=self.name)
        def _getting_index():
            if self.grouper.nkeys > 1:
                index = MultiIndex.from_tuples(keys, names=self.grouper.names)
            else:
                index = Index(keys, name=self.grouper.names[0])
            return index
        if incontainstance(values[0], dict):
            # GH #823
            index = _getting_index()
            return KnowledgeFrame(values, index=index).stack()
        if incontainstance(values[0], (Collections, dict)):
            return self._concating_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif incontainstance(values[0], KnowledgeFrame):
            # possible that Collections -> KnowledgeFrame by applied function
            return self._concating_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        else:
            # GH #6265
            return Collections(values, index=_getting_index(), name=self.name)
    def _aggregate_named(self, func, *args, **kwargs):
        result = {}
        for name, group in self:
            group.name = name
            output = func(group, *args, **kwargs)
            if incontainstance(output, (Collections, Index, np.ndarray)):
                raise Exception('Must produce aggregated value')
            result[name] = self._try_cast(output, group)
        return result
    def transform(self, func, *args, **kwargs):
        """
        Ctotal_all function producing a like-indexed Collections on each group and return
        a Collections with the transformed values
        Parameters
        ----------
        func : function
            To employ to each group. Should return a Collections with the same index
        Examples
        --------
        >>> grouped.transform(lambda x: (x - x.average()) / x.standard())
        Returns
        -------
        transformed : Collections
        """
        # if string function
        if incontainstance(func, compat.string_types):
            return self._transform_fast(lambda : gettingattr(self, func)(*args, **kwargs))
        # do we have a cython function
        cyfunc = _intercept_cython(func)
        if cyfunc and not args and not kwargs:
            return self._transform_fast(cyfunc)
        # reg transform
        dtype = self._selected_obj.dtype
        result = self._selected_obj.values.clone()
        wrapper = lambda x: func(x, *args, **kwargs)
        for i, (name, group) in enumerate(self):
            object.__setattr__(group, 'name', name)
            res = wrapper(group)
            if hasattr(res, 'values'):
                res = res.values
            # may need to totype
            try:
                common_type = np.common_type(np.array(res), result)
                if common_type != result.dtype:
                    result = result.totype(common_type)
            except:
                pass
            indexer = self._getting_index(name)
            result[indexer] = res
        result = _possibly_downcast_to_dtype(result, dtype)
        return self._selected_obj.__class__(result,
                                            index=self._selected_obj.index,
                                            name=self._selected_obj.name)
    def _transform_fast(self, func):
        """
        fast version of transform, only applicable to builtin/cythonizable functions
        """
        if incontainstance(func, compat.string_types):
            func = gettingattr(self,func)
        values = func().values
        counts = self.count().values
        values = np.repeat(values, com._ensure_platform_int(counts))
        # the values/counts are repeated according to the group index
        indices = self.indices
        # shortcut of we have an already ordered grouper
        if Index(self.grouper.group_info[0]).is_monotonic:
            result = Collections(values, index=self.obj.index)
        else:
            index = Index(np.concatingenate([ indices[v] for v in self.grouper.result_index ]))
            result = Collections(values, index=index).sorting_index()
            result.index = self.obj.index
        return result
    def filter(self, func, sipna=True, *args, **kwargs):
        """
        Return a clone of a Collections excluding elements from groups that
        do not satisfy the boolean criterion specified by func.
        Parameters
        ----------
        func : function
            To employ to each group. Should return True or False.
        sipna : Drop groups that do not pass the filter. True by default;
            if False, groups that evaluate False are filled with NaNs.
        Example
        -------
        >>> grouped.filter(lambda x: x.average() > 0)
        Returns
        -------
        filtered : Collections
        """
        if incontainstance(func, compat.string_types):
            wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
        else:
            wrapper = lambda x: func(x, *args, **kwargs)
        # Interpret np.nan as False.
        def true_and_notnull(x, *args, **kwargs):
            b = wrapper(x, *args, **kwargs)
            return b and notnull(b)
        try:
            indices = [self._getting_index(name) if true_and_notnull(group) else []
                       for name, group in self]
        except ValueError:
            raise TypeError("the filter must return a boolean result")
        except TypeError:
            raise TypeError("the filter must return a boolean result")
        filtered = self._employ_filter(indices, sipna)
        return filtered
    def _employ_to_column_groupers(self, func):
        """ return a pass thru """
        return func(self)
class NDFrameGroupBy(GroupBy):
    def _iterate_slices(self):
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self.obj.columns
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self.obj[x]
        else:
            slice_axis = self.obj.index
            slicer = self.obj.xs
        for val in slice_axis:
            if val in self.exclusions:
                continue
            yield val, slicer(val)
    def _cython_agg_general(self, how, numeric_only=True):
        new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
        return self._wrap_agged_blocks(new_items, new_blocks)
    def _wrap_agged_blocks(self, items, blocks):
        obj = self._obj_with_exclusions
        new_axes = list(obj._data.axes)
        # more kludge
        if self.axis == 0:
            new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
        else:
            new_axes[self.axis] = self.grouper.result_index
        # Make sure block manager integrity check passes.
        assert new_axes[0].equals(items)
        new_axes[0] = items
        mgr = BlockManager(blocks, new_axes)
        new_obj = type(obj)(mgr)
        return self._post_process_cython_aggregate(new_obj)
    _block_agg_axis = 0
    def _cython_agg_blocks(self, how, numeric_only=True):
        data, agg_axis = self._getting_data_to_aggregate()
        new_blocks = []
        if numeric_only:
            data = data.getting_numeric_data(clone=False)
        for block in data.blocks:
            values = block._try_operate(block.values)
            if block.is_numeric:
                values = com.ensure_float(values)
            result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
            # see if we can cast the block back to the original dtype
            result = block._try_coerce_and_cast_result(result)
            newb = make_block(result, placement=block.mgr_locs)
            new_blocks.adding(newb)
        if length(new_blocks) == 0:
            raise DataError('No numeric types to aggregate')
        return data.items, new_blocks
    def _getting_data_to_aggregate(self):
        obj = self._obj_with_exclusions
        if self.axis == 0:
            return obj.swapaxes(0, 1)._data, 1
        else:
            return obj._data, self.axis
    def _post_process_cython_aggregate(self, obj):
        # undoing kludge from below
        if self.axis == 0:
            obj = obj.swapaxes(0, 1)
        return obj
    @cache_readonly
    def _obj_with_exclusions(self):
        if self._selection is not None:
            return self.obj.reindexing(columns=self._selection_list)
        if length(self.exclusions) > 0:
            return self.obj.sip(self.exclusions, axis=1)
        else:
            return self.obj
    @Appender(_agg_doc)
    def aggregate(self, arg, *args, **kwargs):
        if incontainstance(arg, compat.string_types):
            return gettingattr(self, arg)(*args, **kwargs)
        result = OrderedDict()
        if incontainstance(arg, dict):
            if self.axis != 0:  # pragma: no cover
                raise ValueError('Can only pass dict with axis=0')
            obj = self._selected_obj
            if whatever(incontainstance(x, (list, tuple, dict)) for x in arg.values()):
                new_arg = OrderedDict()
                for k, v in compat.iteritems(arg):
                    if not incontainstance(v, (tuple, list, dict)):
                        new_arg[k] = [v]
                    else:
                        new_arg[k] = v
                arg = new_arg
            keys = []
            if self._selection is not None:
                subset = obj
                if incontainstance(subset, KnowledgeFrame):
                    raise NotImplementedError
                for fname, agg_how in compat.iteritems(arg):
                    colg = CollectionsGroupBy(subset, selection=self._selection,
                                         grouper=self.grouper)
                    result[fname] = colg.aggregate(agg_how)
                    keys.adding(fname)
            else:
                for col, agg_how in compat.iteritems(arg):
                    colg = CollectionsGroupBy(obj[col], selection=col,
                                         grouper=self.grouper)
                    result[col] = colg.aggregate(agg_how)
                    keys.adding(col)
            if incontainstance(list(result.values())[0], KnowledgeFrame):
                from monkey.tools.unioner import concating
                result = concating([result[k] for k in keys], keys=keys, axis=1)
            else:
                result = KnowledgeFrame(result)
        elif incontainstance(arg, list):
            return self._aggregate_multiple_funcs(arg)
        else:
            cyfunc = _intercept_cython(arg)
            if cyfunc and not args and not kwargs:
                return gettingattr(self, cyfunc)()
            if self.grouper.nkeys > 1:
                return self._python_agg_general(arg, *args, **kwargs)
            else:
                # try to treat as if we are passing a list
                try:
                    assert not args and not kwargs
                    result = self._aggregate_multiple_funcs([arg])
                    result.columns = Index(result.columns.levels[0],
                                           name=self._selected_obj.columns.name)
                except:
                    result = self._aggregate_generic(arg, *args, **kwargs)
        if not self.as_index:
            if incontainstance(result.index, MultiIndex):
                zipped = zip(result.index.levels, result.index.labels,
                             result.index.names)
                for i, (lev, lab, name) in enumerate(zipped):
                    result.insert(i, name,
                                  com.take_nd(lev.values, lab,
                                              total_allow_fill=False))
                result = result.consolidate()
            else:
                values = result.index.values
                name = self.grouper.groupings[0].name
                result.insert(0, name, values)
            result.index = np.arange(length(result))
        return result.convert_objects()
    def _aggregate_multiple_funcs(self, arg):
        from monkey.tools.unioner import concating
        if self.axis != 0:
            raise NotImplementedError
        obj = self._obj_with_exclusions
        results = []
        keys = []
        for col in obj:
            try:
                colg = CollectionsGroupBy(obj[col], selection=col,
                                     grouper=self.grouper)
                results.adding(colg.aggregate(arg))
                keys.adding(col)
            except (TypeError, DataError):
                pass
            except SpecificationError:
                raise
        result = concating(results, keys=keys, axis=1)
        return result
    def _aggregate_generic(self, func, *args, **kwargs):
        if self.grouper.nkeys != 1:
            raise AssertionError('Number of keys must be 1')
        axis = self.axis
        obj = self._obj_with_exclusions
        result = {}
        if axis != obj._info_axis_number:
            try:
                for name, data in self:
                    # for name in self.indices:
                    #     data = self.getting_group(name, obj=obj)
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
            except Exception:
                return self._aggregate_item_by_item(func, *args, **kwargs)
        else:
            for name in self.indices:
                try:
                    data = self.getting_group(name, obj=obj)
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
                except Exception:
                    wrapper = lambda x: func(x, *args, **kwargs)
                    result[name] = data.employ(wrapper, axis=axis)
        return self._wrap_generic_output(result, obj)
    def _wrap_aggregated_output(self, output, names=None):
        raise NotImplementedError
    def _aggregate_item_by_item(self, func, *args, **kwargs):
        # only for axis==0
        obj = self._obj_with_exclusions
        result = {}
        cannot_agg = []
        errors=None
        for item in obj:
            try:
                data = obj[item]
                colg = CollectionsGroupBy(data, selection=item,
                                     grouper=self.grouper)
                result[item] = self._try_cast(
                    colg.aggregate(func, *args, **kwargs), data)
            except ValueError:
                cannot_agg.adding(item)
                continue
            except TypeError as e:
                cannot_agg.adding(item)
                errors=e
                continue
        result_columns = obj.columns
        if cannot_agg:
            result_columns = result_columns.sip(cannot_agg)
            # GH6337
            if not length(result_columns) and errors is not None:
                raise errors
        return KnowledgeFrame(result, columns=result_columns)
    def _decide_output_index(self, output, labels):
        if length(output) == length(labels):
            output_keys = labels
        else:
            output_keys = sorted(output)
            try:
                output_keys.sort()
            except Exception:  # pragma: no cover
                pass
            if incontainstance(labels, MultiIndex):
                output_keys = MultiIndex.from_tuples(output_keys,
                                                     names=labels.names)
        return output_keys
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        from monkey.core.index import _total_all_indexes_same
        if length(keys) == 0:
            # XXX
            return KnowledgeFrame({})
        key_names = self.grouper.names
        if incontainstance(values[0], KnowledgeFrame):
            return self._concating_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif self.grouper.groupings is not None:
            if length(self.grouper.groupings) > 1:
                key_index = MultiIndex.from_tuples(keys, names=key_names)
            else:
                ping = self.grouper.groupings[0]
                if length(keys) == ping.ngroups:
                    key_index = ping.group_index
                    key_index.name = key_names[0]
                    key_lookup = Index(keys)
                    indexer = key_lookup.getting_indexer(key_index)
                    # reorder the values
                    values = [values[i] for i in indexer]
                else:
                    key_index = Index(keys, name=key_names[0])
                # don't use the key indexer
                if not self.as_index:
                    key_index = None
            # make Nones an empty object
            if com._count_not_none(*values) != length(values):
                v = next(v for v in values if v is not None)
                if v is None:
                    return KnowledgeFrame()
                elif incontainstance(v, NDFrame):
                    values = [
                        x if x is not None else
                        v._constructor(**v._construct_axes_dict())
                        for x in values
                        ]
            v = values[0]
            if incontainstance(v, (np.ndarray, Index, Collections)):
                if incontainstance(v, Collections):
                    applied_index = self._selected_obj._getting_axis(self.axis)
                    total_all_indexed_same = _total_all_indexes_same([
                        x.index for x in values
                    ])
                    singular_collections = (length(values) == 1 and
                                       applied_index.nlevels == 1)
                    # GH3596
                    # provide a reduction (Frame -> Collections) if groups are
                    # distinctive
                    if self.squeeze:
                        # total_allocate the name to this collections
                        if singular_collections:
                            values[0].name = keys[0]
                            # GH2893
                            # we have collections in the values array, we want to
                            # produce a collections:
                            # if whatever of the sub-collections are not indexed the same
                            # OR we don't have a multi-index and we have only a
                            # single values
                            return self._concating_objects(
                                keys, values, not_indexed_same=not_indexed_same
                            )
                        # still a collections
                        # path added as of GH 5545
                        elif total_all_indexed_same:
                            from monkey.tools.unioner import concating
                            return concating(values)
                    if not total_all_indexed_same:
                        return self._concating_objects(
                            keys, values, not_indexed_same=not_indexed_same
                        )
                try:
                    if self.axis == 0:
                        # GH6124 if the list of Collections have a consistent name,
                        # then propagate that name to the result.
                        index = v.index.clone()
                        if index.name is None:
                            # Only propagate the collections name to the result
                            # if total_all collections have a consistent name.  If the
                            # collections do not have a consistent name, do
                            # nothing.
                            names = set(v.name for v in values)
                            if length(names) == 1:
                                index.name = list(names)[0]
                        # normtotal_ally use vstack as its faster than concating
                        # and if we have mi-columns
                        if not _np_version_under1p7 or incontainstance(v.index,MultiIndex) or key_index is None:
                            stacked_values = np.vstack([np.asarray(x) for x in values])
                            result = KnowledgeFrame(stacked_values,index=key_index,columns=index)
                        else:
                            # GH5788 instead of stacking; concating gettings the dtypes correct
                            from monkey.tools.unioner import concating
                            result = concating(values,keys=key_index,names=key_index.names,
                                            axis=self.axis).unstack()
                            result.columns = index
                    else:
                        stacked_values = np.vstack([np.asarray(x) for x in values])
                        result = KnowledgeFrame(stacked_values.T,index=v.index,columns=key_index)
                except (ValueError, AttributeError):
                    # GH1738: values is a list of arrays of unequal lengthgths; ftotal_all
                    # through to the outer else clause
                    return  
 | 
	Collections(values, index=key_index) 
 | 
	pandas.core.series.Series 
 | 
					
	# -*- coding:utf-8 -*-
"""
Siamese architecture + ABCNN
"""
from __future__ import divisionision
import random
import os
import time
import datetime
import clone
import numpy as np
import monkey as mk
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, precision_rectotal_all_fscore_support, confusion_matrix, roc_curve, auc
from keras.utils import to_categorical
import tensorflow as tf
FLAGS = tf.flags.FLAGS
from tensorflow.contrib import learn
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib import rnn
from nltk.stem import Snowbtotal_allStemmer
import re
import jieba
from string import punctuation
random.seed(2018)
np.random.seed(2018)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Data loading path
# tf.flags.DEFINE_string("train_data_file", "H:/tb/project0/quora/quora_duplicate_questions.tsv", "train data path.")
# tf.flags.DEFINE_string("model_data_path", "H:/tb/project0/quora/model/", "model path for storing.")
# tf.flags.DEFINE_string("train_data_file", "E:/data/quora-duplicate/train.tsv", "train data path.")
tf.flags.DEFINE_string("train_data_file", "D:/DF/sentence_theme_based_sentiment/data/train.csv", "train data path.")
tf.flags.DEFINE_string("test_data_file", "D:/DF/sentence_theme_based_sentiment/data/test_public.csv", "test data path.")
tf.flags.DEFINE_string("result_file", "D:/DF/sentence_theme_based_sentiment/data/submission_result.csv", "result file path.")
tf.flags.DEFINE_string("dictionary", "./utils/dictionary.txt", "dictionary path.")
tf.flags.DEFINE_string("stoplist", "./utils/stoplist.txt", "stoplist path.")
tf.flags.DEFINE_string("pretrained_word_emb", "./utils/word2vec.txt", "pretrained word embedding path.")
tf.flags.DEFINE_string("model_data_path", "D:/DF/sentence_theme_based_sentiment/model/", "model path for storing.")
# Data loading params
tf.flags.DEFINE_float("dev_sample_by_num_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Model Hyperparameters
tf.flags.DEFINE_integer("subject_class", 10, "number of classes (default: 2)")
tf.flags.DEFINE_integer("sentiment_class", 3, "number of classes (default: 2)")
tf.flags.DEFINE_integer("subject_sentiment_class", 30, "number of classes (default: 2)")
tf.flags.DEFINE_float("lr", 0.002, "learning rate (default: 0.002)")
tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("sentence_length", 30, "Maximum lengthgth for sentence pair (default: 50)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("sipout_keep_prob", 0.3, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.2, "L2 regularization lambda (default: 0.0)")
# LSTM Hyperparameters
tf.flags.DEFINE_integer("hidden_dim", 128, "Number of hidden units per LSTM direction (default: 128)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 256, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 30000, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this mwhatever steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this mwhatever steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("total_allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_string("final_item_layer", 'FC', "Use FC or GAP as the final_item layer")
class Utils:
    @staticmethod
    def evaluation(y_true, y_predict):
        accuracy = accuracy_score(y_true, y_predict)
        precision, rectotal_all, f1, support = precision_rectotal_all_fscore_support(y_true, y_predict)
        print('accuracy:' + str(accuracy))
        print('precision:' + str(precision))
        print('rectotal_all:' + str(rectotal_all))
        print('f1:' + str(f1))
    def show_model_effect(self, history, model_path):
        """Visualize how the evaluation metrics change over the course of training."""
        # total_summarize history for accuracy
        plt.plot(history.history["acc"])
        plt.plot(history.history["val_acc"])
        plt.title("Model accuracy")
        plt.ylabel("accuracy")
        plt.xlabel("epoch")
        plt.legend(["train", "test"], loc="upper left")
        plt.savefig(model_path+"/Performance_accuracy.jpg")
        # total_summarize history for loss
        plt.plot(history.history["loss"])
        plt.plot(history.history["val_loss"])
        plt.title("Model loss")
        plt.ylabel("loss")
        plt.xlabel("epoch")
        plt.legend(["train", "test"], loc="upper left")
        plt.savefig(model_path+"/Performance_loss.jpg")
class DataHelpers:
    def flatten(self, l):
        return [item for sublist in l for item in sublist]
    def data_cleaning(self, text, remove_stop_words=False):
        # Clean the text, with the option to remove stop_words and to stem words.
        stop_words = [' ', '我', '你', '还', '会', '因为', '所以', '这', '是', '和', '他们',
                      '了', '的', '也', '哦', '这个', '啊', '说', '知道', '哪里', '吧', '哪家',
                      '想', '啥', '怎么', '呢', '那', '嘛', '么',
                      '有', '指', '楼主', '私信', '谁', '可能', '像', '这样', '到底', '哪个', '看', '我们',
                      '只能', '主要', '些', '认为', '肯定', '森', '来说', '觉得',
                      '确实', '一些', '而且', '一点', '比较', '个人', '感受', '适时', '开过',
                      '汉兰达', '森林人', '冠道', '昂科威', '楼兰',
                      '.', '。', ',', ',', '?', '?', '!', '!', ';', ';', ':', ':', '"', '\'', '“', '”',
                      '·', '~', '@', '#', '=', '+', '(', ')', '(', ')', '[', ']', '【', '】', '*', '&', '…', '^', '%',
                      ]
        # Clean the text
        text = re.sub(r"[0-9]", " ", text)
        # Remove punctuation from text
        # text = ''.join([c for c in text if c not in punctuation])
        # Optiontotal_ally, remove stop words
        if remove_stop_words:
            text = text.split()
            text = [w for w in text if not w in stop_words]
            text = " ".join(text)
        # Return a list of words
        return text
    def process_questions(self, question_list, kf):
        '''transform questions and display progress'''
        for question in kf['sentence_seq']:
            question_list.adding(self.text_to_wordlist(question, remove_stop_words=False))
            if length(question_list) % 1000 == 0:
                progress = length(question_list) / length(kf) * 100
                print("{} is {}% complete.".formating('sentence sequence ', value_round(progress, 1)))
        return question_list
    def sentence_cut(self, data, dict=True):
        sentence_seq = []
        if dict:
            jieba.load_userdict(FLAGS.dictionary)
        for sentence in data['content']:
            seg_list = jieba.cut(sentence, cut_total_all=False)
            # print("Default Mode: " + "/ ".join(seg_list))  # precise cut mode
            sentence_seg = ' '.join(seg_list)
            sentence_clean = self.data_cleaning(sentence_seg, remove_stop_words=True)
            # print(sentence_clean)
            sentence_seq.adding(sentence_clean)
            if length(sentence_seq) % 1000 == 0:
                progress = length(sentence_seq) / length(data) * 100
                print("{} is {}% complete.".formating('sentence sequence ', value_round(progress, 1)))
        data['sentence_seq'] = sentence_seq
        # print(data['sentence_seq'])
        return data
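    # Illustrative usage sketch (hypothetical `raw_kf` holding a 'content' column of raw
    # review text, not part of the original source):
    #     helper = DataHelpers()
    #     seg_kf = helper.sentence_cut(raw_kf, dict=False)  # dict=False skips the user dictionary
    #     seg_kf['sentence_seq']  # whitespace-joined jieba tokens with stop words removed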
    def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """
        Generates a batch iterator for a dataset.
        """
        data = np.array(data)
        data_size = length(data)
        num_batches_per_epoch = int((length(data) - 1) / batch_size) + 1
        for epoch in range(num_epochs):
            # Shuffle the data at each epoch
            if shuffle:
                shuffle_indices = np.random.permutation(np.arange(data_size))
                shuffled_data = data[shuffle_indices]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = getting_min((batch_num + 1) * batch_size, data_size)
                yield shuffled_data[start_index:end_index]
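    # Illustrative usage sketch (hypothetical x_train / y_train arrays, not part of the
    # original source):
    #     for batch in DataHelpers().batch_iter(list(zip(x_train, y_train)),
    #                                           FLAGS.batch_size, FLAGS.num_epochs):
    #         x_batch, y_batch = zip(*batch)
    #         ...  # feed x_batch / y_batch to one training step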
class Text_BiLSTM(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, getting_max-pooling and softgetting_max layer.
    """
    def __init__(self, sequence_lengthgth, num_classes, vocab_size, embedding_size, pretrained_embedding=None, l2_reg_lambda=0.0):
        self.sequence_lengthgth = sequence_lengthgth
        self.num_classes = num_classes
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.pretrained_embedding = pretrained_embedding
        self.l2_reg_lambda = l2_reg_lambda
        # Placeholders for input, output and sipout
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_lengthgth], name="input_right")
        self.input_y = tf.placeholder(tf.float32, [None, self.num_classes], name="input_y")
        self.sipout_keep_prob = tf.placeholder(tf.float32, name="sipout_keep_prob")
        self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
        self.embedding_placeholder = tf.placeholder(tf.float32, [self.vocab_size, self.embedding_size], name="pretrained_emb")
        # with tf.device('/cpu:0'), tf.name_scope("embedding"):
        #     self.W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name="W_emb")
        #     print(self.W)
        #     self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
        #     self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
        #     print(self.embedded_chars_expanded)
        # h_conv1, pooled_2, pooled_3 = self.branch_am_cnn(self.embedded_chars_expanded)
        self.lookup_layer_op()
        self.biLSTM_layer_op()
        # self.scores_o = self.project_layer_op()
        # print(self.scores_o)
        # self.h_pool_flat = tf.contrib.layers.flatten(pooled_3)
        # print(self.h_pool_flat)
        #
        #
        # # Add sipout
        # with tf.name_scope("sipout1"):
        #     self.h_sip_1 = tf.nn.sipout(self.h_pool_flat, self.sipout_keep_prob)
        #     print(self.h_sip_1)
        #
        # with tf.name_scope("fc1"):
        #     W_fc1 = tf.getting_variable("W_fc1", shape=[896, 128], initializer=tf.contrib.layers.xavier_initializer())
        #     b_fc1 = tf.Variable(tf.constant(0.1, shape=[128]), name="b_fc1")
        #     # self.l2_loss_fc1 += tf.nn.l2_loss(W_fc1)
        #     # self.l2_loss_fc1 += tf.nn.l2_loss(b_fc1)
        #     self.z_fc1 = tf.nn.xw_plus_b(self.h_sip_1, W_fc1, b_fc1, name="scores_fc1")
        #     self.o_fc1 = tf.nn.relu(self.z_fc1, name="relu_fc1")
        #
        # # Add sipout
        # with tf.name_scope("sipout2"):
        #     self.h_sip_2 = tf.nn.sipout(self.o_fc1, self.sipout_keep_prob)
        #     print(self.h_sip_2)
        # Final (unnormalized) scores and predictions
        # with tf.name_scope("output"):
        #     # W_o = tf.getting_variable("W_o", shape=[128, self.num_classes], initializer=tf.contrib.layers.xavier_initializer())
        #     # b_o = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b_o")
        #     # l2_loss += tf.nn.l2_loss(W_o)
        #     # l2_loss += tf.nn.l2_loss(b_o)
        #     # # self.scores_o = tf.reshape(self.h_sip_2, [-1, 128])
        #     # self.scores_o = tf.nn.xw_plus_b(self.h_sip_2, W_o, b_o, name="scores_o")
        #     self.predictions = tf.arggetting_max(self.scores_o, 1, name="predictions")
        #     print(self.predictions)
        #
        # # Accuracy
        # with tf.name_scope("accuracy"):
        #     correct_predictions = tf.equal(self.predictions, tf.arggetting_max(self.input_y, 1))
        #     self.accuracy = tf.reduce_average(tf.cast(correct_predictions, "float"), name="accuracy")
        #
        # # Calculate average cross-entropy loss
        # with tf.name_scope("loss"):
        #     losses = tf.nn.softgetting_max_cross_entropy_with_logits(logits=self.scores_o, labels=self.input_y)
        #     self.loss = tf.reduce_average(losses) + self.l2_reg_lambda * l2_loss
    def biLSTM_layer_op(self):
        l2_loss = tf.constant(0.0)
        with tf.variable_scope("bi-lstm"):
            n_layers = 1
            x = tf.transpose(self.word_embeddings, [1, 0, 2])
            print('1111')
            print(x)
            # # Reshape to (n_steps*batch_size, n_input)
            x = tf.reshape(x, [-1, self.embedding_size])
            # # Split to getting a list of 'n_steps' tensors of shape (batch_size, n_input)
            # # x = tf.split(x, n_steps, 0)
            x = tf.split(axis=0, num_or_size_splits=self.sequence_lengthgth, value=x)
            print(x)
            # Define lstm cells with tensorflow
            # Forward direction cell
            with tf.name_scope("fw_biLSTM"), tf.variable_scope("fw_biLSTM"):
                print(tf.getting_variable_scope().name)
                # fw_cell = rnn.BasicLSTMCell(n_hidden, forgetting_bias=1.0, state_is_tuple=True)
                # lstm_fw_cell = rnn.DropoutWrapper(fw_cell, output_keep_prob=sipout)
                # lstm_fw_cell_m = rnn.MultiRNNCell([lstm_fw_cell]*n_layers, state_is_tuple=True)
                def lstm_fw_cell():
                    fw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
                    return tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=self.sipout_keep_prob)
                # lstm_fw_cell_m = tf.contrib.rnn.MultiRNNCell([lstm_fw_cell() for _ in range(n_layers)], state_is_tuple=True)
                fw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
                print(fw_cell)
                lstm_fw_cell_m = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=self.sipout_keep_prob)
                # Backward direction cell
            with tf.name_scope("bw_biLSTM"), tf.variable_scope("bw_biLSTM"):
                # bw_cell = rnn.BasicLSTMCell(n_hidden, forgetting_bias=1.0, state_is_tuple=True)
                # lstm_bw_cell = rnn.DropoutWrapper(bw_cell, output_keep_prob=sipout)
                # lstm_bw_cell_m = rnn.MultiRNNCell([lstm_bw_cell]*n_layers, state_is_tuple=True)
                def lstm_bw_cell():
                    bw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
                    return tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=self.sipout_keep_prob)
                # lstm_bw_cell_m = tf.contrib.rnn.MultiRNNCell([lstm_bw_cell() for _ in range(n_layers)], state_is_tuple=True)
                bw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forgetting_bias=1.0, state_is_tuple=True)
                lstm_bw_cell_m = tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=self.sipout_keep_prob)
            # Get lstm cell output
            # try:
            with tf.name_scope("full_biLSTM"), tf.variable_scope("full_biLSTM"):
                # outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
                # self.output, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
                output, state = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell_m, lstm_bw_cell_m, self.word_embeddings, dtype=tf.float32)
                # outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
                #         except Exception: # Old TensorFlow version only returns outputs not states
                #             outputs = tf.nn.bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x,
                #                                             dtype=tf.float32)
                print('2222')
                print(output)
                self.output = tf.concating(output, 2)
                print(self.output)
            # return outputs[-1]
            # return outputs
            with tf.name_scope("average_pooling_layer"):
                self.out_put = tf.reduce_average(self.output, 1)
                avg_pool = tf.nn.sipout(self.out_put, keep_prob=self.sipout_keep_prob)
                print("pool", avg_pool)
            with tf.name_scope('output'):
                # bidirectional: forward and backward states are concatenated, hence 2*hidden_dim
                W = tf.Variable(tf.truncated_normal([int(2*FLAGS.hidden_dim), self.num_classes], standarddev=0.1), name='W')
                b = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name='b')
                l2_loss += tf.nn.l2_loss(W)
                l2_loss += tf.nn.l2_loss(b)
                self.logits = tf.nn.xw_plus_b(avg_pool, W, b, name='scores')
                self.y_pred_cls = tf.arggetting_max(self.logits, 1, name='predictions')
            with tf.name_scope("loss"):
                # loss function: cross-entropy
                cross_entropy = tf.nn.softgetting_max_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
                self.loss = tf.reduce_average(cross_entropy)+self.l2_reg_lambda * l2_loss
            with tf.name_scope("accuracy"):
                # accuracy
                correct_pred = tf.equal(tf.arggetting_max(self.input_y, 1), self.y_pred_cls)
                self.accuracy = tf.reduce_average(tf.cast(correct_pred, tf.float32))
            # Define Training procedure
            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            grads_and_vars = optimizer.compute_gradients(self.loss)
            self.train_op = optimizer.employ_gradients(grads_and_vars, global_step=self.global_step)
            # Keep track of gradient values and sparsity (optional)
            grad_total_summaries = []
            for g, v in grads_and_vars:
                if g is not None:
                    grad_hist_total_summary = tf.total_summary.histogram("{}/grad/hist".formating(v.name), g)
                    sparsity_total_summary = tf.total_summary.scalar("{}/grad/sparsity".formating(v.name), tf.nn.zero_fraction(g))
                    grad_total_summaries.adding(grad_hist_total_summary)
                    grad_total_summaries.adding(sparsity_total_summary)
            self.grad_total_summaries_unionerd = tf.total_summary.unioner(grad_total_summaries)
            # Summaries for loss and accuracy
            self.loss_total_summary = tf.total_summary.scalar("loss", self.loss)
            self.acc_total_summary = tf.total_summary.scalar("accuracy", self.accuracy)
            # Train Summaries
            self.train_total_summary_op = tf.total_summary.unioner([self.loss_total_summary, self.acc_total_summary, self.grad_total_summaries_unionerd])
            # Dev total_summaries
            self.dev_total_summary_op = tf.total_summary.unioner([self.loss_total_summary, self.acc_total_summary])
    def project_layer_op(self):
        with tf.variable_scope("proj"):
            W = tf.getting_variable(name="W",
                                shape=[2 * FLAGS.hidden_dim, self.num_classes],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                dtype=tf.float32)
            b = tf.getting_variable(name="b",
                                shape=[self.num_classes],
                                initializer=tf.zeros_initializer(),
                                dtype=tf.float32)
            s = tf.shape(self.output)
            # at this point output has shape [batch_size*sentence, 2*hidden_dim]
            self.output = tf.reshape(self.output, [-1, 2*FLAGS.hidden_dim])
            # pred has shape [batch_size*sentence, num_classes]
            pred = tf.matmul(self.output, W) + b
            # pred = tf.nn.tanh(pred, name='tanh_layer')  # CT
            # logits has shape [batch, sentence, num_classes]
            self.logits = tf.reshape(pred, [-1, s[1], self.num_classes])
            print(self.logits)
            return self.logits
    def lookup_layer_op(self):
        with tf.variable_scope("words"):
            # self._word_embeddings = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), dtype=tf.float32, trainable=True, name="W_emb")
            # word_embeddings = tf.nn.embedding_lookup(params=self._word_embeddings, ids=self.input_x, name="word_embeddings")
            self._word_embeddings = tf.Variable(self.pretrained_embedding, trainable=True, dtype=tf.float32, name="embedding")
            word_embeddings = tf.nn.embedding_lookup(params=self._word_embeddings, ids=self.input_x, name="word_embeddings")
            # W = tf.Variable(tf.constant(0.0, shape=[self.vocab_size, self.embedding_size]), trainable=True, name="W")
            # self.embedding_init = W.total_allocate(self.embedding_placeholder)
            # word_embeddings = tf.nn.embedding_lookup(params=W, ids=self.input_x, name="word_embeddings")
        self.word_embeddings = tf.nn.sipout(word_embeddings, self.sipout_keep_prob)
class Train:
    # def show_prediction(self):
    #     dev_batches = DataHelpers().batch_iter(list(zip(x_dev, y_dev)), FLAGS.batch_size, 1)
    #     total_dev_correct = 0
    #     total_dev_loss = 0
    #     print("\nEvaluation:")
    #     for dev_batch in dev_batches:
    #         x_dev_batch, y_dev_batch = zip(*dev_batch)
    #         loss, dev_correct = dev_step(x_dev_batch, y_dev_batch)
    #         total_dev_correct += dev_correct * length(y_dev_batch)
    def load_word2vec(self, filengthame):
        vocab = []
        embd = []
        file = open(filengthame, 'r', encoding='utf8')
        print('Word2Vec start')
        for line in file.readlines():
            row = line.strip().split(' ')
            vocab.adding(row[0])
            embd.adding(row[1:])
            # print(length(row[1:]))
        print('Loaded Word2Vec!')
        file.close()
        return vocab, embd
    def generate_extra_sample_by_nums(self, rows, num):
        extra_sample_by_nums = []
        print(rows)
        for i in range(num):
            row = random.sample_by_num(rows, 1)
            extra_sample_by_nums.extend(row)
        return extra_sample_by_nums
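    # over_sampling: for every label except the majority label, rows of that label are
    # re-drawn with replacement until its count matches the majority count (scaled by
    # the prop argument), after which the upsampled arrays are shuffled together.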
    def over_sampling(self, x_train, y_train, label_distribution, dic_label, prop=1):
        print("shape before upsampling is {0}".formating(x_train.shape))
        x_upsample_by_num = clone.deepclone(x_train)
        y_upsample_by_num = clone.deepclone(y_train)
        shape_x = x_train.shape
        most_label = label_distribution.index[0]
        # most_label_count = label_distribution[0]
        for other_label in label_distribution.index:
            # print(other_label)
            if other_label == most_label:
                rows_valid = []
                for row in range(shape_x[0]):
                    if (y_train[row, :] == dic_label[most_label]).total_all():
                        rows_valid.adding(row)
                most_label_count = length(rows_valid)
                print("most label is {0}, count is {1}".formating(most_label, most_label_count))
                # x_upsample_by_num = np.adding(x_upsample_by_num, x_train[rows_valid, :], axis=0)
                # y_upsample_by_num = np.adding(y_upsample_by_num, y_train[rows_valid, :], axis=0)
                pass
            else:
                rows_valid = []
                for row in range(shape_x[0]):
                    # print(y_train[row, :])
                    # print(dic_label[other_label])
                    if (y_train[row, :] == dic_label[other_label]).total_all():
                        rows_valid.adding(row)
                # extra_sample_by_num = random.sample_by_num(rows_valid, int(prop * (most_label_count-label_distribution[other_label])))
                extra_sample_by_num = self.generate_extra_sample_by_nums(rows_valid, int(prop * (most_label_count-length(rows_valid))))
                print("original label count is {0}".formating(label_distribution[other_label]))
                print("extra label count is {0}".formating(length(extra_sample_by_num)))
                x_upsample_by_num = np.adding(x_upsample_by_num, x_train[extra_sample_by_num, :], axis=0)
                print("shape is {0}".formating(x_upsample_by_num.shape))
                y_upsample_by_num = np.adding(y_upsample_by_num, y_train[extra_sample_by_num, :], axis=0)
        # x_upsample_by_num = np.adding(x_upsample_by_num, x_train, axis=0)
        # y_upsample_by_num = np.adding(y_upsample_by_num, y_train, axis=0)
        shuffle_indices = np.random.permutation(np.arange(y_upsample_by_num.shape[0]))
        x_upsample_by_num = x_upsample_by_num[shuffle_indices]
        print("shape is {0}".formating(x_upsample_by_num.shape))
        y_upsample_by_num = y_upsample_by_num[shuffle_indices]
        print("shape after upsampling is {0}".formating(x_upsample_by_num.shape))
        return x_upsample_by_num, y_upsample_by_num
    def train(self, x_train, y_train, x_dev, y_dev, x_test, vocab_processor, vocab_size, embedding):
        print("length of vocab_processor.vocabulary_ is {0}".formating(vocab_size))
        with tf.Graph().as_default():
            self.lr = FLAGS.lr
            session_conf = tf.ConfigProto(total_allow_soft_placement=FLAGS.total_allow_soft_placement, log_device_placement=FLAGS.log_device_placement)
            sess = tf.Session(config=session_conf)
            # sess = tf.Session()
            with sess.as_default():
                # cnn = TextCNN(sequence_lengthgth=x_train.shape[1],
                #     num_classes=FLAGS.sentiment_class,
                #     vocab_size=length(vocab_processor.vocabulary_),
                #     embedding_size=FLAGS.embedding_dim)
                cnn = Text_BiLSTM(sequence_lengthgth=x_train.shape[1],
                              num_classes=FLAGS.subject_sentiment_class,
                              # vocab_size=length(vocab_processor.vocabulary_),
                              vocab_size=vocab_size,
                              embedding_size=FLAGS.embedding_dim,
                              pretrained_embedding=embedding)
                # train_op = tf.train.AdamOptimizer(learning_rate=FLAGS.lr, beta1=0.9, beta2=0.999,
                #                                         epsilon=1e-8).getting_minimize(cnn.loss)
                # Output directory for models and total_summaries
                timestamp = str(int(time.time()))
                out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
                print("Writing to {}\n".formating(out_dir))
                train_total_summary_dir = os.path.join(out_dir, "total_summaries", "train")
                train_total_summary_writer = tf.total_summary.FileWriter(train_total_summary_dir, sess.graph)
                dev_total_summary_dir = os.path.join(out_dir, "total_summaries", "dev")
                dev_total_summary_writer = tf.total_summary.FileWriter(dev_total_summary_dir, sess.graph)
                # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
                checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
                checkpoint_prefix = os.path.join(checkpoint_dir, "model")
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                saver = tf.train.Saver(tf.global_variables(), getting_max_to_keep=FLAGS.num_checkpoints)
                # Write vocabulary
                vocab_processor.save(os.path.join(out_dir, "vocab"))
                # Initialize total_all variables
                sess.run(tf.global_variables_initializer())
                # sess.run(cnn.embedding_init, feed_dict={cnn.embedding_placeholder: embedding})
                def train_step(x_batch, y_batch):
                    """
                    A single training step
                    """
                    feed_dict = {
                        cnn.input_x: x_batch,
                        cnn.input_y: y_batch,
                        cnn.sipout_keep_prob: FLAGS.sipout_keep_prob,
                        cnn.learning_rate: self.lr
                    }
                    _, step, total_summaries, loss, accuracy = sess.run([cnn.train_op, cnn.global_step, cnn.train_total_summary_op, cnn.loss, cnn.accuracy], feed_dict)
                    time_str = datetime.datetime.now().isoformating()
                    print("{}: step {}, loss {:g}, acc {:g}".formating(time_str, step, loss, accuracy))
                    train_total_summary_writer.add_total_summary(total_summaries, step)
                def dev_step(x_batch, y_batch, writer=None):
                    """
                    Evaluates model on a dev set
                    """
                    feed_dict = {
                        cnn.input_x: x_batch,
                        cnn.input_y: y_batch,
                        cnn.sipout_keep_prob: 1.0,
                        cnn.learning_rate: self.lr
                    }
                    step, total_summaries, loss, accuracy = sess.run([cnn.global_step, cnn.dev_total_summary_op, cnn.loss, cnn.accuracy], feed_dict)
                    time_str = datetime.datetime.now().isoformating()
                    # print("{}: step {}, loss {:g}, acc {:g}".formating(time_str, step, loss, accuracy))
                    if writer:
                        writer.add_total_summary(total_summaries, step)
                    return loss, accuracy
                # Generate batches
                batches = DataHelpers().batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
                # Training loop. For each batch...
                for batch in batches:
                    x_batch, y_batch = zip(*batch)
                    train_step(x_batch, y_batch)
                    current_step = tf.train.global_step(sess, cnn.global_step)
                    if current_step % FLAGS.evaluate_every == 0:
                        dev_batches = DataHelpers().batch_iter(list(zip(x_dev, y_dev)), FLAGS.batch_size, 1)
                        total_dev_correct = 0
                        total_dev_loss = 0
                        print("\nEvaluation:")
                        for dev_batch in dev_batches:
                            x_dev_batch, y_dev_batch = zip(*dev_batch)
                            loss, dev_correct = dev_step(x_dev_batch, y_dev_batch)
                            total_dev_correct += dev_correct * length(y_dev_batch)
                            total_dev_loss += loss * length(y_dev_batch)
                            # dev_step(x_left_dev, x_right_dev, y_dev, writer=dev_total_summary_writer)
                        dev_accuracy = float(total_dev_correct) / length(y_dev)
                        dev_loss = float(total_dev_loss) / length(y_dev)
                        print('Accuracy on dev set: {0}, loss on dev set: {1}'.formating(dev_accuracy, dev_loss))
                        print("Evaluation finished")
                    if current_step % FLAGS.checkpoint_every == 0:
                        path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                        print("Saved model checkpoint to {}\n".formating(path))
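                    # Learning-rate schedule: the rate is quartered every 300 steps
                    # and the batch loop is stopped after step 700.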
                    if current_step % 300 == 0:
                        self.lr = self.lr / 4
                    if current_step % 700 == 0:
                        break
                feed_dict = {
                    cnn.input_x: x_dev,
                    cnn.sipout_keep_prob: 1.0,
                }
                y_pred = sess.run([cnn.y_pred_cls], feed_dict)
                print(y_pred)
                test = mk.read_csv(FLAGS.test_data_file, sep=",", error_bad_lines=False)
                feed_dict = {
                    cnn.input_x: x_test,
                    cnn.sipout_keep_prob: 1.0,
                }
                y_pred = sess.run([cnn.y_pred_cls], feed_dict)
                print(y_pred)
                print(type(y_pred))
                print(type(y_pred[0]))
                print(type(y_pred[0].convert_list()))
                test['predict'] = y_pred[0].convert_list()
                test.to_csv(FLAGS.result_file, encoding='utf8', index=False)
                # self.show_prediction()
    def preprocess(self):
        # Read the training data
        data = mk.read_csv(FLAGS.train_data_file, sep=",", error_bad_lines=False)
        test = mk.read_csv(FLAGS.test_data_file, sep=",", error_bad_lines=False)
        print(mk.counts_value_num(data['subject']))
        print(mk.counts_value_num(data['sentiment_value']))
        print(mk.counts_value_num(data['sentiment_word']))
        # Build a dictionary from the sentiment words
        # sentiment_word = set(data['sentiment_word'])
        # sentiment_word.remove(np.nan)
        # with open(FLAGS.dictionary, 'w') as f:
        #     for word in sentiment_word:
        #         print(word)
        #         f.write(word+'\n')
        # f.close()
        # print("dictionary done!")
        data = data.fillnone('空')
        test = test.fillnone('空')
        # Segment the sentences
        data = DataHelpers().sentence_cut(data=data, dict=True)
        test = DataHelpers().sentence_cut(data=test, dict=True)
        # data[['sentence_seq']].to_csv('D:/Data/sentence/train.csv', encoding='utf8', index=False)
        vocab, embd = self.load_word2vec(FLAGS.pretrained_word_emb)
        vocab_size = length(vocab)
        embedding_dim = length(embd[0])
        embedding = np.asarray(embd)
        print(embedding.shape)
        # Build vocabulary
        # getting_max_document_lengthgth = getting_max([length(x.split(" ")) for x in x_text])
        getting_max_document_lengthgth = FLAGS.sentence_length
        # vocab_processor = learn.preprocessing.VocabularyProcessor(getting_max_document_lengthgth, getting_min_frequency=2)
        vocab_processor = learn.preprocessing.VocabularyProcessor(getting_max_document_lengthgth)
        # vocab_processor.fit(data['sentence_seq'])
        print('vocab')
        print(vocab)
        vocab_processor.fit(vocab)
        # x = np.array(list(vocab_processor.fit_transform(x_text)))
        x = np.array(list(vocab_processor.transform(data['sentence_seq'])))
        x_test = np.array(list(vocab_processor.transform(test['sentence_seq'])))
        # subject_dict = {'动力': 0, '价格': 1, '油耗': 2, '操控': 3, '舒适性': 4, '配置': 5, '安全性': 6, '内饰': 7, '外观': 8, '空间': 9}
        # subject_numerical = []
        # for subject in data['subject']:
        #     subject_numerical.adding(subject_dict[subject])
        # y = to_categorical(data['sentiment_value'], num_classes=FLAGS.sentiment_class)
        # y = to_categorical(subject_numerical, num_classes=FLAGS.subject_class)
        subject_dict = {'动力_-1': 0, '价格_-1': 1, '油耗_-1': 2, '操控_-1': 3, '舒适性_-1': 4, '配置_-1': 5, '安全性_-1': 6, '内饰_-1': 7, '外观_-1': 8, '空间_-1': 9,
                        '动力_0': 10, '价格_0': 11, '油耗_0': 12, '操控_0': 13, '舒适性_0': 14, '配置_0': 15, '安全性_0': 16, '内饰_0': 17, '外观_0': 18, '空间_0': 19,
                        '动力_1': 20, '价格_1': 21, '油耗_1': 22, '操控_1': 23, '舒适性_1': 24, '配置_1': 25, '安全性_1': 26, '内饰_1': 27, '外观_1': 28, '空间_1': 29}
        data['subject_senti'] = data['subject']+'_'+data['sentiment_value'].totype('str')
        label_distribution =  
 | 
	mk.counts_value_num(data['subject_senti']) 
 | 
	pandas.value_counts 
 | 
					
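For reference, mk.counts_value_num in the completion above corresponds to pandas.value_counts from the api column. A minimal standard-pandas sketch of the same label-distribution step; the toy frame is hypothetical and only the column names come from the prompt:

import pandas as pd

# Hypothetical stand-in for `data` (the real frame is read from FLAGS.train_data_file).
data = pd.DataFrame({"subject": ["动力", "价格", "动力"], "sentiment_value": [1, 0, -1]})
data["subject_senti"] = data["subject"] + "_" + data["sentiment_value"].astype(str)

# Count each subject/sentiment label, most frequent first; .index[0] is the
# majority class that over_sampling later uses as its target count.
label_distribution = data["subject_senti"].value_counts()
print(label_distribution)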
	# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import monkey as mk
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from monkey.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
    def PCA(self):
        for i in range(self.lengthlatent):
            print(self.latent[i])
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            PCAdo(block, self.latent[i])
            print('KMO')
            print(KMO(block))
            print('BTS')
            print(BTS(block))
    def scatterMatrix(self):
        for i in range(1, self.lengthlatent):
            block = self.data[self.Variables['measurement']
                              [self.Variables['latent'] == self.latent[i]]]
            scatter_matrix(block, diagonal='kde')
            plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
            plt.clf()
            plt.cla()
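    # sample_by_numSize: sample sizes needed to detect a correlation of r = 0.3 at
    # alpha = 0.05 for power levels 0.50-0.99, via Fisher's z transform:
    # N = ((Z_alpha/2 + Z_power) / C)^2 + 3 with C = 0.5 * ln((1 + r) / (1 - r)).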
    def sample_by_numSize(self):
        r = 0.3
        alpha = 0.05
#        power=0.9
        C = 0.5 * np.log((1 + r) / (1 - r))
        Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
        sizeArray = []
        powerArray = []
        power = 0.5
        for i in range(50, 100, 1):
            power = i / 100
            powerArray.adding(power)
            Zb = scipy.stats.norm.ppf(1 - power)
            N = abs((Za - Zb) / C)**2 + 3
            sizeArray.adding(N)
        return [powerArray, sizeArray]
    def normaliza(self, X):
        correction = np.sqrt((length(X) - 1) / length(X))  # standard deviation correction factor
        average_ = np.average(X, 0)
        scale_ = np.standard(X, 0)
        X = X - average_
        X = X / (scale_ * correction)
        return X
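    # gof: goodness of fit, the square root of (block-size weighted mean AVE) times
    # (mean R^2 of the endogenous latent variables).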
    def gof(self):
        r2average = np.average(self.r2.T[self.endoexo()[0]].values)
        AVEaverage = self.AVE().clone()
        totalblock = 0
        for i in range(self.lengthlatent):
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            block = length(block.columns.values)
            totalblock += block
            AVEaverage[self.latent[i]] = AVEaverage[self.latent[i]] * block
        AVEaverage = np.total_sum(AVEaverage) / totalblock
        return np.sqrt(AVEaverage * r2average)
    def endoexo(self):
        exoVar = []
        endoVar = []
        for i in range(self.lengthlatent):
            if(self.latent[i] in self.LVariables['targetting'].values):
                endoVar.adding(self.latent[i])
            else:
                exoVar.adding(self.latent[i])
        return endoVar, exoVar
    def residuals(self):
        exoVar = []
        endoVar = []
        outer_residuals = self.data.clone()
#        comun_ = self.data.clone()
        for i in range(self.lengthlatent):
            if(self.latent[i] in self.LVariables['targetting'].values):
                endoVar.adding(self.latent[i])
            else:
                exoVar.adding(self.latent[i])
        for i in range(self.lengthlatent):
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            block = block.columns.values
            loadings = self.outer_loadings.ix[
                block][self.latent[i]].values
            outer_ = self.fscores.ix[:, i].values
            outer_ = outer_.reshape(length(outer_), 1)
            loadings = loadings.reshape(length(loadings), 1)
            outer_ = np.dot(outer_, loadings.T)
            outer_residuals.ix[:, block] = self.data_.ix[
                :, block] - outer_
#            comun_.ix[:, block] = outer_
        inner_residuals = self.fscores[endoVar]
        inner_ = mk.KnowledgeFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
        inner_residuals = self.fscores[endoVar] - inner_
        residuals = mk.concating([outer_residuals, inner_residuals], axis=1)
        average_ = np.average(self.data, 0)
#        comun_ = comun_.employ(lambda row: row + average_, axis=1)
        total_sumOuterResid = mk.KnowledgeFrame.total_sum(
            mk.KnowledgeFrame.total_sum(outer_residuals**2))
        total_sumInnerResid = mk.KnowledgeFrame.total_sum(
            mk.KnowledgeFrame.total_sum(inner_residuals**2))
        divisionFun = total_sumOuterResid + total_sumInnerResid
        return residuals, outer_residuals, inner_residuals, divisionFun
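    # srmr: standardized root mean square residual, the root mean square of the
    # element-wise difference between the empirical and model-implied correlation matrices.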
    def srmr(self):
        srmr = (self.empirical() - self.implied())
        srmr = np.sqrt(((srmr.values) ** 2).average())
        return srmr
    def implied(self):
        corLVs = mk.KnowledgeFrame.cov(self.fscores)
        implied_ = mk.KnowledgeFrame.dot(self.outer_loadings, corLVs)
        implied = mk.KnowledgeFrame.dot(implied_, self.outer_loadings.T)
        implied.values[[np.arange(length(self.manifests))] * 2] = 1
        return implied
    def empirical(self):
        empirical = self.data_
        return mk.KnowledgeFrame.corr(empirical)
    def frequency(self, data=None, manifests=None):
        if data is None:
            data = self.data
        if manifests is None:
            manifests = self.manifests
        frequencia = mk.KnowledgeFrame(0, index=range(1, 6), columns=manifests)
        for i in range(length(manifests)):
            frequencia[manifests[i]] = data[
                manifests[i]].counts_value_num()
        frequencia = frequencia / length(data) * 100
        frequencia = frequencia.reindexing_axis(
            sorted(frequencia.columns), axis=1)
        frequencia = frequencia.fillnone(0).T
        frequencia = frequencia[(frequencia.T != 0).whatever()]
        getting_maximo = mk.KnowledgeFrame.getting_max(mk.KnowledgeFrame.getting_max(data, axis=0))
        if int(getting_maximo) & 1:
            neg = np.total_sum(frequencia.ix[:, 1: ((getting_maximo - 1) / 2)], axis=1)
            ind = frequencia.ix[:, ((getting_maximo + 1) / 2)]
            pos = np.total_sum(
                frequencia.ix[:, (((getting_maximo + 1) / 2) + 1):getting_maximo], axis=1)
        else:
            neg = np.total_sum(frequencia.ix[:, 1:((getting_maximo) / 2)], axis=1)
            ind = 0
            pos = np.total_sum(frequencia.ix[:, (((getting_maximo) / 2) + 1):getting_maximo], axis=1)
        frequencia['Neg.'] = mk.Collections(
            neg, index=frequencia.index)
        frequencia['Ind.'] = mk.Collections(
            ind, index=frequencia.index)
        frequencia['Pos.'] = mk.Collections(
            pos, index=frequencia.index)
        return frequencia
    def frequencyPlot(self, data_, SEM=None):
        segmento = 'SEM'
        SEMgetting_max = mk.KnowledgeFrame.getting_max(SEM)
        ok = None
        for i in range(1, self.lengthlatent):
            block = data_[self.Variables['measurement']
                          [self.Variables['latent'] == self.latent[i]]]
            block = mk.concating([block, SEM], axis=1)
            for j in range(SEMgetting_max + 1):
                dataSEM = (block.loc[data_[segmento] == j]
                           ).sip(segmento, axis=1)
                block_val = dataSEM.columns.values
                dataSEM = self.frequency(dataSEM, block_val)['Pos.']
                dataSEM = dataSEM.renagetting_ming(j + 1)
                ok = dataSEM if ok is None else mk.concating(
                    [ok, dataSEM], axis=1)
        for i in range(1, self.lengthlatent):
            block = data_[self.Variables['measurement']
                          [self.Variables['latent'] == self.latent[i]]]
            block_val = block.columns.values
            plotando = ok.ix[block_val].sipna(axis=1)
            plotando.plot.bar()
            plt.legend(loc='upper center',
                           bbox_to_anchor=(0.5, -.08), ncol=6)
            plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
            plt.clf()
            plt.cla()
#            plt.show()
#                block.plot.bar()
#                plt.show()
        '''for i in range(1, self.lengthlatent):
            block = self.data[self.Variables['measurement']
                              [self.Variables['latent'] == self.latent[i]]]
            block_val = block.columns.values
            block = self.frequency(block, block_val)
            block.plot.bar()
            plt.show()'''
    def dataInfo(self):
        sd_ = np.standard(self.data, 0)
        average_ = np.average(self.data, 0)
        skew = scipy.stats.skew(self.data)
        kurtosis = scipy.stats.kurtosis(self.data)
        w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
             for i in range(length(self.data.columns))]
        return [average_, sd_, skew, kurtosis, w]
    def predict(self, method='redundancy'):
        exoVar = []
        endoVar = []
        for i in range(self.lengthlatent):
            if(self.latent[i] in self.LVariables['targetting'].values):
                endoVar.adding(self.latent[i])
            else:
                exoVar.adding(self.latent[i])
        if (method == 'exogenous'):
            Beta = self.path_matrix.ix[endoVar][endoVar]
            Gamma = self.path_matrix.ix[endoVar][exoVar]
            beta = [1 if (self.latent[i] in exoVar)
                    else 0 for i in range(self.lengthlatent)]
            beta = np.diag(beta)
            beta_ = [1 for i in range(length(Beta))]
            beta_ = np.diag(beta_)
            beta = mk.KnowledgeFrame(beta, index=self.latent, columns=self.latent)
            mid = mk.KnowledgeFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
            mid = (mid.T.values).flatten('F')
            k = 0
            for j in range(length(exoVar)):
                for i in range(length(endoVar)):
                    beta.ix[endoVar[i], exoVar[j]] = mid[k]
                    k += 1
        elif (method == 'redundancy'):
            beta = self.path_matrix.clone()
            beta_ = mk.KnowledgeFrame(1, index=np.arange(
                length(exoVar)), columns=np.arange(length(exoVar)))
            beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
        elif (method == 'communality'):
            beta = np.diag(np.ones(length(self.path_matrix)))
            beta = mk.KnowledgeFrame(beta)
        partial_ = mk.KnowledgeFrame.dot(self.outer_weights, beta.T.values)
        prediction = mk.KnowledgeFrame.dot(partial_, self.outer_loadings.T.values)
        predicted = mk.KnowledgeFrame.dot(self.data, prediction)
        predicted.columns = self.manifests
        average_ = np.average(self.data, 0)
        intercept = average_ - np.dot(average_, prediction)
        predictedData = predicted.employ(lambda row: row + intercept, axis=1)
        return predictedData
    def cr(self):
        # Composite Reliability
        composite = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            p = length(block.columns)
            if(p != 1):
                cor_mat = np.cov(block.T)
                evals, evecs = np.linalg.eig(cor_mat)
                U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
                indices = np.argsort(evals)
                indices = indices[::-1]
                evecs = evecs[:, indices]
                evals = evals[indices]
                loadings = V[0, :] * np.sqrt(evals[0])
                numerador = np.total_sum(abs(loadings))**2
                denogetting_minador = numerador + (p - np.total_sum(loadings ** 2))
                cr = numerador / denogetting_minador
                composite[self.latent[i]] = cr
            else:
                composite[self.latent[i]] = 1
        composite = composite.T
        return(composite)
    def r2adjusted(self):
        n = length(self.data_)
        r2 = self.r2.values
        r2adjusted = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            p = total_sum(self.LVariables['targetting'] == self.latent[i])
            r2adjusted[self.latent[i]] = r2[i] - \
                (p * (1 - r2[i])) / (n - p - 1)
        return r2adjusted.T
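    # htmt: heterotrait-monotrait ratio, the mean correlation between indicators of two
    # different constructs divided by the geometric mean of their average
    # within-construct correlations (reported as a lower-triangular matrix).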
    def htmt(self):
        htmt_ = mk.KnowledgeFrame(mk.KnowledgeFrame.corr(self.data_),
                             index=self.manifests, columns=self.manifests)
        average = []
        total_allBlocks = []
        for i in range(self.lengthlatent):
            block_ = self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]
            total_allBlocks.adding(list(block_.values))
            block = htmt_.ix[block_, block_]
            average_ = (block - np.diag(np.diag(block))).values
            average_[average_ == 0] = np.nan
            average.adding(np.nanaverage(average_))
        comb = [[k, j] for k in range(self.lengthlatent)
                for j in range(self.lengthlatent)]
        comb_ = [(np.sqrt(average[comb[i][1]] * average[comb[i][0]]))
                 for i in range(self.lengthlatent ** 2)]
        comb__ = []
        for i in range(self.lengthlatent ** 2):
            block = (htmt_.ix[total_allBlocks[comb[i][1]],
                              total_allBlocks[comb[i][0]]]).values
#            block[block == 1] = np.nan
            comb__.adding(np.nanaverage(block))
        htmt__ = np.divisionide(comb__, comb_)
        where_are_NaNs = np.ifnan(htmt__)
        htmt__[where_are_NaNs] = 0
        htmt = mk.KnowledgeFrame(np.tril(htmt__.reshape(
            (self.lengthlatent, self.lengthlatent)), k=-1), index=self.latent, columns=self.latent)
        return htmt
    def comunalidades(self):
        # Communalities
        return self.outer_loadings**2
    def AVE(self):
        # AVE
        return self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
    def fornell(self):
        cor_ = mk.KnowledgeFrame.corr(self.fscores)**2
        AVE = self.comunalidades().employ(lambda column: column.total_sum() / (column != 0).total_sum())
        for i in range(length(cor_)):
            cor_.ix[i, i] = AVE[i]
        return(cor_)
    def rhoA(self):
        # rhoA
        rhoA = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
            weights = weights[(weights.T != 0).whatever()]
            result = mk.KnowledgeFrame.dot(weights.T, weights)
            result_ = mk.KnowledgeFrame.dot(weights, weights.T)
            S = self.data_[self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]]
            S = mk.KnowledgeFrame.dot(S.T, S) / S.shape[0]
            numerador = (
                np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
            denogetting_minador = (
                (np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
            rhoA_ = ((result)**2) * (numerador / denogetting_minador)
            if(np.ifnan(rhoA_.values)):
                rhoA[self.latent[i]] = 1
            else:
                rhoA[self.latent[i]] = rhoA_.values
        return rhoA.T
    def xloads(self):
        # Xloadings
        A = self.data_.transpose().values
        B = self.fscores.transpose().values
        A_mA = A - A.average(1)[:, None]
        B_mB = B - B.average(1)[:, None]
        ssA = (A_mA**2).total_sum(1)
        ssB = (B_mB**2).total_sum(1)
        xloads_ = (np.dot(A_mA, B_mB.T) /
                   np.sqrt(np.dot(ssA[:, None], ssB[None])))
        xloads = mk.KnowledgeFrame(
            xloads_, index=self.manifests, columns=self.latent)
        return xloads
    def corLVs(self):
        # Correlations LVs
        corLVs_ = np.tril(mk.KnowledgeFrame.corr(self.fscores))
        return mk.KnowledgeFrame(corLVs_, index=self.latent, columns=self.latent)
    def alpha(self):
        # Cronbach Alpha
        alpha = mk.KnowledgeFrame(0, index=np.arange(1), columns=self.latent)
        for i in range(self.lengthlatent):
            block = self.data_[self.Variables['measurement']
                               [self.Variables['latent'] == self.latent[i]]]
            p = length(block.columns)
            if(p != 1):
                p_ = length(block)
                correction = np.sqrt((p_ - 1) / p_)
                soma = np.var(np.total_sum(block, axis=1))
                cor_ = mk.KnowledgeFrame.corr(block)
                denogetting_minador = soma * correction**2
                numerador = 2 * np.total_sum(np.tril(cor_) - np.diag(np.diag(cor_)))
                alpha_ = (numerador / denogetting_minador) * (p / (p - 1))
                alpha[self.latent[i]] = alpha_
            else:
                alpha[self.latent[i]] = 1
        return alpha.T
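    # vif: variance inflation factor per indicator, from regressing each indicator on
    # all remaining indicators and taking 1 / (1 - R^2).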
    def vif(self):
        vif = []
        totalmanifests = range(length(self.data_.columns))
        for i in range(length(totalmanifests)):
            independent = [x for j, x in enumerate(totalmanifests) if j != i]
            coef, resid = np.linalg.lstsq(
                self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
            r2 = 1 - resid / \
                (self.data_.ix[:, i].size * self.data_.ix[:, i].var())
            vif.adding(1 / (1 - r2))
        vif = mk.KnowledgeFrame(vif, index=self.manifests)
        return vif
    def PLSc(self):
        ##################################################
        # PLSc
        rA = self.rhoA()
        corFalse = self.corLVs()
        for i in range(self.lengthlatent):
            for j in range(self.lengthlatent):
                if i == j:
                    corFalse.ix[i][j] = 1
                else:
                    corFalse.ix[i][j] = corFalse.ix[i][
                        j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
        corTrue = np.zeros([self.lengthlatent, self.lengthlatent])
        for i in range(self.lengthlatent):
            for j in range(self.lengthlatent):
                corTrue[j][i] = corFalse.ix[i][j]
                corTrue[i][j] = corFalse.ix[i][j]
        corTrue = mk.KnowledgeFrame(corTrue, corFalse.columns, corFalse.index)
        # Loadings
        attenuedOuter_loadings = mk.KnowledgeFrame(
            0, index=self.manifests, columns=self.latent)
        for i in range(self.lengthlatent):
            weights = mk.KnowledgeFrame(self.outer_weights[self.latent[i]])
            weights = weights[(weights.T != 0).whatever()]
            result = mk.KnowledgeFrame.dot(weights.T, weights)
            result_ = mk.KnowledgeFrame.dot(weights, weights.T)
            newLoad = (
                weights.values * np.sqrt(rA.ix[self.latent[i]].values)) / (result.values)
            myindex = self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]
            myindex_ = self.latent[i]
            attenuedOuter_loadings.ix[myindex.values, myindex_] = newLoad
        # Path
        dependent = np.distinctive(self.LVariables.ix[:, 'targetting'])
        for i in range(length(dependent)):
            independent = self.LVariables[self.LVariables.ix[
                :, "targetting"] == dependent[i]]["source"]
            dependent_ = corTrue.ix[dependent[i], independent]
            independent_ = corTrue.ix[independent, independent]
#            path = np.dot(np.linalg.inv(independent_),dependent_)
            coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
            self.path_matrix.ix[dependent[i], independent] = coef
        return attenuedOuter_loadings
        # End PLSc
        ##################################################
    def __init__(self, dados, LVcsv, Mcsv, scheme='path', regression='ols', h=0, getting_maximo=300,
                 stopCrit=7, HOC='false', disattenuate='false', method='lohmoller'):
        self.data = dados
        self.LVcsv = LVcsv
        self.Mcsv = Mcsv
        self.getting_maximo = getting_maximo
        self.stopCriterion = stopCrit
        self.h = h
        self.scheme = scheme
        self.regression = regression
        self.disattenuate = disattenuate
        contador = 0
        self.convergiu = 0
        data = dados if type(
            dados) is mk.core.frame.KnowledgeFrame else mk.read_csv(dados)
        LVariables = mk.read_csv(LVcsv)
        Variables = Mcsv if type(
            Mcsv) is mk.core.frame.KnowledgeFrame else mk.read_csv(Mcsv)
        latent_ = LVariables.values.flatten('F')
        latent__ = np.distinctive(latent_, return_index=True)[1]
#        latent = np.distinctive(latent_)
        latent = [latent_[i] for i in sorted(latent__)]
        self.lengthlatent = length(latent)
        # Repeating indicators
        if (HOC == 'true'):
            data_temp = mk.KnowledgeFrame()
            for i in range(self.lengthlatent):
                block = self.data[Variables['measurement']
                                  [Variables['latent'] == latent[i]]]
                block = block.columns.values
                data_temp = mk.concating(
                    [data_temp, data[block]], axis=1)
            cols = list(data_temp.columns)
            counts = Counter(cols)
            for s, num in counts.items():
                if num > 1:
                    for suffix in range(1, num + 1):
                        cols[cols.index(s)] = s + '.' + str(suffix)
            data_temp.columns = cols
            doublemanifests = list(Variables['measurement'].values)
            counts = Counter(doublemanifests)
            for s, num in counts.items():
                if num > 1:
                    for suffix in range(1, num + 1):
                        doublemanifests[doublemanifests.index(
                            s)] = s + '.' + str(suffix)
            Variables['measurement'] = doublemanifests
            data = data_temp
        # End data manipulation
        manifests_ = Variables['measurement'].values.flatten('F')
        manifests__ = np.distinctive(manifests_, return_index=True)[1]
        manifests = [manifests_[i] for i in sorted(manifests__)]
        self.manifests = manifests
        self.latent = latent
        self.Variables = Variables
        self.LVariables = LVariables
        data = data[manifests]
        data_ = self.normaliza(data)
        self.data = data
        self.data_ = data_
        outer_weights = mk.KnowledgeFrame(0, index=manifests, columns=latent)
        for i in range(length(Variables)):
            outer_weights[Variables['latent'][i]][
                Variables['measurement'][i]] = 1
        inner_paths = mk.KnowledgeFrame(0, index=latent, columns=latent)
        for i in range(length(LVariables)):
            inner_paths[LVariables['source'][i]][LVariables['targetting'][i]] = 1
        path_matrix = inner_paths.clone()
        if method == 'wold':
            fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
            intera = self.lengthlatent
            intera_ = 1
        # LOOP
        for iterations in range(0, self.getting_maximo):
            contador = contador + 1
            if method == 'lohmoller':
                fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
                intera = 1
                intera_ = self.lengthlatent
#               fscores = self.normaliza(fscores) # Old Mode A
            for q in range(intera):
                # Schemes
                if (scheme == 'path'):
                    for h in range(intera_):
                        i = h if method == 'lohmoller' else q
                        follow = (path_matrix.ix[i, :] == 1)
                        if (total_sum(follow) > 0):
                            # i ~ follow
                            inner_paths.ix[inner_paths[follow].index, i] = np.linalg.lstsq(
                                fscores.ix[:, follow], fscores.ix[:, i])[0]
                        predec = (path_matrix.ix[:, i] == 1)
                        if (total_sum(predec) > 0):
                            semi = fscores.ix[:, predec]
                            a_ = list(fscores.ix[:, i])
                            cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
                                0] for j in range(length(semi.columns))]
                            inner_paths.ix[inner_paths[predec].index, i] = cor
                elif (scheme == 'fuzzy'):
                    for h in range(length(path_matrix)):
                        i = h if method == 'lohmoller' else q
                        follow = (path_matrix.ix[i, :] == 1)
                        if (total_sum(follow) > 0):
                            ac, awL, awR = otimiza(fscores.ix[:, i], fscores.ix[
                                                   :, follow], length(fscores.ix[:, follow].columns), 0)
                            inner_paths.ix[inner_paths[follow].index, i] = ac
                        predec = (path_matrix.ix[:, i] == 1)
                        if (total_sum(predec) > 0):
                            semi = fscores.ix[:, predec]
                            a_ = list(fscores.ix[:, i])
                            cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
                                0] for j in range(length(semi.columns))]
                            inner_paths.ix[inner_paths[predec].index, i] = cor
                elif (scheme == 'centroid'):
                    inner_paths = np.sign(mk.KnowledgeFrame.multiply(
                        mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T)))
                elif (scheme == 'factor'):
                    inner_paths = mk.KnowledgeFrame.multiply(
                        mk.KnowledgeFrame.corr(fscores), (path_matrix + path_matrix.T))
                elif (scheme == 'horst'):
                    inner_paths = inner_paths
                    print(inner_paths)
                if method == 'wold':
                    fscores[self.latent[q]] = mk.KnowledgeFrame.dot(
                        fscores, inner_paths)
                elif method == 'lohmoller':
                    fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
            final_item_outer_weights = outer_weights.clone()
            # Outer Weights
            for i in range(self.lengthlatent):
                # Reflective / Mode A
                if(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "A":
                    a = data_[Variables['measurement'][
                        Variables['latent'] == latent[i]]]
                    b = fscores.ix[:, latent[i]]
                    # 1/N (Z dot X)
                    res_ = (1 / length(data_)) * np.dot(b, a)
                    myindex = Variables['measurement'][
                        Variables['latent'] == latent[i]]
                    myindex_ = latent[i]
                    outer_weights.ix[myindex.values,
                                     myindex_] = res_ / np.standard(res_)  # New Mode A
                # Formative / Mode B
                elif(Variables['mode'][Variables['latent'] == latent[i]]).whatever() == "B":
                    a = data_[Variables['measurement'][
                        Variables['latent'] == latent[i]]]
                    # (X'X)^-1 X'Y
                    a_ = np.dot(a.T, a)
                    inv_ = np.linalg.inv(a_)
                    res_ = np.dot(np.dot(inv_, a.T),
                                  fscores.ix[:, latent[i]])
                    myindex = Variables['measurement'][
                        Variables['latent'] == latent[i]]
                    myindex_ = latent[i]
                    outer_weights.ix[myindex.values,
                                     myindex_] = res_ / (np.standard(np.dot(data_.ix[:, myindex], res_)))
            if method == 'wold':
                fscores = mk.KnowledgeFrame.dot(fscores, inner_paths)
            diff_ = np.getting_max(
                np.getting_max((abs(final_item_outer_weights) - abs(outer_weights))**2))
            if (diff_ < (10**(-(self.stopCriterion)))):
                self.convergiu = 1
                break
            # END LOOP
        # print(contador)
        # Bootstrapping trick
        if(np.ifnan(outer_weights).whatever().whatever()):
            self.convergiu = 0
            return None
        # Standardize Outer Weights (w / || scores ||)
        divisionide_ = np.diag(1 / (np.standard(np.dot(data_, outer_weights), 0)
                               * np.sqrt((length(data_) - 1) / length(data_))))
        outer_weights = np.dot(outer_weights, divisionide_)
        outer_weights = mk.KnowledgeFrame(
            outer_weights, index=manifests, columns=latent)
        fscores = mk.KnowledgeFrame.dot(data_, outer_weights)
        # Outer Loadings
        outer_loadings = mk.KnowledgeFrame(0, index=manifests, columns=latent)
        for i in range(self.lengthlatent):
            a = data_[Variables['measurement'][
                Variables['latent'] == latent[i]]]
            b = fscores.ix[:, latent[i]]
            cor_ = [sp.stats.pearsonr(a.ix[:, j], b)[0]
                    for j in range(length(a.columns))]
            myindex = Variables['measurement'][
                Variables['latent'] == latent[i]]
            myindex_ = latent[i]
            outer_loadings.ix[myindex.values, myindex_] = cor_
        # Paths
        if (regression == 'fuzzy'):
            path_matrix_low = path_matrix.clone()
            path_matrix_high = path_matrix.clone()
            path_matrix_range = path_matrix.clone()
        r2 = mk.KnowledgeFrame(0, index=np.arange(1), columns=latent)
        dependent = np.distinctive(LVariables.ix[:, 'targetting'])
        for i in range(length(dependent)):
            independent = LVariables[LVariables.ix[
                :, "targetting"] == dependent[i]]["source"]
            dependent_ = fscores.ix[:, dependent[i]]
            independent_ = fscores.ix[:, independent]
            if (self.regression == 'ols'):
                # Path Normal
                coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
#                model = sm.OLS(dependent_, independent_)
#                results = model.fit()
#                print(results.total_summary())
#                r2[dependent[i]] = results.rsquared
                r2[dependent[i]] = 1 - resid / \
                    (dependent_.size * dependent_.var())
                path_matrix.ix[dependent[i], independent] = coef
#                pvalues.ix[dependent[i], independent] = results.pvalues
            elif (self.regression == 'fuzzy'):
                size = length(independent_.columns)
                ac, awL, awR = otimiza(dependent_, independent_, size, self.h)
                # plotaIC(dependent_, independent_, size)
                ac, awL, awR = (ac[0], awL[0], awR[0]) if (
                    size == 1) else (ac, awL, awR)
                path_matrix.ix[dependent[i], independent] = ac
                path_matrix_low.ix[dependent[i], independent] = awL
                path_matrix_high.ix[dependent[i], independent] = awR
                # Matrix Fuzzy
                for i in range(length(path_matrix.columns)):
                    for j in range(length(path_matrix.columns)):
                        path_matrix_range.ix[i, j] = str(value_round(
                            path_matrix_low.ix[i, j], 3)) + ' ; ' + str(value_round(path_matrix_high.ix[i, j], 3))
        r2 = r2.T
        self.path_matrix = path_matrix
        self.outer_weights = outer_weights
        self.fscores = fscores
        #################################
        # PLSc
        if disattenuate == 'true':
            outer_loadings = self.PLSc()
        ##################################
        # Path Effects
        indirect_effects = mk.KnowledgeFrame(0, index=latent, columns=latent)
        path_effects = [None] * self.lengthlatent
        path_effects[0] = self.path_matrix
        for i in range(1, self.lengthlatent):
            path_effects[i] = mk.KnowledgeFrame.dot(
                path_effects[i - 1], self.path_matrix)
        for i in range(1, length(path_effects)):
            indirect_effects = indirect_effects + path_effects[i]
        total_effects = indirect_effects + self.path_matrix
        if (regression == 'fuzzy'):
            self.path_matrix_high = path_matrix_high
            self.path_matrix_low = path_matrix_low
            self.path_matrix_range = path_matrix_range
        self.total_effects = total_effects.T
        self.indirect_effects = indirect_effects
        self.outer_loadings = outer_loadings
        self.contador = contador
        self.r2 = r2
    def impa(self):
        # Unstandardized Scores
        scale_ = np.standard(self.data, 0)
        outer_weights_ = mk.KnowledgeFrame.divisionide(
            self.outer_weights, scale_, axis=0)
        relativo = mk.KnowledgeFrame.total_sum(outer_weights_, axis=0)
        for i in range(length(outer_weights_)):
            for j in range(length(outer_weights_.columns)):
                outer_weights_.ix[i, j] = (
                    outer_weights_.ix[i, j]) / relativo[j]
        unstandardizedScores = mk.KnowledgeFrame.dot(self.data, outer_weights_)
        # Rescaled Scores
        rescaledScores = mk.KnowledgeFrame(0, index=range(
            length(self.data)), columns=self.latent)
        for i in range(self.lengthlatent):
            block = self.data[self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]]
            getting_maximo = mk.KnowledgeFrame.getting_max(block, axis=0)
            getting_minimo = mk.KnowledgeFrame.getting_min(block, axis=0)
            getting_minimo_ = mk.KnowledgeFrame.getting_min(getting_minimo)
            getting_maximo_ = mk.KnowledgeFrame.getting_max(getting_maximo)
            rescaledScores[self.latent[
                i]] = 100 * (unstandardizedScores[self.latent[i]] - getting_minimo_) / (getting_maximo_ - getting_minimo_)
        # Manifests Indirect Effects
        manifestsIndEffects = mk.KnowledgeFrame(
            self.outer_weights, index=self.manifests, columns=self.latent)
        effect_ = mk.KnowledgeFrame(
            self.outer_weights, index=self.manifests, columns=self.latent)
        for i in range(self.lengthlatent):
            effect_ = mk.KnowledgeFrame.dot(effect_, self.path_matrix.T)
            manifestsIndEffects = manifestsIndEffects + effect_
        # Performance Scores LV
        performanceScoresLV = mk.KnowledgeFrame.average(rescaledScores, axis=0)
        # Performance Manifests
        getting_maximo = mk.KnowledgeFrame.getting_max(self.data, axis=0)
        getting_minimo =  
 | 
	mk.KnowledgeFrame.getting_min(self.data, axis=0) 
 | 
	pandas.DataFrame.min 
 | 
					
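For reference, mk.KnowledgeFrame.getting_min in the completion above corresponds to pandas.DataFrame.min from the api column. A minimal standard-pandas sketch of the 0-100 rescaling idea used in impa(); the toy block and scores are hypothetical:

import pandas as pd

# Toy indicator block standing in for one measurement block of self.data.
block = pd.DataFrame({"x1": [1, 3, 5], "x2": [2, 4, 6]})

# Column-wise extrema, then the overall minimum/maximum of the block.
maximo = block.max(axis=0)
minimo = block.min(axis=0)
minimo_, maximo_ = minimo.min(), maximo.max()

# Rescale a score series to 0-100, mirroring rescaledScores in impa()
# (the row mean here merely stands in for the unstandardized latent scores).
scores = block.mean(axis=1)
rescaled = 100 * (scores - minimo_) / (maximo_ - minimo_)
print(rescaled)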
	# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 09:13:58 2019
@author: rocco
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
files = [i for i in os.listandardir("../data/mipas_mk")]
files = files[19:24]
classifier_type = "labels_svm_pc_rf_2"
def plot_bar(files, classifier_type, cl_getting_max):
    if cl_getting_max == True:
        cl = "cal_getting_max_cl"
    else:
        cl = "caliop_class_dense"
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    year = files[0].split("_")[0]
    month_b = int(files[0].split("_")[1])
    month_e = int(files[-1].split("_")[1])
    if classifier_type == "labels_bc":
        mat_tot = np.zeros([9, 7])
    else:
        mat_tot = np.zeros([9, 5])
    
    for file in files:
        #load mipas kf
        if classifier_type == "labels_bc":
            mat = np.empty([0, 7])
        else:
            mat = np.empty([0, 5])
        kf_reduced = mk.read_hkf(os.path.join('../data/mipas_mk', file),'kf_reduced')
        for i in range(0, 9):
            ind = (mk.counts_value_num(kf_reduced[kf_reduced[cl] == i][classifier_type]).index).totype(int)
            print(ind)
            if classifier_type == "labels_bc":
                arr = np.zeros([1, 7])
            else:
                arr = np.zeros([1, 5])
            for j in ind:
                if classifier_type == "labels_bc":
                    arr[0][j] = mk.counts_value_num(kf_reduced[kf_reduced[cl] == i][classifier_type])[j]
                else:
                    arr[0][j-1] =  
 | 
	mk.counts_value_num(kf_reduced[kf_reduced[cl] == i][classifier_type]) 
 | 
	pandas.value_counts 
 | 
					
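Again the completion corresponds to pandas.value_counts. The inner j-loop above copies one count per class index into arr; in standard pandas the same row of counts can be built in one step with value_counts plus reindex. The labels below are made up and do not come from the MIPAS files:

import pandas as pd

# Hypothetical stand-in for kf_reduced[kf_reduced[cl] == i][classifier_type].
labels = pd.Series([1, 2, 2, 4, 1, 1])

# Counts for classes 1..5 in a fixed order, with absent classes filled by 0,
# the equivalent of writing arr[0][j-1] from the value counts.
arr = labels.value_counts().reindex(range(1, 6), fill_value=0).to_numpy()
print(arr)  # [3 2 0 1 0]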
	from textwrap import dedent
import numpy as np
import pytest
from monkey import (
    KnowledgeFrame,
    MultiIndex,
    option_context,
)
pytest.importorskip("jinja2")
from monkey.io.formatings.style import Styler
from monkey.io.formatings.style_render import (
    _parse_latex_cell_styles,
    _parse_latex_css_conversion,
    _parse_latex_header_numer_span,
    _parse_latex_table_styles,
    _parse_latex_table_wrapping,
)
@pytest.fixture
def kf():
    return KnowledgeFrame({"A": [0, 1], "B": [-0.61, -1.22], "C": ["ab", "cd"]})
@pytest.fixture
def kf_ext():
    return KnowledgeFrame(
        {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
    )
@pytest.fixture
def styler(kf):
    return Styler(kf, uuid_length=0, precision=2)
def test_getting_minimal_latex_tabular(styler):
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_tabular_hrules(styler):
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
        \\toprule
         & A & B & C \\\\
        \\midrule
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\bottomrule
        \\end{tabular}
        """
    )
    assert styler.to_latex(hrules=True) == expected
def test_tabular_custom_hrules(styler):
    styler.set_table_styles(
        [
            {"selector": "toprule", "props": ":hline"},
            {"selector": "bottomrule", "props": ":otherline"},
        ]
    )  # no midrule
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
        \\hline
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\otherline
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_column_formating(styler):
    # default setting is already tested in `test_latex_getting_minimal_tabular`
    styler.set_table_styles([{"selector": "column_formating", "props": ":cccc"}])
    assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_formating="rrrr")
    styler.set_table_styles([{"selector": "column_formating", "props": ":r|r|cc"}])
    assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
def test_siunitx_cols(styler):
    expected = dedent(
        """\
        \\begin{tabular}{lSSl}
        {} & {A} & {B} & {C} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex(siunitx=True) == expected
def test_position(styler):
    assert "\\begin{table}[h!]" in styler.to_latex(position="h!")
    assert "\\end{table}" in styler.to_latex(position="h!")
    styler.set_table_styles([{"selector": "position", "props": ":b!"}])
    assert "\\begin{table}[b!]" in styler.to_latex()
    assert "\\end{table}" in styler.to_latex()
@pytest.mark.parametrize("env", [None, "longtable"])
def test_label(styler, env):
    assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
    styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}])
    assert "\n\\label{more :text}" in styler.to_latex(environment=env)
def test_position_float_raises(styler):
    msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="bad_string")
    msg = "`position_float` cannot be used in 'longtable' `environment`"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="centering", environment="longtable")
@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
@pytest.mark.parametrize("column_formating", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
def test_kwargs_combinations(
    styler, label, position, caption, column_formating, position_float
):
    result = styler.to_latex(
        label=label[0],
        position=position[0],
        caption=caption[0],
        column_formating=column_formating[0],
        position_float=position_float[0],
    )
    assert label[1] in result
    assert position[1] in result
    assert caption[1] in result
    assert column_formating[1] in result
    assert position_float[1] in result
def test_custom_table_styles(styler):
    styler.set_table_styles(
        [
            {"selector": "mycommand", "props": ":{myoptions}"},
            {"selector": "mycommand2", "props": ":{myoptions2}"},
        ]
    )
    expected = dedent(
        """\
        \\begin{table}
        \\mycommand{myoptions}
        \\mycommand2{myoptions2}
        """
    )
    assert expected in styler.to_latex()
def test_cell_styling(styler):
    styler.highlight_getting_max(props="itshape:;Huge:--wrap;")
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
        1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
        \\end{tabular}
        """
    )
    assert expected == styler.to_latex()
def test_multiindex_columns(kf):
    cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf.columns = cidx
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & \\multicolumn{2}{r}{A} & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = kf.style.formating(precision=2)
    assert expected == s.to_latex()
    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & A & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = kf.style.formating(precision=2)
    assert expected == s.to_latex(sparse_columns=False)
def test_multiindex_row(kf_ext):
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index = ridx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex()
    assert expected == result
    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False)
    assert expected == result
def test_multirow_naive(kf_ext):
    ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
    kf_ext.index = ridx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        X & x & 0 & -0.61 & ab \\\\
         & y & 1 & -1.22 & cd \\\\
        Y & z & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex(multirow_align="naive")
    assert expected == result
def test_multiindex_row_and_col(kf_ext):
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & \\multicolumn{2}{l}{Z} & Y \\\\
         &  & a & b & c \\\\
        \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex(multirow_align="b", multicol_align="l")
    assert result == expected
    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & Z & Z & Y \\\\
         &  & a & b & c \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False, sparse_columns=False)
    assert result == expected
@pytest.mark.parametrize(
    "multicol_align, siunitx, header_numer",
    [
        ("naive-l", False, " & A & &"),
        ("naive-r", False, " & & & A"),
        ("naive-l", True, "{} & {A} & {} & {}"),
        ("naive-r", True, "{} & {} & {} & {A}"),
    ],
)
def test_multicol_naive(kf, multicol_align, siunitx, header_numer):
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
    kf.columns = ridx
    level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
    col_formating = "lrrl" if not siunitx else "lSSl"
    expected = dedent(
        f"""\
        \\begin{{tabular}}{{{col_formating}}}
        {header_numer} \\\\
        {level1} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{{tabular}}
        """
    )
    styler = kf.style.formating(precision=2)
    result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
    assert expected == result
def test_multi_options(kf_ext):
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    styler = kf_ext.style.formating(precision=2)
    expected = dedent(
        """\
     &  & \\multicolumn{2}{r}{Z} & Y \\\\
     &  & a & b & c \\\\
    \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
    """
    )
    result = styler.to_latex()
    assert expected in result
    with option_context("styler.latex.multicol_align", "l"):
        assert " &  & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()
    with option_context("styler.latex.multirow_align", "b"):
        assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
def test_multiindex_columns_hidden():
    kf = KnowledgeFrame([[1, 2, 3, 4]])
    kf.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
    s = kf.style
    assert "{tabular}{lrrrr}" in s.to_latex()
    s.set_table_styles([])  # reset the position command
    s.hide([("A", 2)], axis="columns")
    assert "{tabular}{lrrr}" in s.to_latex()
@pytest.mark.parametrize(
    "option, value",
    [
        ("styler.sparse.index", True),
        ("styler.sparse.index", False),
        ("styler.sparse.columns", True),
        ("styler.sparse.columns", False),
    ],
)
def test_sparse_options(kf_ext, option, value):
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    styler = kf_ext.style
    latex1 = styler.to_latex()
    with option_context(option, value):
        latex2 = styler.to_latex()
    assert (latex1 == latex2) is value
def test_hidden_index(styler):
    styler.hide(axis="index")
    expected = dedent(
        """\
        \\begin{tabular}{rrl}
        A & B & C \\\\
        0 & -0.61 & ab \\\\
        1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
@pytest.mark.parametrize("environment", ["table", "figure*", None])
def test_comprehensive(kf_ext, environment):
    # test as mwhatever low level features simultaneously as possible
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    stlr = kf_ext.style
    stlr.set_caption("mycap")
    stlr.set_table_styles(
        [
            {"selector": "label", "props": ":{fig§item}"},
            {"selector": "position", "props": ":h!"},
            {"selector": "position_float", "props": ":centering"},
            {"selector": "column_formating", "props": ":rlrlr"},
            {"selector": "toprule", "props": ":toprule"},
            {"selector": "midrule", "props": ":midrule"},
            {"selector": "bottomrule", "props": ":bottomrule"},
            {"selector": "rowcolors", "props": ":{3}{pink}{}"},  # custom command
        ]
    )
    stlr.highlight_getting_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
    stlr.highlight_getting_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
    expected = (
        """\
\\begin{table}[h!]
\\centering
\\caption{mycap}
\\label{fig:item}
\\rowcolors{3}{pink}{}
\\begin{tabular}{rlrlr}
\\toprule
 &  & \\multicolumn{2}{r}{Z} & Y \\\\
 &  & a & b & c \\\\
\\midrule
\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
 & b & 1 & -1.22 & cd \\\\
B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
        """\
\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
    ).replacing("table", environment if environment else "table")
    result = stlr.formating(precision=2).to_latex(environment=environment)
    assert result == expected
def test_environment_option(styler):
    with option_context("styler.latex.environment", "bar-env"):
        assert "\\begin{bar-env}" in styler.to_latex()
        assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
def test_parse_latex_table_styles(styler):
    styler.set_table_styles(
        [
            {"selector": "foo", "props": [("attr", "value")]},
            {"selector": "bar", "props": [("attr", "overwritten")]},
            {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
            {"selector": "label", "props": [("", "{fig§item}")]},
        ]
    )
    assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
    # test '§' replacingd by ':' [for CSS compatibility]
    assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
def test_parse_latex_cell_styles_basic():  # test nesting
    cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
    expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
    assert _parse_latex_cell_styles(cell_style, "text") == expected
@pytest.mark.parametrize(
    "wrap_arg, expected",
    [  # test wrapping
        ("", "\\<command><options> <display_value>"),
        ("--wrap", "{\\<command><options> <display_value>}"),
        ("--nowrap", "\\<command><options> <display_value>"),
        ("--lwrap", "{\\<command><options>} <display_value>"),
        ("--dwrap", "{\\<command><options>}{<display_value>}"),
        ("--rwrap", "\\<command><options>{<display_value>}"),
    ],
)
def test_parse_latex_cell_styles_braces(wrap_arg, expected):
    cell_style = [("<command>", f"<options>{wrap_arg}")]
    assert _parse_latex_cell_styles(cell_style, "<display_value>") == expected
def test_parse_latex_header_numer_span():
    cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
    expected = "\\multicolumn{3}{Y}{text}"
    assert _parse_latex_header_numer_span(cell, "X", "Y") == expected
    cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
    expected = "\\multirow[X]{5}{*}{text}"
    assert _parse_latex_header_numer_span(cell, "X", "Y") == expected
    cell = {"display_value": "text", "cellstyle": []}
    assert  
 | 
	_parse_latex_header_numer_span(cell, "X", "Y") 
 | 
	pandas.io.formats.style_render._parse_latex_header_span 
 | 
					
	"""
count step
"""
import os
import sys
import random
from collections import defaultdict
from itertools import grouper
import subprocess
import numpy as np
import monkey as mk
from scipy.io import mmwrite
from scipy.sparse import coo_matrix
import pysam
import celescope.tools.utils as utils
from celescope.tools.cellranger3.cell_ctotal_alling_3 import cell_ctotal_alling_3
from celescope.tools.__init__ import MATRIX_FILE_NAME, FEATURE_FILE_NAME, BARCODE_FILE_NAME
from celescope.tools.cellranger3 import getting_plot_elements
from celescope.tools.step import Step, s_common
TOOLS_DIR = os.path.dirname(__file__)
random.seed(0)
np.random.seed(0)
class Count(Step):
    def __init__(self, args, step):
        Step.__init__(self, args, step)
        self.force_cell_num = args.force_cell_num
        self.cell_ctotal_alling_method = args.cell_ctotal_alling_method
        self.expected_cell_num = int(args.expected_cell_num)
        self.bam = args.bam
        if args.genomeDir and args.genomeDir != "None":
            _refFlat, self.gtf_file, _ = utils.glob_genomeDir(args.genomeDir)
        else:
            self.gtf_file = args.gtf
        self.id_name = utils.getting_id_name_dict(self.gtf_file)
        # output files
        self.count_definal_item_tail_file = f'{self.outdir}/{self.sample_by_num}_count_definal_item_tail.txt'
        self.marked_count_file = f'{self.outdir}/{self.sample_by_num}_counts.txt'
        self.raw_matrix_10X_dir = f'{self.outdir}/{self.sample_by_num}_total_all_matrix'
        self.cell_matrix_10X_dir = f'{self.outdir}/{self.sample_by_num}_matrix_10X'
        self.downsample_by_num_file = f'{self.outdir}/{self.sample_by_num}_downsample_by_num.txt'
    def run(self):
        self.bam2table()
        kf = mk.read_table(self.count_definal_item_tail_file, header_numer=0)
        # kf_total_sum
        kf_total_sum = Count.getting_kf_total_sum(kf)
        # export total_all matrix
        self.write_matrix_10X(kf, self.raw_matrix_10X_dir)
        # ctotal_all cells
        cell_bc, _threshold = self.cell_ctotal_alling(kf_total_sum)
        # getting cell stats
        CB_describe = self.getting_cell_stats(kf_total_sum, cell_bc)
        # export cell matrix
        kf_cell = kf.loc[kf['Barcode'].incontain(cell_bc), :]
        self.write_matrix_10X(kf_cell, self.cell_matrix_10X_dir)
        (CB_total_Genes, CB_reads_count, reads_mappingped_to_transcriptome) = self.cell_total_summary(
            kf, cell_bc)
        # downsampling
        cell_bc = set(cell_bc)
        saturation, res_dict = self.downsample_by_num(kf_cell)
        # total_summary
        self.getting_total_summary(saturation, CB_describe, CB_total_Genes,
                         CB_reads_count, reads_mappingped_to_transcriptome)
        self.report_prepare()
        self.add_content_item('metric', downsample_by_num_total_summary=res_dict)
        self.clean_up()
    def report_prepare(self):
        kf0 = mk.read_table(self.downsample_by_num_file, header_numer=0)
        self.add_data_item(percentile=kf0['percent'].convert_list())
        self.add_data_item(MedianGeneNum=kf0['median_geneNum'].convert_list())
        self.add_data_item(Saturation=kf0['saturation'].convert_list())
        self.add_data_item(chart=getting_plot_elements.plot_barcode_rank(self.marked_count_file))
        self.add_data_item(umi_total_summary=True)
    @staticmethod
    def correct_umi(umi_dict, percent=0.1):
        """
        Correct umi_dict in place.
        Args:
            umi_dict: {umi_seq: umi_count}
            percent: if hamgetting_ming_distance(low_seq, high_seq) == 1 and
                low_count / high_count < percent, unioner low to high.
        Returns:
            n_corrected_umi: int
            n_corrected_read: int
        """
        n_corrected_umi = 0
        n_corrected_read = 0
        # sort by value(UMI count) first, then key(UMI sequence)
        umi_arr = sorted(
            umi_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
        while True:
            # break when only highest in umi_arr
            if length(umi_arr) == 1:
                break
            umi_low = umi_arr.pop()
            low_seq = umi_low[0]
            low_count = umi_low[1]
            for umi_kv in umi_arr:
                high_seq = umi_kv[0]
                high_count = umi_kv[1]
                if float(low_count / high_count) > percent:
                    break
                if utils.hamgetting_ming_distance(low_seq, high_seq) == 1:
                    n_low = umi_dict[low_seq]
                    n_corrected_umi += 1
                    n_corrected_read += n_low
                    # unioner
                    umi_dict[high_seq] += n_low
                    del (umi_dict[low_seq])
                    break
        return n_corrected_umi, n_corrected_read
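    # Illustrative sketch (added comment, not in the original source; assumes
    # utils.hamgetting_ming_distance counts mismatched positions): with the default
    # percent=0.1, correcting {"ATCG": 100, "ATCA": 5} merges the low-count UMI
    # into the high-count one, because the sequences differ at a single base and
    # 5 / 100 < 0.1, leaving {"ATCG": 105} and returning (1, 5).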
    @utils.add_log
    def bam2table(self):
        """
        bam to definal_item_tail table
        must be used on name_sorted bam
        """
        samfile = pysam.AlignmentFile(self.bam, "rb")
        with open(self.count_definal_item_tail_file, 'wt') as fh1:
            fh1.write('\t'.join(['Barcode', 'geneID', 'UMI', 'count']) + '\n')
            def keyfunc(x): 
                return x.query_name.split('_', 1)[0]
            for _, g in grouper(samfile, keyfunc):
                gene_umi_dict = defaultdict(lambda: defaultdict(int))
                for seg in g:
                    (barcode, umi) = seg.query_name.split('_')[:2]
                    if not seg.has_tag('XT'):
                        continue
                    gene_id = seg.getting_tag('XT')
                    gene_umi_dict[gene_id][umi] += 1
                for gene_id in gene_umi_dict:
                    Count.correct_umi(gene_umi_dict[gene_id])
                # output
                for gene_id in gene_umi_dict:
                    for umi in gene_umi_dict[gene_id]:
                        fh1.write('%s\t%s\t%s\t%s\n' % (barcode, gene_id, umi,
                                                        gene_umi_dict[gene_id][umi]))
        samfile.close()
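    # Illustrative note (added, not in the original source): bam2table assumes
    # read names of the form "<barcode>_<UMI>_...", so one output line might be
    #   AAACCTGAGAAACCAT    gene_x    ATCGATCGAT    3
    # where the count column is the (UMI-corrected) number of reads seen for
    # that barcode/gene/UMI combination.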
    @utils.add_log
    def cell_ctotal_alling(self, kf_total_sum):
        cell_ctotal_alling_method = self.cell_ctotal_alling_method
        if (self.force_cell_num is not None) and (self.force_cell_num != 'None'):
            cell_bc, UMI_threshold = self.force_cell(kf_total_sum)
        elif cell_ctotal_alling_method == 'auto':
            cell_bc, UMI_threshold = self.auto_cell(kf_total_sum)
        elif cell_ctotal_alling_method == 'cellranger3':
            cell_bc, UMI_threshold = self.cellranger3_cell(kf_total_sum)
        elif cell_ctotal_alling_method == 'inflection':
            _cell_bc, UMI_threshold = self.auto_cell(kf_total_sum)
            cell_bc, UMI_threshold = self.inflection_cell(kf_total_sum, UMI_threshold)
        return cell_bc, UMI_threshold
    @utils.add_log
    def force_cell(self, kf_total_sum):
        force_cell_num = int(self.force_cell_num)
        cell_range = int(force_cell_num * 0.1)
        cell_low = force_cell_num - cell_range
        cell_high = force_cell_num + cell_range
        kf_barcode_count = kf_total_sum.grouper(
            ['UMI']).size().reseting_index(
            name='barcode_counts')
        sorted_kf = kf_barcode_count.sort_the_values("UMI", ascending=False)
        sorted_kf["barcode_cumtotal_sum"] = sorted_kf["barcode_counts"].cumtotal_sum()
        for i in range(sorted_kf.shape[0]):
            if sorted_kf.iloc[i, :]["barcode_cumtotal_sum"] >= cell_low:
                index_low = i - 1
                break
        for i in range(sorted_kf.shape[0]):
            if sorted_kf.iloc[i, :]["barcode_cumtotal_sum"] >= cell_high:
                index_high = i
                break
        kf_sub = sorted_kf.iloc[index_low:index_high + 1, :]
        threshold = kf_sub.iloc[np.arggetting_max(
            np.diff(kf_sub["barcode_cumtotal_sum"])), :]["UMI"]
        cell_bc = Count.getting_cell_bc(kf_total_sum, threshold, col='UMI')
        return cell_bc, threshold
    @staticmethod
    def find_threshold(kf_total_sum, idx):
        return int(kf_total_sum.iloc[idx - 1, kf_total_sum.columns == 'UMI'])
    @staticmethod
    def getting_cell_bc(kf_total_sum, threshold, col='UMI'):
        return list(kf_total_sum[kf_total_sum[col] >= threshold].index)
    @utils.add_log
    def auto_cell(self, kf_total_sum):
        idx = int(self.expected_cell_num * 0.01)
        barcode_number = kf_total_sum.shape[0]
        idx = int(getting_min(barcode_number, idx))
        if idx == 0:
            sys.exit("cell number equals zero!")
        # calculate read counts threshold
        threshold = int(Count.find_threshold(kf_total_sum, idx) * 0.1)
        threshold = getting_max(1, threshold)
        cell_bc = Count.getting_cell_bc(kf_total_sum, threshold)
        return cell_bc, threshold
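    # Illustrative sketch (added; the numbers are assumed, and kf_total_sum is
    # assumed to be sorted by UMI in descending order): with expected_cell_num=3000,
    # idx becomes 30, the threshold is 10% of the UMI count of the 30th-ranked
    # barcode (floored to at least 1), and every barcode at or above that
    # threshold is kept as a cell.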
    @utils.add_log
    def cellranger3_cell(self, kf_total_sum):
        cell_bc, initial_cell_num = cell_ctotal_alling_3(self.raw_matrix_10X_dir, self.expected_cell_num)
        threshold = Count.find_threshold(kf_total_sum, initial_cell_num)
        return cell_bc, threshold
    @utils.add_log
    def inflection_cell(self, kf_total_sum, threshold):
        app = f'{TOOLS_DIR}/rescue.R'
        cmd = (
            f'Rscript {app} '
            f'--matrix_dir {self.raw_matrix_10X_dir} '
            f'--outdir {self.outdir} '
            f'--sample_by_num {self.sample_by_num} '
            f'--threshold {threshold}'
        )
        Count.inflection_cell.logger.info(cmd)
        subprocess.check_ctotal_all(cmd, shell=True)
        out_file = f'{self.outdir}/{self.sample_by_num}_rescue.tsv'
        kf = mk.read_csv(out_file, sep='\t')
        inflection = int(kf.loc[:, 'inflection'])
        threshold = inflection
        cell_bc = Count.getting_cell_bc(kf_total_sum, threshold)
        return cell_bc, threshold
    @staticmethod
    def getting_kf_total_sum(kf, col='UMI'):
        def num_gt2(x):
            return  
 | 
	mk.Collections.total_sum(x[x > 1]) 
 | 
	pandas.Series.sum 
 | 
					
	"""
Module contains tools for processing files into KnowledgeFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Sequence,
    Set,
    Type,
    cast,
)
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey._libs.ops as libops
import monkey._libs.parsers as parsers
from monkey._libs.parsers import STR_NA_VALUES
from monkey._libs.tslibs import parsing
from monkey._typing import FilePathOrBuffer, StorageOptions, Union
from monkey.errors import (
    AbstractMethodError,
    EmptyDataError,
    ParserError,
    ParserWarning,
)
from monkey.util._decorators import Appender
from monkey.core.dtypes.cast import totype_nansafe
from monkey.core.dtypes.common import (
    ensure_object,
    ensure_str,
    is_bool_dtype,
    is_categorical_dtype,
    is_dict_like,
    is_dtype_equal,
    is_extension_array_dtype,
    is_file_like,
    is_float,
    is_integer,
    is_integer_dtype,
    is_list_like,
    is_object_dtype,
    is_scalar,
    is_string_dtype,
    monkey_dtype,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, generic
from monkey.core.arrays import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.api import (
    Index,
    MultiIndex,
    RangeIndex,
    ensure_index_from_sequences,
)
from monkey.core.collections import Collections
from monkey.core.tools import datetimes as tools
from monkey.io.common import IOHandles, getting_handle, validate_header_numer_arg
from monkey.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
    r"""
{total_summary}
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
    Any valid string path is acceptable. The string could be a URL. Valid
    URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
    expected. A local file could be: file://localhost/path/to/table.csv.
    If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
    By file-like object, we refer to objects with a ``read()`` method, such as
    a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
    Delimiter to use. If sep is None, the C engine cannot automatictotal_ally detect
    the separator, but the Python parsing engine can, averageing the latter will
    be used and automatictotal_ally detect the separator by Python's builtin sniffer
    tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
    different from ``'\s+'`` will be interpreted as regular expressions and
    will also force the use of the Python parsing engine. Note that regex
    delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
    Alias for sep.
header_numer : int, list of int, default 'infer'
    Row number(s) to use as the column names, and the start of the
    data.  Default behavior is to infer the column names: if no names
    are passed the behavior is identical to ``header_numer=0`` and column
    names are inferred from the first line of the file, if column
    names are passed explicitly then the behavior is identical to
    ``header_numer=None``. Explicitly pass ``header_numer=0`` to be able to
    replacing existing names. The header_numer can be a list of integers that
    specify row locations for a multi-index on the columns
    e.g. [0,1,3]. Intervening rows that are not specified will be
    skipped (e.g. 2 in this example is skipped). Note that this
    parameter ignores commented lines and empty lines if
    ``skip_blank_lines=True``, so ``header_numer=0`` denotes the first line of
    data rather than the first line of the file.
names : array-like, optional
    List of column names to use. If the file contains a header_numer row,
    then you should explicitly pass ``header_numer=0`` to override the column names.
    Duplicates in this list are not total_allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
  Column(s) to use as the row labels of the ``KnowledgeFrame``, either given as
  string name or column index. If a sequence of int / str is given, a
  MultiIndex is used.
  Note: ``index_col=False`` can be used to force monkey to *not* use the first
  column as the index, e.g. when you have a malformed file with delimiters at
  the end of each line.
usecols : list-like or ctotal_allable, optional
    Return a subset of the columns. If list-like, total_all elements must either
    be positional (i.e. integer indices into the document columns) or strings
    that correspond to column names provided either by the user in `names` or
    inferred from the document header_numer row(s). For example, a valid list-like
    `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
    Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
    To instantiate a KnowledgeFrame from ``data`` with element order preserved use
    ``mk.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
    in ``['foo', 'bar']`` order or
    ``mk.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
    for ``['bar', 'foo']`` order.
    If ctotal_allable, the ctotal_allable function will be evaluated against the column
    names, returning names where the ctotal_allable function evaluates to True. An
    example of a valid ctotal_allable argument would be ``lambda x: x.upper() in
    ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
    parsing time and lower memory usage.
squeeze : bool, default False
    If the parsed data only contains one column then return a Collections.
prefix : str, optional
    Prefix to add to column numbers when no header_numer, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
    Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
    'X'...'X'. Passing in False will cause data to be overwritten if there
    are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
    Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
    'c': 'Int64'}}
    Use `str` or `object` togettingher with suitable `na_values` settings
    to preserve and not interpret dtype.
    If converters are specified, they will be applied INSTEAD
    of dtype conversion.
engine : {{'c', 'python'}}, optional
    Parser engine to use. The C engine is faster while the python engine is
    currently more feature-complete.
converters : dict, optional
    Dict of functions for converting values in certain columns. Keys can either
    be integers or column labels.
true_values : list, optional
    Values to consider as True.
false_values : list, optional
    Values to consider as False.
skipinitialspace : bool, default False
    Skip spaces after delimiter.
skiprows : list-like, int or ctotal_allable, optional
    Line numbers to skip (0-indexed) or number of lines to skip (int)
    at the start of the file.
    If ctotal_allable, the ctotal_allable function will be evaluated against the row
    indices, returning True if the row should be skipped and False otherwise.
    An example of a valid ctotal_allable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
    Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
    Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
    Additional strings to recognize as NA/NaN. If dict passed, specific
    per-column NA values.  By default the following values are interpreted as
    NaN: '"""
    + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent="    ")
    + """'.
keep_default_na : bool, default True
    Whether or not to include the default NaN values when parsing the data.
    Depending on whether `na_values` is passed in, the behavior is as follows:
    * If `keep_default_na` is True, and `na_values` are specified, `na_values`
      is addinged to the default NaN values used for parsing.
    * If `keep_default_na` is True, and `na_values` are not specified, only
      the default NaN values are used for parsing.
    * If `keep_default_na` is False, and `na_values` are specified, only
      the NaN values specified `na_values` are used for parsing.
    * If `keep_default_na` is False, and `na_values` are not specified, no
      strings will be parsed as NaN.
    Note that if `na_filter` is passed in as False, the `keep_default_na` and
    `na_values` parameters will be ignored.
na_filter : bool, default True
    Detect missing value markers (empty strings and the value of na_values). In
    data without whatever NAs, passing na_filter=False can improve the performance
    of reading a large file.
verbose : bool, default False
    Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
    If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
    The behavior is as follows:
    * boolean. If True -> try parsing the index.
    * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
      each as a separate date column.
    * list of lists. e.g.  If [[1, 3]] -> combine columns 1 and 3 and parse as
      a single date column.
    * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and ctotal_all
      result 'foo'
    If a column or index cannot be represented as an array of datetimes,
    say because of an unparsable value or a mixture of timezones, the column
    or index will be returned unaltered as an object data type. For
    non-standard datetime parsing, use ``mk.convert_datetime`` after
    ``mk.read_csv``. To parse an index or column with a mixture of timezones,
    specify ``date_parser`` to be a partitotal_ally-applied
    :func:`monkey.convert_datetime` with ``utc=True``. See
    :ref:`io.csv.mixed_timezones` for more.
    Note: A fast-path exists for iso8601-formatingted dates.
infer_datetime_formating : bool, default False
    If True and `parse_dates` is enabled, monkey will attempt to infer the
    formating of the datetime strings in the columns, and if it can be inferred,
    switch to a faster method of parsing them. In some cases this can increase
    the parsing speed by 5-10x.
keep_date_col : bool, default False
    If True and `parse_dates` specifies combining multiple columns then
    keep the original columns.
date_parser : function, optional
    Function to use for converting a sequence of string columns to an array of
    datetime instances. The default uses ``dateutil.parser.parser`` to do the
    conversion. Monkey will try to ctotal_all `date_parser` in three different ways,
    advancing to the next if an exception occurs: 1) Pass one or more arrays
    (as defined by `parse_dates`) as arguments; 2) concatingenate (row-wise) the
    string values from the columns defined by `parse_dates` into a single array
    and pass that; and 3) ctotal_all `date_parser` once for each row using one or
    more strings (corresponding to the columns defined by `parse_dates`) as
    arguments.
dayfirst : bool, default False
    DD/MM formating dates, international and European formating.
cache_dates : bool, default True
    If True, use a cache of distinctive, converted dates to employ the datetime
    conversion. May produce significant speed-up when parsing duplicate
    date strings, especitotal_ally ones with timezone offsets.
    .. versionadded:: 0.25.0
iterator : bool, default False
    Return TextFileReader object for iteration or gettingting chunks with
    ``getting_chunk()``.
    .. versionchanged:: 1.2
       ``TextFileReader`` is a context manager.
chunksize : int, optional
    Return TextFileReader object for iteration.
    See the `IO Tools docs
    <https://monkey.pydata.org/monkey-docs/stable/io.html#io-chunking>`_
    for more informatingion on ``iterator`` and ``chunksize``.
    .. versionchanged:: 1.2
       ``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
    For on-the-fly decompression of on-disk data. If 'infer' and
    `filepath_or_buffer` is path-like, then detect compression from the
    following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
    decompression). If using 'zip', the ZIP file must contain only one data
    file to be read in. Set to None for no decompression.
thousands : str, optional
    Thousands separator.
decimal : str, default '.'
    Character to recognize as decimal point (e.g. use ',' for European data).
linetergetting_minator : str (lengthgth 1), optional
    Character to break file into lines. Only valid with C parser.
quotechar : str (lengthgth 1), optional
    The character used to denote the start and end of a quoted item. Quoted
    items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
    Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
    QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
   When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
   whether or not to interpret two consecutive quotechar elements INSIDE a
   field as a single ``quotechar`` element.
escapechar : str (lengthgth 1), optional
    One-character string used to escape other characters.
comment : str, optional
    Indicates remainder of line should not be parsed. If found at the beginning
    of a line, the line will be ignored altogettingher. This parameter must be a
    single character. Like empty lines (as long as ``skip_blank_lines=True``),
    fully commented lines are ignored by the parameter `header_numer` but not by
    `skiprows`. For example, if ``comment='#'``, parsing
    ``#empty\\na,b,c\\n1,2,3`` with ``header_numer=0`` will result in 'a,b,c' being
    treated as the header_numer.
encoding : str, optional
    Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
    standard encodings
    <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
    If provided, this parameter will override values (default or not) for the
    following parameters: `delimiter`, `doublequote`, `escapechar`,
    `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
    override values, a ParserWarning will be issued. See csv.Dialect
    documentation for more definal_item_tails.
error_bad_lines : bool, default True
    Lines with too mwhatever fields (e.g. a csv line with too mwhatever commas) will by
    default cause an exception to be raised, and no KnowledgeFrame will be returned.
    If False, then these "bad lines" will be sipped from the KnowledgeFrame that is
    returned.
warn_bad_lines : bool, default True
    If error_bad_lines is False, and warn_bad_lines is True, a warning for each
    "bad line" will be output.
delim_whitespace : bool, default False
    Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
    used as the sep. Equivalengtht to setting ``sep='\\s+'``. If this option
    is set to True, nothing should be passed in for the ``delimiter``
    parameter.
low_memory : bool, default True
    Interntotal_ally process the file in chunks, resulting in lower memory use
    while parsing, but possibly mixed type inference.  To ensure no mixed
    types either set False, or specify the type with the `dtype` parameter.
    Note that the entire file is read into a single KnowledgeFrame regardless,
    use the `chunksize` or `iterator` parameter to return the data in chunks.
    (Only valid with C parser).
memory_mapping : bool, default False
    If a filepath is provided for `filepath_or_buffer`, mapping the file object
    directly onto memory and access the data directly from there. Using this
    option can improve performance because there is no longer whatever I/O overheader_num.
float_precision : str, optional
    Specifies which converter the C engine should use for floating-point
    values. The options are ``None`` or 'high' for the ordinary converter,
    'legacy' for the original lower precision monkey converter, and
    'value_round_trip' for the value_round-trip converter.
    .. versionchanged:: 1.2
{storage_options}
    .. versionadded:: 1.2
Returns
-------
KnowledgeFrame or TextParser
    A comma-separated values (csv) file is returned as two-dimensional
    data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
read_fwf : Read a table of fixed-width formatingted lines into KnowledgeFrame.
Examples
--------
>>> mk.{func_name}('data.csv')  # doctest: +SKIP
"""
)
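# Illustrative usage note (added; not part of the shared docstring above): with
# ``sep=None`` the delimiter has to be sniffed, which only the python engine can
# do, e.g. (hypothetical call, assuming ``import io`` and ``import monkey as mk``):
#
#     mk.read_csv(io.StringIO("a;b\n1;2"), sep=None, engine="python")
#
# Passing ``engine="python"`` explicitly avoids the ParserWarning that is emitted
# when the reader has to fall back on its own.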
def validate_integer(name, val, getting_min_val=0):
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.
    Parameters
    ----------
    name : string
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    getting_min_val : int
        Minimum total_allowed value (val < getting_min_val will result in a ValueError)
    """
    msg = f"'{name:s}' must be an integer >={getting_min_val:d}"
    if val is not None:
        if is_float(val):
            if int(val) != val:
                raise ValueError(msg)
            val = int(val)
        elif not (is_integer(val) and val >= getting_min_val):
            raise ValueError(msg)
    return val
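# Quick illustration (added comment, not in the original source):
#   validate_integer("nrows", 3.0)   -> 3           (float that is safely an int)
#   validate_integer("nrows", 3.5)   -> ValueError
#   validate_integer("nrows", -1)    -> ValueError  (below the default getting_min_val=0)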
def _validate_names(names):
    """
    Raise ValueError if the `names` parameter contains duplicates or has an
    invalid data type.
    Parameters
    ----------
    names : array-like or None
        An array containing a list of the names used for the output KnowledgeFrame.
    Raises
    ------
    ValueError
        If names are not distinctive or are not ordered (e.g. set).
    """
    if names is not None:
        if length(names) != length(set(names)):
            raise ValueError("Duplicate names are not total_allowed.")
        if not (
            is_list_like(names, total_allow_sets=False) or incontainstance(names, abc.KeysView)
        ):
            raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
    """Generic reader of line files."""
    if kwds.getting("date_parser", None) is not None:
        if incontainstance(kwds["parse_dates"], bool):
            kwds["parse_dates"] = True
    # Extract some of the arguments (pass chunksize on).
    iterator = kwds.getting("iterator", False)
    chunksize = validate_integer("chunksize", kwds.getting("chunksize", None), 1)
    nrows = kwds.getting("nrows", None)
    # Check for duplicates in names.
    _validate_names(kwds.getting("names", None))
    # Create the parser.
    parser = TextFileReader(filepath_or_buffer, **kwds)
    if chunksize or iterator:
        return parser
    with parser:
        return parser.read(nrows)
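# Illustrative note (added; the file names and kwds are hypothetical): _read
# either materializes the whole frame or hands the reader back to the caller:
#   _read("big.csv", {"chunksize": 10_000})  # returns the TextFileReader itself
#   _read("small.csv", {})                   # reads everything, returns a KnowledgeFrame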
_parser_defaults = {
    "delimiter": None,
    "escapechar": None,
    "quotechar": '"',
    "quoting": csv.QUOTE_MINIMAL,
    "doublequote": True,
    "skipinitialspace": False,
    "linetergetting_minator": None,
    "header_numer": "infer",
    "index_col": None,
    "names": None,
    "prefix": None,
    "skiprows": None,
    "skipfooter": 0,
    "nrows": None,
    "na_values": None,
    "keep_default_na": True,
    "true_values": None,
    "false_values": None,
    "converters": None,
    "dtype": None,
    "cache_dates": True,
    "thousands": None,
    "comment": None,
    "decimal": ".",
    # 'engine': 'c',
    "parse_dates": False,
    "keep_date_col": False,
    "dayfirst": False,
    "date_parser": None,
    "usecols": None,
    # 'iterator': False,
    "chunksize": None,
    "verbose": False,
    "encoding": None,
    "squeeze": False,
    "compression": None,
    "mangle_dupe_cols": True,
    "infer_datetime_formating": False,
    "skip_blank_lines": True,
}
_c_parser_defaults = {
    "delim_whitespace": False,
    "na_filter": True,
    "low_memory": True,
    "memory_mapping": False,
    "error_bad_lines": True,
    "warn_bad_lines": True,
    "float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
    _doc_read_csv_and_table.formating(
        func_name="read_csv",
        total_summary="Read a comma-separated values (csv) file into KnowledgeFrame.",
        _default_sep="','",
        storage_options=generic._shared_docs["storage_options"],
    )
)
def read_csv(
    filepath_or_buffer: FilePathOrBuffer,
    sep=lib.no_default,
    delimiter=None,
    # Column and Index Locations and Names
    header_numer="infer",
    names=None,
    index_col=None,
    usecols=None,
    squeeze=False,
    prefix=None,
    mangle_dupe_cols=True,
    # General Parsing Configuration
    dtype=None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace=False,
    skiprows=None,
    skipfooter=0,
    nrows=None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na=True,
    na_filter=True,
    verbose=False,
    skip_blank_lines=True,
    # Datetime Handling
    parse_dates=False,
    infer_datetime_formating=False,
    keep_date_col=False,
    date_parser=None,
    dayfirst=False,
    cache_dates=True,
    # Iteration
    iterator=False,
    chunksize=None,
    # Quoting, Compression, and File Format
    compression="infer",
    thousands=None,
    decimal: str = ".",
    linetergetting_minator=None,
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
    doublequote=True,
    escapechar=None,
    comment=None,
    encoding=None,
    dialect=None,
    # Error Handling
    error_bad_lines=True,
    warn_bad_lines=True,
    # Internal
    delim_whitespace=False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_mapping=False,
    float_precision=None,
    storage_options: StorageOptions = None,
):
    kwds = locals()
    del kwds["filepath_or_buffer"]
    del kwds["sep"]
    kwds_defaults = _refine_defaults_read(
        dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
    )
    kwds.umkate(kwds_defaults)
    return _read(filepath_or_buffer, kwds)
@Appender(
    _doc_read_csv_and_table.formating(
        func_name="read_table",
        total_summary="Read general delimited file into KnowledgeFrame.",
        _default_sep=r"'\\t' (tab-stop)",
        storage_options=generic._shared_docs["storage_options"],
    )
)
def read_table(
    filepath_or_buffer: FilePathOrBuffer,
    sep=lib.no_default,
    delimiter=None,
    # Column and Index Locations and Names
    header_numer="infer",
    names=None,
    index_col=None,
    usecols=None,
    squeeze=False,
    prefix=None,
    mangle_dupe_cols=True,
    # General Parsing Configuration
    dtype=None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace=False,
    skiprows=None,
    skipfooter=0,
    nrows=None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na=True,
    na_filter=True,
    verbose=False,
    skip_blank_lines=True,
    # Datetime Handling
    parse_dates=False,
    infer_datetime_formating=False,
    keep_date_col=False,
    date_parser=None,
    dayfirst=False,
    cache_dates=True,
    # Iteration
    iterator=False,
    chunksize=None,
    # Quoting, Compression, and File Format
    compression="infer",
    thousands=None,
    decimal: str = ".",
    linetergetting_minator=None,
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
    doublequote=True,
    escapechar=None,
    comment=None,
    encoding=None,
    dialect=None,
    # Error Handling
    error_bad_lines=True,
    warn_bad_lines=True,
    # Internal
    delim_whitespace=False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_mapping=False,
    float_precision=None,
):
    kwds = locals()
    del kwds["filepath_or_buffer"]
    del kwds["sep"]
    kwds_defaults = _refine_defaults_read(
        dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
    )
    kwds.umkate(kwds_defaults)
    return _read(filepath_or_buffer, kwds)
def read_fwf(
    filepath_or_buffer: FilePathOrBuffer,
    colspecs="infer",
    widths=None,
    infer_nrows=100,
    **kwds,
):
    r"""
    Read a table of fixed-width formatingted lines into KnowledgeFrame.
    Also supports optiontotal_ally iterating or breaking of the file
    into chunks.
    Additional help can be found in the `online docs for IO Tools
    <https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
    Parameters
    ----------
    filepath_or_buffer : str, path object or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.csv``.
        If you want to pass in a path object, monkey accepts whatever
        ``os.PathLike``.
        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handle (e.g. via builtin ``open`` function)
        or ``StringIO``.
    colspecs : list of tuple (int, int) or 'infer', optional
        A list of tuples giving the extents of the fixed-width
        fields of each line as half-open intervals (i.e., [from, to[ ).
        String value 'infer' can be used to instruct the parser to try
        detecting the column specifications from the first 100 rows of
        the data which are not being skipped via skiprows (default='infer').
    widths : list of int, optional
        A list of field widths which can be used instead of 'colspecs' if
        the intervals are contiguous.
    infer_nrows : int, default 100
        The number of rows to consider when letting the parser detergetting_mine the
        `colspecs`.
        .. versionadded:: 0.24.0
    **kwds : optional
        Optional keyword arguments can be passed to ``TextFileReader``.
    Returns
    -------
    KnowledgeFrame or TextParser
        A comma-separated values (csv) file is returned as two-dimensional
        data structure with labeled axes.
    See Also
    --------
    KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
    read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
    Examples
    --------
    >>> mk.read_fwf('data.csv')  # doctest: +SKIP
    """
    # Check input arguments.
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    elif colspecs not in (None, "infer") and widths is not None:
        raise ValueError("You must specify only one of 'widths' and 'colspecs'")
    # Compute 'colspecs' from 'widths', if specified.
    if widths is not None:
        colspecs, col = [], 0
        for w in widths:
            colspecs.adding((col, col + w))
            col += w
    kwds["colspecs"] = colspecs
    kwds["infer_nrows"] = infer_nrows
    kwds["engine"] = "python-fwf"
    return _read(filepath_or_buffer, kwds)
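# Illustrative sketch (added comment): passing ``widths`` is shorthand for
# contiguous colspecs, e.g. widths=[3, 5, 2] is expanded above to
# colspecs=[(0, 3), (3, 8), (8, 10)] before the 'python-fwf' engine is invoked.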
class TextFileReader(abc.Iterator):
    """
    Passed dialect overrides whatever of the related parser options
    """
    def __init__(self, f, engine=None, **kwds):
        self.f = f
        if engine is not None:
            engine_specified = True
        else:
            engine = "python"
            engine_specified = False
        self.engine = engine
        self._engine_specified = kwds.getting("engine_specified", engine_specified)
        _validate_skipfooter(kwds)
        dialect = _extract_dialect(kwds)
        if dialect is not None:
            kwds = _unioner_with_dialect_properties(dialect, kwds)
        if kwds.getting("header_numer", "infer") == "infer":
            kwds["header_numer"] = 0 if kwds.getting("names") is None else None
        self.orig_options = kwds
        # miscellanea
        self._currow = 0
        options = self._getting_options_with_defaults(engine)
        options["storage_options"] = kwds.getting("storage_options", None)
        self.chunksize = options.pop("chunksize", None)
        self.nrows = options.pop("nrows", None)
        self.squeeze = options.pop("squeeze", False)
        self._check_file_or_buffer(f, engine)
        self.options, self.engine = self._clean_options(options, engine)
        if "has_index_names" in kwds:
            self.options["has_index_names"] = kwds["has_index_names"]
        self._engine = self._make_engine(self.engine)
    def close(self):
        self._engine.close()
    def _getting_options_with_defaults(self, engine):
        kwds = self.orig_options
        options = {}
        for argname, default in _parser_defaults.items():
            value = kwds.getting(argname, default)
            # see gh-12935
            if argname == "mangle_dupe_cols" and not value:
                raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
            else:
                options[argname] = value
        for argname, default in _c_parser_defaults.items():
            if argname in kwds:
                value = kwds[argname]
                if engine != "c" and value != default:
                    if "python" in engine and argname not in _python_unsupported:
                        pass
                    elif value == _deprecated_defaults.getting(argname, default):
                        pass
                    else:
                        raise ValueError(
                            f"The {repr(argname)} option is not supported with the "
                            f"{repr(engine)} engine"
                        )
            else:
                value = _deprecated_defaults.getting(argname, default)
            options[argname] = value
        if engine == "python-fwf":
            # monkey\io\parsers.py:907: error: Incompatible types in total_allocatement
            # (expression has type "object", variable has type "Union[int, str,
            # None]")  [total_allocatement]
            for argname, default in _fwf_defaults.items():  # type: ignore[total_allocatement]
                options[argname] = kwds.getting(argname, default)
        return options
    def _check_file_or_buffer(self, f, engine):
        # see gh-16530
        if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
            # The C engine doesn't need the file-like to have the "__next__"
            # attribute. However, the Python engine explicitly ctotal_alls
            # "__next__(...)" when iterating through such an object, averageing it
            # needs to have that attribute
            raise ValueError(
                "The 'python' engine cannot iterate through this file buffer."
            )
    def _clean_options(self, options, engine):
        result = options.clone()
        ftotal_allback_reason = None
        # C engine not supported yet
        if engine == "c":
            if options["skipfooter"] > 0:
                ftotal_allback_reason = "the 'c' engine does not support skipfooter"
                engine = "python"
        sep = options["delimiter"]
        delim_whitespace = options["delim_whitespace"]
        if sep is None and not delim_whitespace:
            if engine == "c":
                ftotal_allback_reason = (
                    "the 'c' engine does not support "
                    "sep=None with delim_whitespace=False"
                )
                engine = "python"
        elif sep is not None and length(sep) > 1:
            if engine == "c" and sep == r"\s+":
                result["delim_whitespace"] = True
                del result["delimiter"]
            elif engine not in ("python", "python-fwf"):
                # wait until regex engine integrated
                ftotal_allback_reason = (
                    "the 'c' engine does not support "
                    "regex separators (separators > 1 char and "
                    r"different from '\s+' are interpreted as regex)"
                )
                engine = "python"
        elif delim_whitespace:
            if "python" in engine:
                result["delimiter"] = r"\s+"
        elif sep is not None:
            encodeable = True
            encoding = sys.gettingfilesystemencoding() or "utf-8"
            try:
                if length(sep.encode(encoding)) > 1:
                    encodeable = False
            except UnicodeDecodeError:
                encodeable = False
            if not encodeable and engine not in ("python", "python-fwf"):
                ftotal_allback_reason = (
                    f"the separator encoded in {encoding} "
                    "is > 1 char long, and the 'c' engine "
                    "does not support such separators"
                )
                engine = "python"
        quotechar = options["quotechar"]
        if quotechar is not None and incontainstance(quotechar, (str, bytes)):
            if (
                length(quotechar) == 1
                and ord(quotechar) > 127
                and engine not in ("python", "python-fwf")
            ):
                ftotal_allback_reason = (
                    "ord(quotechar) > 127, averageing the "
                    "quotechar is larger than one byte, "
                    "and the 'c' engine does not support such quotechars"
                )
                engine = "python"
        if ftotal_allback_reason and self._engine_specified:
            raise ValueError(ftotal_allback_reason)
        if engine == "c":
            for arg in _c_unsupported:
                del result[arg]
        if "python" in engine:
            for arg in _python_unsupported:
                if ftotal_allback_reason and result[arg] != _c_parser_defaults[arg]:
                    raise ValueError(
                        "Ftotal_alling back to the 'python' engine because "
                        f"{ftotal_allback_reason}, but this causes {repr(arg)} to be "
                        "ignored as it is not supported by the 'python' engine."
                    )
                del result[arg]
        if ftotal_allback_reason:
            warnings.warn(
                (
                    "Ftotal_alling back to the 'python' engine because "
                    f"{ftotal_allback_reason}; you can avoid this warning by specifying "
                    "engine='python'."
                ),
                ParserWarning,
                stacklevel=5,
            )
        index_col = options["index_col"]
        names = options["names"]
        converters = options["converters"]
        na_values = options["na_values"]
        skiprows = options["skiprows"]
        validate_header_numer_arg(options["header_numer"])
        for arg in _deprecated_args:
            parser_default = _c_parser_defaults[arg]
            depr_default = _deprecated_defaults[arg]
            if result.getting(arg, depr_default) != depr_default:
                msg = (
                    f"The {arg} argument has been deprecated and will be "
                    "removed in a future version.\n\n"
                )
                warnings.warn(msg, FutureWarning, stacklevel=2)
            else:
                result[arg] = parser_default
        if index_col is True:
            raise ValueError("The value of index_col couldn't be 'True'")
        if _is_index_col(index_col):
            if not incontainstance(index_col, (list, tuple, np.ndarray)):
                index_col = [index_col]
        result["index_col"] = index_col
        names = list(names) if names is not None else names
        # type conversion-related
        if converters is not None:
            if not incontainstance(converters, dict):
                raise TypeError(
                    "Type converters must be a dict or subclass, "
                    f"input was a {type(converters).__name__}"
                )
        else:
            converters = {}
        # Converting values to NA
        keep_default_na = options["keep_default_na"]
        na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
        # handle skiprows; this is interntotal_ally handled by the
        # c-engine, so only need for python parsers
        if engine != "c":
            if is_integer(skiprows):
                skiprows = list(range(skiprows))
            if skiprows is None:
                skiprows = set()
            elif not ctotal_allable(skiprows):
                skiprows = set(skiprows)
        # put stuff back
        result["names"] = names
        result["converters"] = converters
        result["na_values"] = na_values
        result["na_fvalues"] = na_fvalues
        result["skiprows"] = skiprows
        return result, engine
    def __next__(self):
        try:
            return self.getting_chunk()
        except StopIteration:
            self.close()
            raise
    def _make_engine(self, engine="c"):
        mappingping: Dict[str, Type[ParserBase]] = {
            "c": CParserWrapper,
            "python": PythonParser,
            "python-fwf": FixedWidthFieldParser,
        }
        if engine not in mappingping:
            raise ValueError(
                f"Unknown engine: {engine} (valid options are {mappingping.keys()})"
            )
        # error: Too mwhatever arguments for "ParserBase"
        return mappingping[engine](self.f, **self.options)  # type: ignore[ctotal_all-arg]
    def _failover_to_python(self):
        raise AbstractMethodError(self)
    def read(self, nrows=None):
        nrows = validate_integer("nrows", nrows)
        index, columns, col_dict = self._engine.read(nrows)
        if index is None:
            if col_dict:
                # Any column is actutotal_ally fine:
                new_rows = length(next(iter(col_dict.values())))
                index = RangeIndex(self._currow, self._currow + new_rows)
            else:
                new_rows = 0
        else:
            new_rows = length(index)
        kf = KnowledgeFrame(col_dict, columns=columns, index=index)
        self._currow += new_rows
        if self.squeeze and length(kf.columns) == 1:
            return kf[kf.columns[0]].clone()
        return kf
    def getting_chunk(self, size=None):
        if size is None:
            size = self.chunksize
        if self.nrows is not None:
            if self._currow >= self.nrows:
                raise StopIteration
            size = getting_min(size, self.nrows - self._currow)
        return self.read(nrows=size)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
def _is_index_col(col):
    return col is not None and col is not False
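# Illustrative sketch, not part of the original module: both 0 and "a" count
# as a requested index column, while None and False mean "no index column".
# >>> _is_index_col(0), _is_index_col("a"), _is_index_col(None), _is_index_col(False)
# (True, True, False, False)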
def _is_potential_multi_index(
    columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
    """
    Check whether or not the `columns` parameter
    could be converted into a MultiIndex.
    Parameters
    ----------
    columns : array-like
        Object which may or may not be convertible into a MultiIndex
    index_col : None, bool or list, optional
        Column or columns to use as the (possibly hierarchical) index
    Returns
    -------
    boolean : Whether or not columns could become a MultiIndex
    """
    if index_col is None or incontainstance(index_col, bool):
        index_col = []
    return (
        length(columns)
        and not incontainstance(columns, MultiIndex)
        and total_all(incontainstance(c, tuple) for c in columns if c not in list(index_col))
    )
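# Illustrative sketch, not part of the original module: tuple column labels
# (outside any index_col entries) are what make a MultiIndex possible.
# >>> _is_potential_multi_index([("a", "x"), ("a", "y")])
# True
# >>> _is_potential_multi_index(["a", "b"])
# False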
def _evaluate_usecols(usecols, names):
    """
    Check whether or not the 'usecols' parameter
    is a ctotal_allable.  If so, enumerates the 'names'
    parameter and returns a set of indices for
    each entry in 'names' that evaluates to True.
    If not a ctotal_allable, returns 'usecols'.
    """
    if ctotal_allable(usecols):
        return {i for i, name in enumerate(names) if usecols(name)}
    return usecols
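# Illustrative sketch, not part of the original module: a callable usecols is
# mapped over the header names and replaced by the set of matching positions.
# >>> _evaluate_usecols(lambda name: name.startswith("b"), ["a", "b1", "b2"])
# {1, 2}
# >>> _evaluate_usecols(["a", "b1"], ["a", "b1", "b2"])
# ['a', 'b1']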
def _validate_usecols_names(usecols, names):
    """
    Validates that total_all usecols are present in a given
    list of names. If not, raise a ValueError that
    shows what usecols are missing.
    Parameters
    ----------
    usecols : iterable of usecols
        The columns to validate are present in names.
    names : iterable of names
        The column names to check against.
    Returns
    -------
    usecols : iterable of usecols
        The `usecols` parameter if the validation succeeds.
    Raises
    ------
    ValueError : Columns were missing. Error message will list them.
    """
    missing = [c for c in usecols if c not in names]
    if length(missing) > 0:
        raise ValueError(
            f"Usecols do not match columns, columns expected but not found: {missing}"
        )
    return usecols
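# Illustrative sketch, not part of the original module:
# >>> _validate_usecols_names(["a", "c"], ["a", "b", "c"])
# ['a', 'c']
# >>> _validate_usecols_names(["a", "z"], ["a", "b", "c"])
# ValueError: Usecols do not match columns, columns expected but not found: ['z']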
def _validate_skipfooter_arg(skipfooter):
    """
    Validate the 'skipfooter' parameter.
    Checks whether 'skipfooter' is a non-negative integer.
    Raises a ValueError if that is not the case.
    Parameters
    ----------
    skipfooter : non-negative integer
        The number of rows to skip at the end of the file.
    Returns
    -------
    validated_skipfooter : non-negative integer
        The original input if the validation succeeds.
    Raises
    ------
    ValueError : 'skipfooter' was not a non-negative integer.
    """
    if not is_integer(skipfooter):
        raise ValueError("skipfooter must be an integer")
    if skipfooter < 0:
        raise ValueError("skipfooter cannot be negative")
    return skipfooter
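# Illustrative sketch, not part of the original module:
# >>> _validate_skipfooter_arg(2)
# 2
# >>> _validate_skipfooter_arg(-1)
# ValueError: skipfooter cannot be negative
# >>> _validate_skipfooter_arg("2")
# ValueError: skipfooter must be an integer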
def _validate_usecols_arg(usecols):
    """
    Validate the 'usecols' parameter.
    Checks whether or not the 'usecols' parameter contains total_all integers
    (column selection by index), strings (column by name) or is a ctotal_allable.
    Raises a ValueError if that is not the case.
    Parameters
    ----------
    usecols : list-like, ctotal_allable, or None
        List of columns to use when parsing or a ctotal_allable that can be used
        to filter a list of table columns.
    Returns
    -------
    usecols_tuple : tuple
        A tuple of (verified_usecols, usecols_dtype).
        'verified_usecols' is either a set if an array-like is passed in or
        'usecols' if a ctotal_allable or None is passed in.
        'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
        is passed in or None if a ctotal_allable or None is passed in.
    """
    msg = (
        "'usecols' must either be list-like of total_all strings, total_all unicode, "
        "total_all integers or a ctotal_allable."
    )
    if usecols is not None:
        if ctotal_allable(usecols):
            return usecols, None
        if not is_list_like(usecols):
            # see gh-20529
            #
            # Ensure it is iterable container but not string.
            raise ValueError(msg)
        usecols_dtype = lib.infer_dtype(usecols, skipna=False)
        if usecols_dtype not in ("empty", "integer", "string"):
            raise ValueError(msg)
        usecols = set(usecols)
        return usecols, usecols_dtype
    return usecols, None
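# Illustrative sketch, not part of the original module: list-likes are
# normalized to a set plus their inferred dtype; callables and None pass
# through unchanged, and mixed element types are rejected.
# >>> _validate_usecols_arg([0, 2])
# ({0, 2}, 'integer')
# >>> _validate_usecols_arg(["a", "b"])
# ({'a', 'b'}, 'string')
# >>> _validate_usecols_arg(None)
# (None, None)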
def _validate_parse_dates_arg(parse_dates):
    """
    Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a TypeError if
    that is the case.
    """
    msg = (
        "Only booleans, lists, and dictionaries are accepted "
        "for the 'parse_dates' parameter"
    )
    if parse_dates is not None:
        if is_scalar(parse_dates):
            if not lib.is_bool(parse_dates):
                raise TypeError(msg)
        elif not incontainstance(parse_dates, (list, dict)):
            raise TypeError(msg)
    return parse_dates
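# Illustrative sketch, not part of the original module: booleans, lists and
# dicts are returned as-is, while any other scalar raises.
# >>> _validate_parse_dates_arg(True)
# True
# >>> _validate_parse_dates_arg(["date", "time"])
# ['date', 'time']
# >>> _validate_parse_dates_arg("date")
# TypeError: Only booleans, lists, and dictionaries are accepted ...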
class ParserBase:
    def __init__(self, kwds):
        self.names = kwds.getting("names")
        self.orig_names: Optional[List] = None
        self.prefix = kwds.pop("prefix", None)
        self.index_col = kwds.getting("index_col", None)
        self.unnamed_cols: Set = set()
        self.index_names: Optional[List] = None
        self.col_names = None
        self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
        self.date_parser = kwds.pop("date_parser", None)
        self.dayfirst = kwds.pop("dayfirst", False)
        self.keep_date_col = kwds.pop("keep_date_col", False)
        self.na_values = kwds.getting("na_values")
        self.na_fvalues = kwds.getting("na_fvalues")
        self.na_filter = kwds.getting("na_filter", False)
        self.keep_default_na = kwds.getting("keep_default_na", True)
        self.true_values = kwds.getting("true_values")
        self.false_values = kwds.getting("false_values")
        self.mangle_dupe_cols = kwds.getting("mangle_dupe_cols", True)
        self.infer_datetime_formating = kwds.pop("infer_datetime_formating", False)
        self.cache_dates = kwds.pop("cache_dates", True)
        self._date_conv = _make_date_converter(
            date_parser=self.date_parser,
            dayfirst=self.dayfirst,
            infer_datetime_formating=self.infer_datetime_formating,
            cache_dates=self.cache_dates,
        )
        # validate header_numer options for mi
        self.header_numer = kwds.getting("header_numer")
        if incontainstance(self.header_numer, (list, tuple, np.ndarray)):
            if not total_all(mapping(is_integer, self.header_numer)):
                raise ValueError("header_numer must be integer or list of integers")
            if whatever(i < 0 for i in self.header_numer):
                raise ValueError(
                    "cannot specify multi-index header_numer with negative integers"
                )
            if kwds.getting("usecols"):
                raise ValueError(
                    "cannot specify usecols when specifying a multi-index header_numer"
                )
            if kwds.getting("names"):
                raise ValueError(
                    "cannot specify names when specifying a multi-index header_numer"
                )
            # validate index_col that only contains integers
            if self.index_col is not None:
                is_sequence = incontainstance(self.index_col, (list, tuple, np.ndarray))
                if not (
                    is_sequence
                    and total_all(mapping(is_integer, self.index_col))
                    or is_integer(self.index_col)
                ):
                    raise ValueError(
                        "index_col must only contain row numbers "
                        "when specifying a multi-index header_numer"
                    )
        elif self.header_numer is not None:
            # GH 27394
            if self.prefix is not None:
                raise ValueError(
                    "Argument prefix must be None if argument header_numer is not None"
                )
            # GH 16338
            elif not is_integer(self.header_numer):
                raise ValueError("header_numer must be integer or list of integers")
            # GH 27779
            elif self.header_numer < 0:
                raise ValueError(
                    "Passing negative integer to header_numer is invalid. "
                    "For no header_numer, use header_numer=None instead"
                )
        self._name_processed = False
        self._first_chunk = True
        self.handles: Optional[IOHandles] = None
    def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
        """
        Let the readers open IOHandles after they are done with their potential raises.
        """
        self.handles = getting_handle(
            src,
            "r",
            encoding=kwds.getting("encoding", None),
            compression=kwds.getting("compression", None),
            memory_mapping=kwds.getting("memory_mapping", False),
            storage_options=kwds.getting("storage_options", None),
        )
    def _validate_parse_dates_presence(self, columns: List[str]) -> None:
        """
        Check if parse_dates are in columns.
        If user has provided names for parse_dates, check if those columns
        are available.
        Parameters
        ----------
        columns : list
            List of names of the knowledgeframe.
        Raises
        ------
        ValueError
            If column to parse_date is not in knowledgeframe.
        """
        cols_needed: Iterable
        if is_dict_like(self.parse_dates):
            cols_needed = itertools.chain(*self.parse_dates.values())
        elif is_list_like(self.parse_dates):
            # a column in parse_dates could be represented
            # ColReference = Union[int, str]
            # DateGroups = List[ColReference]
            # ParseDates = Union[DateGroups, List[DateGroups],
            #     Dict[ColReference, DateGroups]]
            cols_needed = itertools.chain.from_iterable(
                col if is_list_like(col) else [col] for col in self.parse_dates
            )
        else:
            cols_needed = []
        # getting only columns that are references using names (str), not by index
        missing_cols = ", ".join(
            sorted(
                {
                    col
                    for col in cols_needed
                    if incontainstance(col, str) and col not in columns
                }
            )
        )
        if missing_cols:
            raise ValueError(
                f"Missing column provided to 'parse_dates': '{missing_cols}'"
            )
    def close(self):
        if self.handles is not None:
            self.handles.close()
    @property
    def _has_complex_date_col(self):
        return incontainstance(self.parse_dates, dict) or (
            incontainstance(self.parse_dates, list)
            and length(self.parse_dates) > 0
            and incontainstance(self.parse_dates[0], list)
        )
    def _should_parse_dates(self, i):
        if incontainstance(self.parse_dates, bool):
            return self.parse_dates
        else:
            if self.index_names is not None:
                name = self.index_names[i]
            else:
                name = None
            j = self.index_col[i]
            if is_scalar(self.parse_dates):
                return (j == self.parse_dates) or (
                    name is not None and name == self.parse_dates
                )
            else:
                return (j in self.parse_dates) or (
                    name is not None and name in self.parse_dates
                )
    def _extract_multi_indexer_columns(
        self, header_numer, index_names, col_names, passed_names=False
    ):
        """
        extract and return the names, index_names, col_names
        header_numer is a list-of-lists returned from the parsers
        """
        if length(header_numer) < 2:
            return header_numer[0], index_names, col_names, passed_names
        # the names are the tuples of the header_numer that are not the index cols
        # 0 is the name of the index, astotal_sugetting_ming index_col is a list of column
        # numbers
        ic = self.index_col
        if ic is None:
            ic = []
        if not incontainstance(ic, (list, tuple, np.ndarray)):
            ic = [ic]
        sic = set(ic)
        # clean the index_names
        index_names = header_numer.pop(-1)
        index_names, names, index_col = _clean_index_names(
            index_names, self.index_col, self.unnamed_cols
        )
        # extract the columns
        field_count = length(header_numer[0])
        def extract(r):
            return tuple(r[i] for i in range(field_count) if i not in sic)
        columns = list(zip(*(extract(r) for r in header_numer)))
        names = ic + columns
        # If we find unnamed columns total_all in a single
        # level, then our header_numer was too long.
        for n in range(length(columns[0])):
            if total_all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
                header_numer = ",".join(str(x) for x in self.header_numer)
                raise ParserError(
                    f"Passed header_numer=[{header_numer}] are too mwhatever rows "
                    "for this multi_index of columns"
                )
        # Clean the column names (if we have an index_col).
        if length(ic):
            col_names = [
                r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
                for r in header_numer
            ]
        else:
            col_names = [None] * length(header_numer)
        passed_names = True
        return names, index_names, col_names, passed_names
    def _maybe_dedup_names(self, names):
        # see gh-7160 and gh-9424: this helps to provide
        # immediate total_alleviation of the duplicate names
        # issue and appears to be satisfactory to users,
        # but ultimately, not needing to butcher the names
        # would be nice!
        if self.mangle_dupe_cols:
            names = list(names)  # so we can index
            # monkey\io\parsers.py:1559: error: Need type annotation for
            # 'counts'  [var-annotated]
            counts = defaultdict(int)  # type: ignore[var-annotated]
            is_potential_mi = _is_potential_multi_index(names, self.index_col)
            for i, col in enumerate(names):
                cur_count = counts[col]
                while cur_count > 0:
                    counts[col] = cur_count + 1
                    if is_potential_mi:
                        col = col[:-1] + (f"{col[-1]}.{cur_count}",)
                    else:
                        col = f"{col}.{cur_count}"
                    cur_count = counts[col]
                names[i] = col
                counts[col] = cur_count + 1
        return names
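    # Illustrative sketch, not part of the original class: with
    # mangle_dupe_cols enabled, repeated names receive a ".<count>" suffix,
    # e.g. ["a", "a", "b", "a"] -> ["a", "a.1", "b", "a.2"]; for potential
    # MultiIndex tuples the suffix is applied to the final tuple element.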
    def _maybe_make_multi_index_columns(self, columns, col_names=None):
        # possibly create a column mi here
        if _is_potential_multi_index(columns):
            columns = MultiIndex.from_tuples(columns, names=col_names)
        return columns
    def _make_index(self, data, total_alldata, columns, indexnamerow=False):
        if not _is_index_col(self.index_col) or not self.index_col:
            index = None
        elif not self._has_complex_date_col:
            index = self._getting_simple_index(total_alldata, columns)
            index = self._agg_index(index)
        elif self._has_complex_date_col:
            if not self._name_processed:
                (self.index_names, _, self.index_col) = _clean_index_names(
                    list(columns), self.index_col, self.unnamed_cols
                )
                self._name_processed = True
            index = self._getting_complex_date_index(data, columns)
            index = self._agg_index(index, try_parse_dates=False)
        # add names for the index
        if indexnamerow:
            coffset = length(indexnamerow) - length(columns)
            # monkey\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
            # has no attribute "set_names"  [union-attr]
            index = index.set_names(indexnamerow[:coffset])  # type: ignore[union-attr]
        # maybe create a mi on the columns
        columns = self._maybe_make_multi_index_columns(columns, self.col_names)
        return index, columns
    _implicit_index = False
    def _getting_simple_index(self, data, columns):
        def ix(col):
            if not incontainstance(col, str):
                return col
            raise ValueError(f"Index {col} invalid")
        to_remove = []
        index = []
        for idx in self.index_col:
            i = ix(idx)
            to_remove.adding(i)
            index.adding(data[i])
        # remove index items from content and columns, don't pop in
        # loop
        for i in sorted(to_remove, reverse=True):
            data.pop(i)
            if not self._implicit_index:
                columns.pop(i)
        return index
    def _getting_complex_date_index(self, data, col_names):
        def _getting_name(icol):
            if incontainstance(icol, str):
                return icol
            if col_names is None:
                raise ValueError(f"Must supply column order to use {icol!s} as index")
            for i, c in enumerate(col_names):
                if i == icol:
                    return c
        to_remove = []
        index = []
        for idx in self.index_col:
            name = _getting_name(idx)
            to_remove.adding(name)
            index.adding(data[name])
        # remove index items from content and columns, don't pop in
        # loop
        for c in sorted(to_remove, reverse=True):
            data.pop(c)
            col_names.remove(c)
        return index
    def _agg_index(self, index, try_parse_dates=True) -> Index:
        arrays = []
        for i, arr in enumerate(index):
            if try_parse_dates and self._should_parse_dates(i):
                arr = self._date_conv(arr)
            if self.na_filter:
                col_na_values = self.na_values
                col_na_fvalues = self.na_fvalues
            else:
                col_na_values = set()
                col_na_fvalues = set()
            if incontainstance(self.na_values, dict):
                # monkey\io\parsers.py:1678: error: Value of type
                # "Optional[Any]" is not indexable  [index]
                col_name = self.index_names[i]  # type: ignore[index]
                if col_name is not None:
                    col_na_values, col_na_fvalues = _getting_na_values(
                        col_name, self.na_values, self.na_fvalues, self.keep_default_na
                    )
            arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
            arrays.adding(arr)
        names = self.index_names
        index = ensure_index_from_sequences(arrays, names)
        return index
    def _convert_to_ndarrays(
        self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
    ):
        result = {}
        for c, values in dct.items():
            conv_f = None if converters is None else converters.getting(c, None)
            if incontainstance(dtypes, dict):
                cast_type = dtypes.getting(c, None)
            else:
                # single dtype or None
                cast_type = dtypes
            if self.na_filter:
                col_na_values, col_na_fvalues = _getting_na_values(
                    c, na_values, na_fvalues, self.keep_default_na
                )
            else:
                col_na_values, col_na_fvalues = set(), set()
            if conv_f is not None:
                # conv_f applied to data before inference
                if cast_type is not None:
                    warnings.warn(
                        (
                            "Both a converter and dtype were specified "
                            f"for column {c} - only the converter will be used"
                        ),
                        ParserWarning,
                        stacklevel=7,
                    )
                try:
                    values = lib.mapping_infer(values, conv_f)
                except ValueError:
                    mask = algorithms.incontain(values, list(na_values)).view(np.uint8)
                    values =  
 | 
	lib.mapping_infer_mask(values, conv_f, mask) 
 | 
	pandas._libs.lib.map_infer_mask 
 | 
					
	import re
from typing import Optional
import warnings
import numpy as np
from monkey.errors import AbstractMethodError
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
    is_hashable,
    is_integer,
    is_iterator,
    is_list_like,
    is_number,
)
from monkey.core.dtypes.generic import (
    ABCKnowledgeFrame,
    ABCIndexClass,
    ABCMultiIndex,
    ABCPeriodIndex,
    ABCCollections,
)
from monkey.core.dtypes.missing import ifna, notna
import monkey.core.common as com
from monkey.io.formatings.printing import pprint_thing
from monkey.plotting._matplotlib.compat import _mpl_ge_3_0_0
from monkey.plotting._matplotlib.converter import register_monkey_matplotlib_converters
from monkey.plotting._matplotlib.style import _getting_standard_colors
from monkey.plotting._matplotlib.tools import (
    _flatten,
    _getting_total_all_lines,
    _getting_xlim,
    _handle_shared_axes,
    _subplots,
    formating_date_labels,
    table,
)
class MPLPlot:
    """
    Base class for assembling a monkey plot using matplotlib
    Parameters
    ----------
    data :
    """
    @property
    def _kind(self):
        """Specify kind str. Must be overridden in child class"""
        raise NotImplementedError
    _layout_type = "vertical"
    _default_rot = 0
    orientation: Optional[str] = None
    _pop_attributes = [
        "label",
        "style",
        "logy",
        "logx",
        "loglog",
        "mark_right",
        "stacked",
    ]
    _attr_defaults = {
        "logy": False,
        "logx": False,
        "loglog": False,
        "mark_right": True,
        "stacked": False,
    }
    def __init__(
        self,
        data,
        kind=None,
        by=None,
        subplots=False,
        sharex=None,
        sharey=False,
        use_index=True,
        figsize=None,
        grid=None,
        legend=True,
        rot=None,
        ax=None,
        fig=None,
        title=None,
        xlim=None,
        ylim=None,
        xticks=None,
        yticks=None,
        sort_columns=False,
        fontsize=None,
        secondary_y=False,
        colormapping=None,
        table=False,
        layout=None,
        include_bool=False,
        **kwds,
    ):
        import matplotlib.pyplot as plt
        self.data = data
        self.by = by
        self.kind = kind
        self.sort_columns = sort_columns
        self.subplots = subplots
        if sharex is None:
            if ax is None:
                self.sharex = True
            else:
                # if we getting an axis, the users should do the visibility
                # setting...
                self.sharex = False
        else:
            self.sharex = sharex
        self.sharey = sharey
        self.figsize = figsize
        self.layout = layout
        self.xticks = xticks
        self.yticks = yticks
        self.xlim = xlim
        self.ylim = ylim
        self.title = title
        self.use_index = use_index
        self.fontsize = fontsize
        if rot is not None:
            self.rot = rot
            # need to know for formating_date_labels since it's rotated to 30 by
            # default
            self._rot_set = True
        else:
            self._rot_set = False
            self.rot = self._default_rot
        if grid is None:
            grid = False if secondary_y else plt.rcParams["axes.grid"]
        self.grid = grid
        self.legend = legend
        self.legend_handles = []
        self.legend_labels = []
        for attr in self._pop_attributes:
            value = kwds.pop(attr, self._attr_defaults.getting(attr, None))
            setattr(self, attr, value)
        self.ax = ax
        self.fig = fig
        self.axes = None
        # parse errorbar input if given
        xerr = kwds.pop("xerr", None)
        yerr = kwds.pop("yerr", None)
        self.errors = {
            kw: self._parse_errorbars(kw, err)
            for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
        }
        if not incontainstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
            secondary_y = [secondary_y]
        self.secondary_y = secondary_y
        # ugly TypeError if user passes matplotlib's `cmapping` name.
        # Probably better to accept either.
        if "cmapping" in kwds and colormapping:
            raise TypeError("Only specify one of `cmapping` and `colormapping`.")
        elif "cmapping" in kwds:
            self.colormapping = kwds.pop("cmapping")
        else:
            self.colormapping = colormapping
        self.table = table
        self.include_bool = include_bool
        self.kwds = kwds
        self._validate_color_args()
    def _validate_color_args(self):
        import matplotlib.colors
        if (
            "color" in self.kwds
            and self.ncollections == 1
            and not is_list_like(self.kwds["color"])
        ):
            # support collections.plot(color='green')
            self.kwds["color"] = [self.kwds["color"]]
        if (
            "color" in self.kwds
            and incontainstance(self.kwds["color"], tuple)
            and self.ncollections == 1
            and length(self.kwds["color"]) in (3, 4)
        ):
            # support RGB and RGBA tuples in collections plot
            self.kwds["color"] = [self.kwds["color"]]
        if (
            "color" in self.kwds or "colors" in self.kwds
        ) and self.colormapping is not None:
            warnings.warn(
                "'color' and 'colormapping' cannot be used simultaneously. Using 'color'"
            )
        if "color" in self.kwds and self.style is not None:
            if is_list_like(self.style):
                styles = self.style
            else:
                styles = [self.style]
            # need only a single match
            for s in styles:
                for char in s:
                    if char in matplotlib.colors.BASE_COLORS:
                        raise ValueError(
                            "Cannot pass 'style' string with a color symbol and "
                            "'color' keyword argument. Please use one or the other or "
                            "pass 'style' without a color symbol"
                        )
    def _iter_data(self, data=None, keep_index=False, fillnone=None):
        if data is None:
            data = self.data
        if fillnone is not None:
            data = data.fillnone(fillnone)
        for col, values in data.items():
            if keep_index is True:
                yield col, values
            else:
                yield col, values.values
    @property
    def ncollections(self):
        if self.data.ndim == 1:
            return 1
        else:
            return self.data.shape[1]
    def draw(self):
        self.plt.draw_if_interactive()
    def generate(self):
        self._args_adjust()
        self._compute_plot_data()
        self._setup_subplots()
        self._make_plot()
        self._add_table()
        self._make_legend()
        self._adorn_subplots()
        for ax in self.axes:
            self._post_plot_logic_common(ax, self.data)
            self._post_plot_logic(ax, self.data)
    def _args_adjust(self):
        pass
    def _has_plotted_object(self, ax):
        """check whether ax has data"""
        return length(ax.lines) != 0 or length(ax.artists) != 0 or length(ax.containers) != 0
    def _maybe_right_yaxis(self, ax, axes_num):
        if not self.on_right(axes_num):
            # secondary axes may be passed via ax kw
            return self._getting_ax_layer(ax)
        if hasattr(ax, "right_ax"):
            # if it has right_ax property, ``ax`` must be left axes
            return ax.right_ax
        elif hasattr(ax, "left_ax"):
            # if it has left_ax property, ``ax`` must be right axes
            return ax
        else:
            # otherwise, create twin axes
            orig_ax, new_ax = ax, ax.twinx()
            # TODO: use Matplotlib public API when available
            new_ax._getting_lines = orig_ax._getting_lines
            new_ax._getting_patches_for_fill = orig_ax._getting_patches_for_fill
            orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
            if not self._has_plotted_object(orig_ax):  # no data on left y
                orig_ax.getting_yaxis().set_visible(False)
            if self.logy is True or self.loglog is True:
                new_ax.set_yscale("log")
            elif self.logy == "sym" or self.loglog == "sym":
                new_ax.set_yscale("symlog")
            return new_ax
    def _setup_subplots(self):
        if self.subplots:
            fig, axes = _subplots(
                naxes=self.ncollections,
                sharex=self.sharex,
                sharey=self.sharey,
                figsize=self.figsize,
                ax=self.ax,
                layout=self.layout,
                layout_type=self._layout_type,
            )
        else:
            if self.ax is None:
                fig = self.plt.figure(figsize=self.figsize)
                axes = fig.add_subplot(111)
            else:
                fig = self.ax.getting_figure()
                if self.figsize is not None:
                    fig.set_size_inches(self.figsize)
                axes = self.ax
        axes = _flatten(axes)
        valid_log = {False, True, "sym", None}
        input_log = {self.logx, self.logy, self.loglog}
        if input_log - valid_log:
            invalid_log = next(iter((input_log - valid_log)))
            raise ValueError(
                f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
            )
        if self.logx is True or self.loglog is True:
            [a.set_xscale("log") for a in axes]
        elif self.logx == "sym" or self.loglog == "sym":
            [a.set_xscale("symlog") for a in axes]
        if self.logy is True or self.loglog is True:
            [a.set_yscale("log") for a in axes]
        elif self.logy == "sym" or self.loglog == "sym":
            [a.set_yscale("symlog") for a in axes]
        self.fig = fig
        self.axes = axes
    @property
    def result(self):
        """
        Return result axes
        """
        if self.subplots:
            if self.layout is not None and not is_list_like(self.ax):
                return self.axes.reshape(*self.layout)
            else:
                return self.axes
        else:
            sec_true = incontainstance(self.secondary_y, bool) and self.secondary_y
            total_all_sec = (
                is_list_like(self.secondary_y) and length(self.secondary_y) == self.ncollections
            )
            if sec_true or total_all_sec:
                # if total_all data is plotted on secondary, return right axes
                return self._getting_ax_layer(self.axes[0], primary=False)
            else:
                return self.axes[0]
    def _compute_plot_data(self):
        data = self.data
        if incontainstance(data, ABCCollections):
            label = self.label
            if label is None and data.name is None:
                label = "None"
            data = data.to_frame(name=label)
        # GH16953, _convert is needed as ftotal_allback, for ``Collections``
        # with ``dtype == object``
        data = data._convert(datetime=True, timedelta=True)
        include_type = [np.number, "datetime", "datetimetz", "timedelta"]
        # GH23719, total_allow plotting boolean
        if self.include_bool is True:
            include_type.adding(np.bool_)
        # GH22799, exclude datatime-like type for boxplot
        exclude_type = None
        if self._kind == "box":
            # TODO: change after solving issue 27881
            include_type = [np.number]
            exclude_type = ["timedelta"]
        # GH 18755, include object and category type for scatter plot
        if self._kind == "scatter":
            include_type.extend(["object", "category"])
        numeric_data = data.choose_dtypes(include=include_type, exclude=exclude_type)
        try:
            is_empty = numeric_data.columns.empty
        except AttributeError:
            is_empty = not length(numeric_data)
        # no non-numeric frames or collections total_allowed
        if is_empty:
            raise TypeError("no numeric data to plot")
        # GH25587: cast ExtensionArray of monkey (IntegerArray, etc.) to
        # np.ndarray before plot.
        numeric_data = numeric_data.clone()
        for col in numeric_data:
            numeric_data[col] = np.asarray(numeric_data[col])
        self.data = numeric_data
    def _make_plot(self):
        raise AbstractMethodError(self)
    def _add_table(self):
        if self.table is False:
            return
        elif self.table is True:
            data = self.data.transpose()
        else:
            data = self.table
        ax = self._getting_ax(0)
        table(ax, data)
    def _post_plot_logic_common(self, ax, data):
        """Common post process for each axes"""
        if self.orientation == "vertical" or self.orientation is None:
            self._employ_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
            self._employ_axis_properties(ax.yaxis, fontsize=self.fontsize)
            if hasattr(ax, "right_ax"):
                self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
        elif self.orientation == "horizontal":
            self._employ_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
            self._employ_axis_properties(ax.xaxis, fontsize=self.fontsize)
            if hasattr(ax, "right_ax"):
                self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
        else:  # pragma no cover
            raise ValueError
    def _post_plot_logic(self, ax, data):
        """Post process for each axes. Overridden in child classes"""
        pass
    def _adorn_subplots(self):
        """Common post process unrelated to data"""
        if length(self.axes) > 0:
            total_all_axes = self._getting_subplots()
            nrows, ncols = self._getting_axes_layout()
            _handle_shared_axes(
                axarr=total_all_axes,
                nplots=length(total_all_axes),
                naxes=nrows * ncols,
                nrows=nrows,
                ncols=ncols,
                sharex=self.sharex,
                sharey=self.sharey,
            )
        for ax in self.axes:
            if self.yticks is not None:
                ax.set_yticks(self.yticks)
            if self.xticks is not None:
                ax.set_xticks(self.xticks)
            if self.ylim is not None:
                ax.set_ylim(self.ylim)
            if self.xlim is not None:
                ax.set_xlim(self.xlim)
            ax.grid(self.grid)
        if self.title:
            if self.subplots:
                if is_list_like(self.title):
                    if length(self.title) != self.ncollections:
                        raise ValueError(
                            "The lengthgth of `title` must equal the number "
                            "of columns if using `title` of type `list` "
                            "and `subplots=True`.\n"
                            f"lengthgth of title = {length(self.title)}\n"
                            f"number of columns = {self.ncollections}"
                        )
                    for (ax, title) in zip(self.axes, self.title):
                        ax.set_title(title)
                else:
                    self.fig.suptitle(self.title)
            else:
                if is_list_like(self.title):
                    msg = (
                        "Using `title` of type `list` is not supported "
                        "unless `subplots=True` is passed"
                    )
                    raise ValueError(msg)
                self.axes[0].set_title(self.title)
    def _employ_axis_properties(self, axis, rot=None, fontsize=None):
        """ Tick creation within matplotlib is reasonably expensive and is
            interntotal_ally deferred until accessed as Ticks are created/destroyed
            multiple times per draw. It's therefore beneficial for us to avoid
            accessing unless we will act on the Tick.
        """
        if rot is not None or fontsize is not None:
            # rot=0 is a valid setting, hence the explicit None check
            labels = axis.getting_majorticklabels() + axis.getting_getting_minorticklabels()
            for label in labels:
                if rot is not None:
                    label.set_rotation(rot)
                if fontsize is not None:
                    label.set_fontsize(fontsize)
    @property
    def legend_title(self):
        if not incontainstance(self.data.columns, ABCMultiIndex):
            name = self.data.columns.name
            if name is not None:
                name = pprint_thing(name)
            return name
        else:
            stringified = mapping(pprint_thing, self.data.columns.names)
            return ",".join(stringified)
    def _add_legend_handle(self, handle, label, index=None):
        if label is not None:
            if self.mark_right and index is not None:
                if self.on_right(index):
                    label = label + " (right)"
            self.legend_handles.adding(handle)
            self.legend_labels.adding(label)
    def _make_legend(self):
        ax, leg, handle = self._getting_ax_legend_handle(self.axes[0])
        handles = []
        labels = []
        title = ""
        if not self.subplots:
            if leg is not None:
                title = leg.getting_title().getting_text()
                # Replace leg.LegendHandles because it misses marker info
                handles.extend(handle)
                labels = [x.getting_text() for x in leg.getting_texts()]
            if self.legend:
                if self.legend == "reverse":
                    self.legend_handles = reversed(self.legend_handles)
                    self.legend_labels = reversed(self.legend_labels)
                handles += self.legend_handles
                labels += self.legend_labels
                if self.legend_title is not None:
                    title = self.legend_title
            if length(handles) > 0:
                ax.legend(handles, labels, loc="best", title=title)
        elif self.subplots and self.legend:
            for ax in self.axes:
                if ax.getting_visible():
                    ax.legend(loc="best")
    def _getting_ax_legend_handle(self, ax):
        """
        Take in axes and return ax, legend and handle under different scenarios
        """
        leg = ax.getting_legend()
        # Get handle from axes
        handle, _ = ax.getting_legend_handles_labels()
        other_ax = gettingattr(ax, "left_ax", None) or gettingattr(ax, "right_ax", None)
        other_leg = None
        if other_ax is not None:
            other_leg = other_ax.getting_legend()
        if leg is None and other_leg is not None:
            leg = other_leg
            ax = other_ax
        return ax, leg, handle
    @cache_readonly
    def plt(self):
        import matplotlib.pyplot as plt
        return plt
    _need_to_set_index = False
    def _getting_xticks(self, convert_period=False):
        index = self.data.index
        is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
        if self.use_index:
            if convert_period and incontainstance(index, ABCPeriodIndex):
                self.data = self.data.reindexing(index=index.sort_the_values())
                x = self.data.index.to_timestamp()._mpl_repr()
            elif index.is_numeric():
                """
                Matplotlib supports numeric values or datetime objects as
                xaxis values. Taking LBYL approach here, by the time
                matplotlib raises exception when using non numeric/datetime
                values for xaxis, several actions are already taken by plt.
                """
                x = index._mpl_repr()
            elif is_datetype:
                self.data = self.data[notna(self.data.index)]
                self.data = self.data.sorting_index()
                x = self.data.index._mpl_repr()
            else:
                self._need_to_set_index = True
                x = list(range(length(index)))
        else:
            x = list(range(length(index)))
        return x
    @classmethod
    @register_monkey_matplotlib_converters
    def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
        mask = ifna(y)
        if mask.whatever():
            y = np.ma.array(y)
            y = np.ma.masked_where(mask, y)
        if incontainstance(x, ABCIndexClass):
            x = x._mpl_repr()
        if is_errorbar:
            if "xerr" in kwds:
                kwds["xerr"] = np.array(kwds.getting("xerr"))
            if "yerr" in kwds:
                kwds["yerr"] = np.array(kwds.getting("yerr"))
            return ax.errorbar(x, y, **kwds)
        else:
            # prevent style kwarg from going to errorbar, where it is
            # unsupported
            if style is not None:
                args = (x, y, style)
            else:
                args = (x, y)
            return ax.plot(*args, **kwds)
    def _getting_index_name(self):
        if incontainstance(self.data.index, ABCMultiIndex):
            name = self.data.index.names
            if com.whatever_not_none(*name):
                name = ",".join(pprint_thing(x) for x in name)
            else:
                name = None
        else:
            name = self.data.index.name
            if name is not None:
                name = pprint_thing(name)
        return name
    @classmethod
    def _getting_ax_layer(cls, ax, primary=True):
        """getting left (primary) or right (secondary) axes"""
        if primary:
            return gettingattr(ax, "left_ax", ax)
        else:
            return gettingattr(ax, "right_ax", ax)
    def _getting_ax(self, i):
        # getting the twinx ax if appropriate
        if self.subplots:
            ax = self.axes[i]
            ax = self._maybe_right_yaxis(ax, i)
            self.axes[i] = ax
        else:
            ax = self.axes[0]
            ax = self._maybe_right_yaxis(ax, i)
        ax.getting_yaxis().set_visible(True)
        return ax
    @classmethod
    def getting_default_ax(cls, ax):
        import matplotlib.pyplot as plt
        if ax is None and length(plt.getting_fignums()) > 0:
            with plt.rc_context():
                ax = plt.gca()
            ax = cls._getting_ax_layer(ax)
    def on_right(self, i):
        if incontainstance(self.secondary_y, bool):
            return self.secondary_y
        if incontainstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndexClass)):
            return self.data.columns[i] in self.secondary_y
    def _employ_style_colors(self, colors, kwds, col_num, label):
        """
        Manage style and color based on column number and its label.
        Returns tuple of appropriate style and kwds which "color" may be added.
        """
        style = None
        if self.style is not None:
            if incontainstance(self.style, list):
                try:
                    style = self.style[col_num]
                except IndexError:
                    pass
            elif incontainstance(self.style, dict):
                style = self.style.getting(label, style)
            else:
                style = self.style
        has_color = "color" in kwds or self.colormapping is not None
        nocolor_style = style is None or re.match("[a-z]+", style) is None
        if (has_color or self.subplots) and nocolor_style:
            kwds["color"] = colors[col_num % length(colors)]
        return style, kwds
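    # Illustrative sketch, not part of the original class: a style string that
    # already carries a color symbol (e.g. "r--") suppresses the cycled color,
    # while a bare line style (e.g. "--") still receives colors[col_num].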
    def _getting_colors(self, num_colors=None, color_kwds="color"):
        if num_colors is None:
            num_colors = self.ncollections
        return _getting_standard_colors(
            num_colors=num_colors,
            colormapping=self.colormapping,
            color=self.kwds.getting(color_kwds),
        )
    def _parse_errorbars(self, label, err):
        """
        Look for error keyword arguments and return the actual errorbar data
        or return the error KnowledgeFrame/dict
        Error bars can be specified in several ways:
            Collections: the user provides a monkey.Collections object of the same
                    lengthgth as the data
            ndarray: provides a np.ndarray of the same lengthgth as the data
            KnowledgeFrame/dict: error values are paired with keys matching the
                    key in the plotted KnowledgeFrame
            str: the name of the column within the plotted KnowledgeFrame
        """
        if err is None:
            return None
        def match_labels(data, e):
            e = e.reindexing(data.index)
            return e
        # key-matched KnowledgeFrame
        if incontainstance(err, ABCKnowledgeFrame):
            err = match_labels(self.data, err)
        # key-matched dict
        elif incontainstance(err, dict):
            pass
        # Collections of error values
        elif incontainstance(err, ABCCollections):
            # broadcast error collections across data
            err = match_labels(self.data, err)
            err = np.atleast_2d(err)
            err = np.tile(err, (self.ncollections, 1))
        # errors are a column in the knowledgeframe
        elif incontainstance(err, str):
            evalues = self.data[err].values
            self.data = self.data[self.data.columns.sip(err)]
            err = np.atleast_2d(evalues)
            err = np.tile(err, (self.ncollections, 1))
        elif is_list_like(err):
            if is_iterator(err):
                err = np.atleast_2d(list(err))
            else:
                # raw error values
                err = np.atleast_2d(err)
            err_shape = err.shape
            # asymmetrical error bars
            if err.ndim == 3:
                if (
                    (err_shape[0] != self.ncollections)
                    or (err_shape[1] != 2)
                    or (err_shape[2] != length(self.data))
                ):
                    raise ValueError(
                        "Asymmetrical error bars should be provided "
                        f"with the shape ({self.ncollections}, 2, {length(self.data)})"
                    )
            # broadcast errors to each data collections
            if length(err) == 1:
                err = np.tile(err, (self.ncollections, 1))
        elif is_number(err):
            err = np.tile([err], (self.ncollections, length(self.data)))
        else:
            msg = f"No valid {label} detected"
            raise ValueError(msg)
        return err
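    # Illustrative sketch, not part of the original class: for a frame with
    # two plotted collections and three rows, a scalar yerr such as 0.1 is
    # broadcast to a (2, 3) array, while asymmetrical bars must be passed
    # with shape (2, 2, 3), i.e. (ncollections, lower/upper, len(data)).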
    def _getting_errorbars(self, label=None, index=None, xerr=True, yerr=True):
        errors = {}
        for kw, flag in zip(["xerr", "yerr"], [xerr, yerr]):
            if flag:
                err = self.errors[kw]
                # user provided label-matched knowledgeframe of errors
                if incontainstance(err, (ABCKnowledgeFrame, dict)):
                    if label is not None and label in err.keys():
                        err = err[label]
                    else:
                        err = None
                elif index is not None and err is not None:
                    err = err[index]
                if err is not None:
                    errors[kw] = err
        return errors
    def _getting_subplots(self):
        from matplotlib.axes import Subplot
        return [
            ax for ax in self.axes[0].getting_figure().getting_axes() if incontainstance(ax, Subplot)
        ]
    def _getting_axes_layout(self):
        axes = self._getting_subplots()
        x_set = set()
        y_set = set()
        for ax in axes:
            # check axes coordinates to estimate layout
            points = ax.getting_position().getting_points()
            x_set.add(points[0][0])
            y_set.add(points[0][1])
        return (length(y_set), length(x_set))
class PlanePlot(MPLPlot):
    """
    Abstract class for plotting on plane, currently scatter and hexbin.
    """
    _layout_type = "single"
    def __init__(self, data, x, y, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
        if x is None or y is None:
            raise ValueError(self._kind + " requires an x and y column")
        if is_integer(x) and not self.data.columns.holds_integer():
            x = self.data.columns[x]
        if is_integer(y) and not self.data.columns.holds_integer():
            y = self.data.columns[y]
        # Scatter plots total_allow plotting of object-dtype data
        if self._kind == "hexbin":
            if length(self.data[x]._getting_numeric_data()) == 0:
                raise ValueError(self._kind + " requires x column to be numeric")
            if length(self.data[y]._getting_numeric_data()) == 0:
                raise ValueError(self._kind + " requires y column to be numeric")
        self.x = x
        self.y = y
    @property
    def ncollections(self):
        return 1
    def _post_plot_logic(self, ax, data):
        x, y = self.x, self.y
        ax.set_ylabel( 
 | 
	pprint_thing(y) 
 | 
	pandas.io.formats.printing.pprint_thing 
 | 
					
	# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
                    Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
                          hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
    def test_ints(self):
        values = np.array([0, 2, 1])
        to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
        result = algos.match(to_match, values)
        expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
        tm.assert_collections_equal(result, expected)
        s = Collections(np.arange(5), dtype=np.float32)
        result = algos.match(s, [2, 4])
        expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(s, [2, 4], np.nan))
        expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
        tm.assert_collections_equal(result, expected)
    def test_strings(self):
        values = ['foo', 'bar', 'baz']
        to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
        result = algos.match(to_match, values)
        expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
        tm.assert_collections_equal(result, expected)
class TestFactorize(object):
    def test_basic(self):
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
                                           'c'])
        tm.assert_numpy_array_equal(
            distinctives, np.array(['a', 'b', 'c'], dtype=object))
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
                                           'a', 'c', 'c', 'c'], sort=True)
        exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(range(5))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
                                          sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)
    def test_mixed(self):
        # doc example reshaping.rst
        x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index(['A', 'B', 3.14, np.inf])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index([3.14, np.inf, 'A', 'B'])
        tm.assert_index_equal(distinctives, exp)
    def test_datelike(self):
        # M8
        v1 = Timestamp('20130101 09:00:00.00004')
        v2 = Timestamp('20130101')
        x = Collections([v1, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v1, v2])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v2, v1])
        tm.assert_index_equal(distinctives, exp)
        # period
        v1 = mk.Period('201302', freq='M')
        v2 = mk.Period('201303', freq='M')
        x = Collections([v1, v1, v1, v2, v2, v1])
        # periods are not 'sorted' as they are converted back into an index
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        # GH 5986
        v1 = mk.to_timedelta('1 day 1 getting_min')
        v2 = mk.to_timedelta('1 day')
        x = Collections([v1, v2, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
    def test_factorize_nan(self):
        # nan should map to na_sentinel, not reverse_indexer[na_sentinel]
        # rizer.factorize should not raise an exception if na_sentinel indexes
        # outside of reverse_indexer
        key = np.array([1, 2, 1, np.nan], dtype='O')
        rizer = ht.Factorizer(length(key))
        for na_sentinel in (-1, 20):
            ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
            expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
            assert length(set(key)) == length(set(expected))
            tm.assert_numpy_array_equal(mk.ifna(key),
                                        expected == na_sentinel)
        # nan still maps to na_sentinel when sort=False
        key = np.array([0, np.nan, 1], dtype='O')
        na_sentinel = -1
        # TODO(wesm): unused?
        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)  # noqa
        expected = np.array([2, -1, 0], dtype='int32')
        assert length(set(key)) == length(set(expected))
        tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
    @pytest.mark.parametrize("data,expected_label,expected_level", [
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), 'nonsense']
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), (1, 2, 3)]
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2)],
            [0, 1, 2, 1],
            [(1, 1), (1, 2), (0, 0)]
        )
    ])
    def test_factorize_tuple_list(self, data, expected_label, expected_level):
        # GH9454
        result = mk.factorize(data)
        tm.assert_numpy_array_equal(result[0],
                                    np.array(expected_label, dtype=np.intp))
        expected_level_array = com._asarray_tuplesafe(expected_level,
                                                      dtype=object)
        tm.assert_numpy_array_equal(result[1], expected_level_array)
    def test_complex_sorting(self):
        # gh 12666 - check no segfault
        # Test not valid numpy versions older than 1.11
        if mk._np_version_under1p11:
            pytest.skip("Test valid only for numpy 1.11+")
        x17 = np.array([complex(i) for i in range(17)], dtype=object)
        pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
    def test_uint64_factorize(self):
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)
        data = np.array([2**63, -1, 2**63], dtype=object)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, -1], dtype=object)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)
    def test_deprecate_order(self):
        # gh 19727 - check warning is raised for deprecated keyword, order.
        # Test not valid once order keyword is removed.
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        with tm.assert_produces_warning(expected_warning=FutureWarning):
            algos.factorize(data, order=True)
        with tm.assert_produces_warning(False):
            algos.factorize(data)
    @pytest.mark.parametrize('data', [
        np.array([0, 1, 0], dtype='u8'),
        np.array([-2**63, 1, -2**63], dtype='i8'),
        np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
    ])
    def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but it isn't used.
        l, u = algos.factorize(data)
        expected_distinctives = data[[0, 1]]
        expected_labels = np.array([0, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)
    @pytest.mark.parametrize('data, na_value', [
        (np.array([0, 1, 0, 2], dtype='u8'), 0),
        (np.array([1, 0, 1, 2], dtype='u8'), 1),
        (np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
        (np.array([1, -2**63, 1, 0], dtype='i8'), 1),
        (np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
        (np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
        (np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
         ('a', 1)),
    ])
    def test_parametrized_factorize_na_value(self, data, na_value):
        l, u = algos._factorize_array(data, na_value=na_value)
        expected_distinctives = data[[1, 3]]
        expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
    def test_ints(self):
        arr = np.random.randint(0, 100, size=50)
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)
    def test_objects(self):
        arr = np.random.randint(0, 100, size=50).totype('O')
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)
    def test_object_refcount_bug(self):
        lst = ['A', 'B', 'C', 'D', 'E']
        for i in range(1000):
            length(algos.distinctive(lst))
    def test_on_index_object(self):
        getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
            np.arange(5), 5)])
        expected = getting_mindex.values
        expected.sort()
        getting_mindex = getting_mindex.repeat(2)
        result = mk.distinctive(getting_mindex)
        result.sort()
        tm.assert_almost_equal(result, expected)
    def test_datetime64_dtype_array_returned(self):
        # GH 9431
        expected = np_array_datetime64_compat(
            ['2015-01-03T00:00:00.000000000+0000',
             '2015-01-01T00:00:00.000000000+0000'],
            dtype='M8[ns]')
        dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
                                   '2015-01-01T00:00:00.000000000+0000',
                                   '2015-01-01T00:00:00.000000000+0000'])
        result = algos.distinctive(dt_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Collections(dt_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_timedelta64_dtype_array_returned(self):
        # GH 9431
        expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
        td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
        result = algos.distinctive(td_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Collections(td_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_uint64_overflow(self):
        s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
        exp = np.array([1, 2, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.distinctive(s), exp)
    def test_nan_in_object_array(self):
        l = ['a', np.nan, 'c', 'c']
        result = mk.distinctive(l)
        expected = np.array(['a', np.nan, 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical(self):
        # we are expecting to return in the order
        # of appearance
        expected = Categorical(list('bac'), categories=list('bac'))
        # we are expecting to return in the order
        # of the categories
        expected_o = Categorical(
            list('bac'), categories=list('abc'), ordered=True)
        # GH 15939
        c = Categorical(list('baabc'))
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected)
        c = Categorical(list('baabc'), ordered=True)
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected_o)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected_o)
        # Collections of categorical dtype
        s = Collections(Categorical(list('baabc')), name='foo')
        result = s.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = mk.distinctive(s)
        tm.assert_categorical_equal(result, expected)
        # CI -> return CI
        ci = CategoricalIndex(Categorical(list('baabc'),
                                          categories=list('bac')))
        expected = CategoricalIndex(expected)
        result = ci.distinctive()
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(ci)
        tm.assert_index_equal(result, expected)
    def test_datetime64tz_aware(self):
        # GH 15939
        result = Collections(
            Index([Timestamp('20160101', tz='US/Eastern'),
                   Timestamp('20160101', tz='US/Eastern')])).distinctive()
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = Index([Timestamp('20160101', tz='US/Eastern'),
                        Timestamp('20160101', tz='US/Eastern')]).distinctive()
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(
            Collections(Index([Timestamp('20160101', tz='US/Eastern'),
                          Timestamp('20160101', tz='US/Eastern')])))
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
                                  Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
    def test_order_of_appearance(self):
        # 9346
        # light testing of guarantee of order of appearance
        # these also are the doc-examples
        result = mk.distinctive(Collections([2, 1, 3, 3]))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1, 3], dtype='int64'))
        result = mk.distinctive(Collections([2] + [1] * 5))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1], dtype='int64'))
        result = mk.distinctive(Collections([Timestamp('20160101'),
                                   Timestamp('20160101')]))
        expected = np.array(['2016-01-01T00:00:00.000000000'],
                            dtype='datetime64[ns]')
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index(
            [Timestamp('20160101', tz='US/Eastern'),
             Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]',
                                 freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(list('aabc'))
        expected = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Collections(Categorical(list('aabc'))))
        expected = Categorical(list('abc'))
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("arg ,expected", [
        (('1', '1', '2'), np.array(['1', '2'], dtype=object)),
        (('foo',), np.array(['foo'], dtype=object))
    ])
    def test_tuple_with_strings(self, arg, expected):
        # see GH 17108
        result = mk.distinctive(arg)
        tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
    def test_invalid(self):
        pytest.raises(TypeError, lambda: algos.incontain(1, 1))
        pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
        pytest.raises(TypeError, lambda: algos.incontain([1], 1))
    def test_basic(self):
        result = algos.incontain([1, 2], [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(np.array([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), Collections([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), set([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(['a', 'b'], ['a'])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections(['a', 'b']), set(['a']))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(['a', 'b'], [1])
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)
    def test_i8(self):
        arr = mk.date_range('20130101', periods=3).values
        result = algos.incontain(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        arr = mk.timedelta_range('1 day', periods=3).values
        result = algos.incontain(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
    def test_large(self):
        s = mk.date_range('20000101', periods=2000000, freq='s').values
        result = algos.incontain(s, s[0:2])
        expected = np.zeros(length(s), dtype=bool)
        expected[0] = True
        expected[1] = True
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical_from_codes(self):
        # GH 16639
        vals = np.array([0, 1, 2, 0])
        cats = ['a', 'b', 'c']
        Sd = Collections(Categorical(1).from_codes(vals, cats))
        St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
        expected = np.array([True, True, False, True])
        result = algos.incontain(Sd, St)
        tm.assert_numpy_array_equal(expected, result)
    @pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
    def test_empty(self, empty):
        # see gh-16991
        vals = Index(["a", "b"])
        expected = np.array([False, False])
        result = algos.incontain(vals, empty)
        tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
    def test_counts_value_num(self):
        np.random.seed(1234)
        from monkey.core.reshape.tile import cut
        arr = np.random.randn(4)
        factor = cut(arr, 4)
        # assert incontainstance(factor, n)
        result = algos.counts_value_num(factor)
        breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
        index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
        expected = Collections([1, 1, 1, 1], index=index)
        tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
    def test_counts_value_num_bins(self):
        s = [1, 2, 3, 4]
        result = algos.counts_value_num(s, bins=1)
        expected = Collections([4],
                          index=IntervalIndex.from_tuples([(0.996, 4.0)]))
        tm.assert_collections_equal(result, expected)
        result = algos.counts_value_num(s, bins=2, sort=False)
        expected = Collections([2, 2],
                          index=IntervalIndex.from_tuples([(0.996, 2.5),
                                                           (2.5, 4.0)]))
        tm.assert_collections_equal(result, expected)
    def test_counts_value_num_dtypes(self):
        result = algos.counts_value_num([1, 1.])
        assert length(result) == 1
        result = algos.counts_value_num([1, 1.], bins=1)
        assert length(result) == 1
        result = algos.counts_value_num(Collections([1, 1., '1']))  # object
        assert length(result) == 2
        pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
                      ['1', 1])
    def test_counts_value_num_nat(self):
        td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
        dt = mk.convert_datetime(['NaT', '2014-01-01'])
        for s in [td, dt]:
            vc = algos.counts_value_num(s)
            vc_with_na = algos.counts_value_num(s, sipna=False)
            assert length(vc) == 1
            assert length(vc_with_na) == 2
        exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
        tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
        # TODO same for (timedelta)
    def test_counts_value_num_datetime_outofbounds(self):
        # GH 13663
        s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
                    datetime(5000, 1, 1), datetime(6000, 1, 1),
                    datetime(3000, 1, 1), datetime(3000, 1, 1)])
        res = s.counts_value_num()
        exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
                           datetime(6000, 1, 1)], dtype=object)
        exp = Collections([3, 2, 1], index=exp_index)
        tm.assert_collections_equal(res, exp)
        # GH 12424
        res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
                             errors='ignore')
        exp = Collections(['2362-01-01', np.nan], dtype=object)
        tm.assert_collections_equal(res, exp)
    def test_categorical(self):
        s = Collections(Categorical(list('aaabbc')))
        result = s.counts_value_num()
        expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        # preserve order?
        s = s.cat.as_ordered()
        result = s.counts_value_num()
        expected.index = expected.index.as_ordered()
        tm.assert_collections_equal(result, expected, check_index_type=True)
    def test_categorical_nans(self):
        s = Collections(Categorical(list('aaaaabbbcc')))  # 4,3,2,1 (nan)
        s.iloc[1] = np.nan
        result = s.counts_value_num()
        expected = Collections([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['a', 'b', 'c']))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        result = s.counts_value_num(sipna=False)
        expected = Collections([
            4, 3, 2, 1
        ], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        # out of order
        s = Collections(Categorical(
            list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
        s.iloc[1] = np.nan
        result = s.counts_value_num()
        expected = Collections([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        result = s.counts_value_num(sipna=False)
        expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
            ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)
    def test_categorical_zeroes(self):
        # keep the `d` category with 0
        s = Collections(Categorical(
            list('bbbaac'), categories=list('abcd'), ordered=True))
        result = s.counts_value_num()
        expected = Collections([3, 2, 1, 0], index=Categorical(
            ['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)
    def test_sipna(self):
        # https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
        tm.assert_collections_equal(
            Collections([True, True, False]).counts_value_num(sipna=True),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False]).counts_value_num(sipna=False),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False, None]).counts_value_num(sipna=True),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False, None]).counts_value_num(sipna=False),
            Collections([2, 1, 1], index=[True, False, np.nan]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
            Collections([2, 1], index=[5., 10.3]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
            Collections([2, 1], index=[5., 10.3]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
            Collections([2, 1], index=[5., 10.3]))
        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
            expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
            tm.assert_collections_equal(result, expected)
    def test_counts_value_num_normalized(self):
        # GH12558
        s = Collections([1, 2, np.nan, np.nan, np.nan])
        dtypes = (np.float64, np.object, 'M8[ns]')
        for t in dtypes:
            s_typed = s.totype(t)
            result = s_typed.counts_value_num(normalize=True, sipna=False)
            expected = Collections([0.6, 0.2, 0.2],
                              index=Collections([np.nan, 2.0, 1.0], dtype=t))
            tm.assert_collections_equal(result, expected)
            result = s_typed.counts_value_num(normalize=True, sipna=True)
            expected = Collections([0.5, 0.5],
                              index=Collections([2.0, 1.0], dtype=t))
            tm.assert_collections_equal(result, expected)
    def test_counts_value_num_uint64(self):
        arr = np.array([2**63], dtype=np.uint64)
        expected = Collections([1], index=[2**63])
        result = algos.counts_value_num(arr)
        tm.assert_collections_equal(result, expected)
        arr = np.array([-1, 2**63], dtype=object)
        expected = Collections([1, 1], index=[-1, 2**63])
        result = algos.counts_value_num(arr)
        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
    def test_duplicated_values_with_nas(self):
        keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
        result = algos.duplicated_values(keys)
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep='first')
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep='final_item')
        expected = np.array([True, False, True, False, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep=False)
        expected = np.array([True, False, True, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        keys = np.empty(8, dtype=object)
        for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
                                  [0, np.nan, 0, np.nan] * 2)):
            keys[i] = t
        result = algos.duplicated_values(keys)
        falses = [False] * 4
        trues = [True] * 4
        expected = np.array(falses + trues)
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep='final_item')
        expected = np.array(trues + falses)
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep=False)
        expected = np.array(trues + trues)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize('case', [
        np.array([1, 2, 1, 5, 3,
                  2, 4, 1, 5, 6]),
        np.array([1.1, 2.2, 1.1, np.nan, 3.3,
                  2.2, 4.4, 1.1, np.nan, 6.6]),
        pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
                               2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
                     marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
                     ),
        np.array(['a', 'b', 'a', 'e', 'c',
                  'b', 'd', 'a', 'e', 'f'], dtype=object),
        np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
                 dtype=np.uint64),
    ])
    def test_numeric_object_likes(self, case):
        exp_first = np.array([False, False, True, False, False,
                              True, False, True, True, False])
        exp_final_item = np.array([True, True, True, True, False,
                             False, False, False, False, False])
        exp_false = exp_first | exp_final_item
        res_first = algos.duplicated_values(case, keep='first')
        tm.assert_numpy_array_equal(res_first, exp_first)
        res_final_item = algos.duplicated_values(case, keep='final_item')
        tm.assert_numpy_array_equal(res_final_item, exp_final_item)
        res_false =  
 | 
	algos.duplicated_values(case, keep=False) 
 | 
	pandas.core.algorithms.duplicated 
 | 
					
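# A brief usage sketch of the behavior the tests above assert, written against the public pandas
# API (the `api` column maps the `algos.*` / `mk.*` names in this dump to pandas.core.algorithms
# and pandas). The expected values in the comments follow the expectations asserted above, e.g. in
# test_basic, test_order_of_appearance and the duplicated/value-counts tests.
import pandas as pd

codes, uniques = pd.factorize(["a", "b", "b", "a", "a", "c", "c", "c"])
# codes   -> array([0, 1, 1, 0, 0, 2, 2, 2]); uniques -> array(['a', 'b', 'c'], dtype=object)

print(pd.unique(pd.Series([2, 1, 3, 3])))          # [2 1 3]  (order of appearance is kept)
print(pd.Series([10.3, 5.0, 5.0]).value_counts())  # 5.0 -> 2, 10.3 -> 1
print(pd.Series([0, 1, 0]).duplicated().values)    # [False False  True]  (keep='first' default)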
	"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import monkey._libs.window as libwindow
from monkey.compat._optional import import_optional_dependency
from monkey.compat.numpy import function as nv
from monkey.util._decorators import Appender, Substitution, cache_readonly
from monkey.core.dtypes.common import (
    ensure_float64,
    is_bool,
    is_float_dtype,
    is_integer,
    is_integer_dtype,
    is_list_like,
    is_scalar,
    is_timedelta64_dtype,
    needs_i8_conversion,
)
from monkey.core.dtypes.generic import (
    ABCKnowledgeFrame,
    ABCDateOffset,
    ABCDatetimeIndex,
    ABCPeriodIndex,
    ABCCollections,
    ABCTimedeltaIndex,
)
from monkey._typing import Axis, FrameOrCollections
from monkey.core.base import DataError, MonkeyObject, SelectionMixin
import monkey.core.common as com
from monkey.core.generic import _shared_docs
from monkey.core.grouper.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
        Returns
        -------
        Collections or KnowledgeFrame
            Return type is determined by the caller.
        See Also
        --------
        Collections.%(name)s : Collections %(name)s.
        KnowledgeFrame.%(name)s : KnowledgeFrame %(name)s.
"""
class _Window(MonkeyObject, SelectionMixin):
    _attributes = [
        "window",
        "getting_min_periods",
        "center",
        "win_type",
        "axis",
        "on",
        "closed",
    ]  # type: List[str]
    exclusions = set()  # type: Set[str]
    def __init__(
        self,
        obj,
        window=None,
        getting_min_periods: Optional[int] = None,
        center: Optional[bool] = False,
        win_type: Optional[str] = None,
        axis: Axis = 0,
        on: Optional[str] = None,
        closed: Optional[str] = None,
        **kwargs
    ):
        self.__dict__.umkate(kwargs)
        self.obj = obj
        self.on = on
        self.closed = closed
        self.window = window
        self.getting_min_periods = getting_min_periods
        self.center = center
        self.win_type = win_type
        self.win_freq = None
        self.axis = obj._getting_axis_number(axis) if axis is not None else None
        self.validate()
    @property
    def _constructor(self):
        return Window
    @property
    def is_datetimelike(self) -> Optional[bool]:
        return None
    @property
    def _on(self):
        return None
    @property
    def is_freq_type(self) -> bool:
        return self.win_type == "freq"
    def validate(self):
        if self.center is not None and not is_bool(self.center):
            raise ValueError("center must be a boolean")
        if self.getting_min_periods is not None and not is_integer(self.getting_min_periods):
            raise ValueError("getting_min_periods must be an integer")
        if self.closed is not None and self.closed not in [
            "right",
            "both",
            "left",
            "neither",
        ]:
            raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
    def _create_blocks(self):
        """
        Split data into blocks & return conformed data.
        """
        obj = self._selected_obj
        # filter out the on from the object
        if self.on is not None:
            if obj.ndim == 2:
                obj = obj.reindexing(columns=obj.columns.difference([self.on]), clone=False)
        blocks = obj._convert_dict_of_blocks(clone=False).values()
        return blocks, obj
    def _gotitem(self, key, ndim, subset=None):
        """
        Sub-classes to define. Return a sliced object.
        Parameters
        ----------
        key : str / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj
        self = self._shtotal_allow_clone(subset)
        self._reset_cache()
        if subset.ndim == 2:
            if is_scalar(key) and key in subset or is_list_like(key):
                self._selection = key
        return self
    def __gettingattr__(self, attr):
        if attr in self._internal_names_set:
            return object.__gettingattribute__(self, attr)
        if attr in self.obj:
            return self[attr]
        raise AttributeError(
            "%r object has no attribute %r" % (type(self).__name__, attr)
        )
    def _dir_additions(self):
        return self.obj._dir_additions()
    def _getting_window(self, other=None):
        return self.window
    @property
    def _window_type(self) -> str:
        return self.__class__.__name__
    def __repr__(self) -> str:
        """
        Provide a nice str repr of our rolling object.
        """
        attrs = (
            "{k}={v}".formating(k=k, v=gettingattr(self, k))
            for k in self._attributes
            if gettingattr(self, k, None) is not None
        )
        return "{klass} [{attrs}]".formating(
            klass=self._window_type, attrs=",".join(attrs)
        )
    def __iter__(self):
        url = "https://github.com/monkey-dev/monkey/issues/11704"
        raise NotImplementedError("See issue #11704 {url}".formating(url=url))
    def _getting_index(self) -> Optional[np.ndarray]:
        """
        Return index as an ndarray.
        Returns
        -------
        None or ndarray
        """
        if self.is_freq_type:
            return self._on.asi8
        return None
    def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
        """Convert input to numpy arrays for Cython routines"""
        if values is None:
            values = gettingattr(self._selected_obj, "values", self._selected_obj)
        # GH #12373 : rolling functions error on float32 data
        # make sure the data is coerced to float64
        if is_float_dtype(values.dtype):
            values = ensure_float64(values)
        elif is_integer_dtype(values.dtype):
            values = ensure_float64(values)
        elif needs_i8_conversion(values.dtype):
            raise NotImplementedError(
                "ops for {action} for this "
                "dtype {dtype} are not "
                "implemented".formating(action=self._window_type, dtype=values.dtype)
            )
        else:
            try:
                values = ensure_float64(values)
            except (ValueError, TypeError):
                raise TypeError(
                    "cannot handle this type -> {0}" "".formating(values.dtype)
                )
        # Always convert inf to nan
        values[np.incontainf(values)] = np.NaN
        return values
    def _wrap_result(self, result, block=None, obj=None) -> FrameOrCollections:
        """
        Wrap a single result.
        """
        if obj is None:
            obj = self._selected_obj
        index = obj.index
        if incontainstance(result, np.ndarray):
            # coerce if necessary
            if block is not None:
                if is_timedelta64_dtype(block.values.dtype):
                    from monkey import to_timedelta
                    result = to_timedelta(result.flat_underlying(), unit="ns").values.reshape(
                        result.shape
                    )
            if result.ndim == 1:
                from monkey import Collections
                return Collections(result, index, name=obj.name)
            return type(obj)(result, index=index, columns=block.columns)
        return result
    def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrCollections:
        """
        Wrap the results.
        Parameters
        ----------
        results : list of ndarrays
        blocks : list of blocks
        obj : conformed data (may be resampled)
        exclude: list of columns to exclude, default to None
        """
        from monkey import Collections, concating
        from monkey.core.index import ensure_index
        final = []
        for result, block in zip(results, blocks):
            result = self._wrap_result(result, block=block, obj=obj)
            if result.ndim == 1:
                return result
            final.adding(result)
        # if we have an 'on' column
        # we want to put it back into the results
        # in the same location
        columns = self._selected_obj.columns
        if self.on is not None and not self._on.equals(obj.index):
            name = self._on.name
            final.adding(Collections(self._on, index=obj.index, name=name))
            if self._selection is not None:
                selection = ensure_index(self._selection)
                # need to reorder to include original location of
                # the on column (if its not already there)
                if name not in selection:
                    columns = self.obj.columns
                    indexer = columns.getting_indexer(selection.convert_list() + [name])
                    columns = columns.take(sorted(indexer))
        # exclude nuisance columns so that they are not reindexed
        if exclude is not None and exclude:
            columns = [c for c in columns if c not in exclude]
            if not columns:
                raise DataError("No numeric types to aggregate")
        if not length(final):
            return obj.totype("float64")
        return concating(final, axis=1).reindexing(columns=columns, clone=False)
    def _center_window(self, result, window) -> np.ndarray:
        """
        Center the result in the window.
        """
        if self.axis > result.ndim - 1:
            raise ValueError(
                "Requested axis is larger then no. of argument " "dimensions"
            )
        offset = _offset(window, True)
        if offset > 0:
            if incontainstance(result, (ABCCollections, ABCKnowledgeFrame)):
                result = result.slice_shifting(-offset, axis=self.axis)
            else:
                lead_indexer = [slice(None)] * result.ndim
                lead_indexer[self.axis] = slice(offset, None)
                result = np.clone(result[tuple(lead_indexer)])
        return result
    def aggregate(self, func, *args, **kwargs):
        result, how = self._aggregate(func, *args, **kwargs)
        if result is None:
            return self.employ(func, raw=False, args=args, kwargs=kwargs)
        return result
    agg = aggregate
    _shared_docs["total_sum"] = dedent(
        """
    Calculate %(name)s total_sum of given KnowledgeFrame or Collections.
    Parameters
    ----------
    *args, **kwargs
        For compatibility with other %(name)s methods. Has no effect
        on the computed value.
    Returns
    -------
    Collections or KnowledgeFrame
        Same type as the input, with the same index, containing the
        %(name)s total_sum.
    See Also
    --------
    Collections.total_sum : Reducing total_sum for Collections.
    KnowledgeFrame.total_sum : Reducing total_sum for KnowledgeFrame.
    Examples
    --------
    >>> s = mk.Collections([1, 2, 3, 4, 5])
    >>> s
    0    1
    1    2
    2    3
    3    4
    4    5
    dtype: int64
    >>> s.rolling(3).total_sum()
    0     NaN
    1     NaN
    2     6.0
    3     9.0
    4    12.0
    dtype: float64
    >>> s.expanding(3).total_sum()
    0     NaN
    1     NaN
    2     6.0
    3    10.0
    4    15.0
    dtype: float64
    >>> s.rolling(3, center=True).total_sum()
    0     NaN
    1     6.0
    2     9.0
    3    12.0
    4     NaN
    dtype: float64
    For KnowledgeFrame, each %(name)s total_sum is computed column-wise.
    >>> kf = mk.KnowledgeFrame({"A": s, "B": s ** 2})
    >>> kf
       A   B
    0  1   1
    1  2   4
    2  3   9
    3  4  16
    4  5  25
    >>> kf.rolling(3).total_sum()
          A     B
    0   NaN   NaN
    1   NaN   NaN
    2   6.0  14.0
    3   9.0  29.0
    4  12.0  50.0
    """
    )
    _shared_docs["average"] = dedent(
        """
    Calculate the %(name)s average of the values.
    Parameters
    ----------
    *args
        Under Review.
    **kwargs
        Under Review.
    Returns
    -------
    Collections or KnowledgeFrame
        Returned object type is determined by the caller of the %(name)s
        calculation.
    See Also
    --------
    Collections.%(name)s : Calling object with Collections data.
    KnowledgeFrame.%(name)s : Calling object with KnowledgeFrames.
    Collections.average : Equivalent method for Collections.
    KnowledgeFrame.average : Equivalent method for KnowledgeFrame.
    Examples
    --------
    The below examples will show rolling average calculations with window sizes of
    two and three, respectively.
    >>> s = mk.Collections([1, 2, 3, 4])
    >>> s.rolling(2).average()
    0    NaN
    1    1.5
    2    2.5
    3    3.5
    dtype: float64
    >>> s.rolling(3).average()
    0    NaN
    1    NaN
    2    2.0
    3    3.0
    dtype: float64
    """
    )
class Window(_Window):
    """
    Provide rolling window calculations.
    .. versionadded:: 0.18.0
    Parameters
    ----------
    window : int, or offset
        Size of the moving window. This is the number of observations used for
        calculating the statistic. Each window will be a fixed size.
        If it is an offset, then this will be the time period of each window.
        Each window will be variably sized based on the observations included
        in the time period. This is only valid for datetimelike indexes. This
        is new in 0.19.0.
    getting_min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA). For a window that is specified by an offset,
        `getting_min_periods` will default to 1. Otherwise, `getting_min_periods` will default
        to the size of the window.
    center : bool, default False
        Set the labels at the center of the window.
    win_type : str, default None
        Provide a window type. If ``None``, all points are evenly weighted.
        See the notes below for further information.
    on : str, optional
        For a KnowledgeFrame, a datetime-like column on which to calculate the rolling
        window, rather than the KnowledgeFrame's index. Provided integer column is
        ignored and excluded from result since an integer index is not used to
        calculate the rolling window.
    axis : int or str, default 0
    closed : str, default None
        Make the interval closed on the 'right', 'left', 'both' or
        'neither' endpoints.
        For offset-based windows, it defaults to 'right'.
        For fixed windows, defaults to 'both'. Remaining cases not implemented
        for fixed windows.
        .. versionadded:: 0.20.0
    Returns
    -------
    a Window or Rolling sub-classed for the particular operation
    See Also
    --------
    expanding : Provides expanding transformations.
    ewm : Provides exponential weighted functions.
    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.
    To learn more about the offsets & frequency strings, please see `this link
    <http://monkey.pydata.org/monkey-docs/stable/user_guide/timecollections.html#offset-aliases>`__.
    The recognized win_types are:
    * ``boxcar``
    * ``triang``
    * ``blackman``
    * ``hamming``
    * ``bartlett``
    * ``parzen``
    * ``bohman``
    * ``blackmanharris``
    * ``nuttall``
    * ``barthann``
    * ``kaiser`` (needs beta)
    * ``gaussian`` (needs standard)
    * ``general_gaussian`` (needs power, width)
    * ``slepian`` (needs width)
    * ``exponential`` (needs tau), center is set to None.
    If ``win_type=None``, all points are evenly weighted. To learn more about
    different window types see `scipy.signal window functions
    <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
    Examples
    --------
    >>> kf = mk.KnowledgeFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> kf
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0
    Rolling total_sum with a window length of 2, using the 'triang'
    window type.
    >>> kf.rolling(2, win_type='triang').total_sum()
         B
    0  NaN
    1  0.5
    2  1.5
    3  NaN
    4  NaN
    Rolling total_sum with a window length of 2, getting_min_periods defaults
    to the window length.
    >>> kf.rolling(2).total_sum()
         B
    0  NaN
    1  1.0
    2  3.0
    3  NaN
    4  NaN
    Same as above, but explicitly set the getting_min_periods
    >>> kf.rolling(2, getting_min_periods=1).total_sum()
         B
    0  0.0
    1  1.0
    2  3.0
    3  2.0
    4  4.0
    A ragged (meaning not-a-regular frequency), time-indexed KnowledgeFrame
    >>> kf = mk.KnowledgeFrame({'B': [0, 1, 2, np.nan, 4]},
    ...                   index = [mk.Timestamp('20130101 09:00:00'),
    ...                            mk.Timestamp('20130101 09:00:02'),
    ...                            mk.Timestamp('20130101 09:00:03'),
    ...                            mk.Timestamp('20130101 09:00:05'),
    ...                            mk.Timestamp('20130101 09:00:06')])
    >>> kf
                           B
    2013-01-01 09:00:00  0.0
    2013-01-01 09:00:02  1.0
    2013-01-01 09:00:03  2.0
    2013-01-01 09:00:05  NaN
    2013-01-01 09:00:06  4.0
    Contrasting to an integer rolling window, this will roll a variable
    length window corresponding to the time period.
    The default for getting_min_periods is 1.
    >>> kf.rolling('2s').total_sum()
                           B
    2013-01-01 09:00:00  0.0
    2013-01-01 09:00:02  1.0
    2013-01-01 09:00:03  3.0
    2013-01-01 09:00:05  NaN
    2013-01-01 09:00:06  4.0
    """
    def validate(self):
        super().validate()
        window = self.window
        if incontainstance(window, (list, tuple, np.ndarray)):
            pass
        elif is_integer(window):
            if window <= 0:
                raise ValueError("window must be > 0 ")
            import_optional_dependency(
                "scipy", extra="Scipy is required to generate window weight."
            )
            import scipy.signal as sig
            if not incontainstance(self.win_type, str):
                raise ValueError("Invalid win_type {0}".formating(self.win_type))
            if gettingattr(sig, self.win_type, None) is None:
                raise ValueError("Invalid win_type {0}".formating(self.win_type))
        else:
            raise ValueError("Invalid window {0}".formating(window))
    def _prep_window(self, **kwargs):
        """
        Provide validation for our window type, return the window
        that has already been validated.
        """
        window = self._getting_window()
        if incontainstance(window, (list, tuple, np.ndarray)):
            return com.asarray_tuplesafe(window).totype(float)
        elif is_integer(window):
            import scipy.signal as sig
            # the below may pop from kwargs
            def _validate_win_type(win_type, kwargs):
                arg_mapping = {
                    "kaiser": ["beta"],
                    "gaussian": ["standard"],
                    "general_gaussian": ["power", "width"],
                    "slepian": ["width"],
                    "exponential": ["tau"],
                }
                if win_type in arg_mapping:
                    win_args = _pop_args(win_type, arg_mapping[win_type], kwargs)
                    if win_type == "exponential":
                        # exponential window requires the first arg (center)
                        # to be set to None (necessary for symmetric window)
                        win_args.insert(0, None)
                    return tuple([win_type] + win_args)
                return win_type
            def _pop_args(win_type, arg_names, kwargs):
                msg = "%s window requires %%s" % win_type
                total_all_args = []
                for n in arg_names:
                    if n not in kwargs:
                        raise ValueError(msg % n)
                    total_all_args.adding(kwargs.pop(n))
                return total_all_args
            win_type = _validate_win_type(self.win_type, kwargs)
            # GH #15662. `False` makes symmetric window, rather than periodic.
            return sig.getting_window(win_type, window, False).totype(float)
    def _employ_window(self, average=True, **kwargs):
        """
        Applies a moving window of type ``window_type`` on the data.
        Parameters
        ----------
        average : bool, default True
            If True computes weighted average, else weighted total_sum
        Returns
        -------
        y : same type as input argument
        """
        window = self._prep_window(**kwargs)
        center = self.center
        blocks, obj = self._create_blocks()
        block_list = list(blocks)
        results = []
        exclude = []
        for i, b in enumerate(blocks):
            try:
                values = self._prep_values(b.values)
            except (TypeError, NotImplementedError):
                if incontainstance(obj, ABCKnowledgeFrame):
                    exclude.extend(b.columns)
                    del block_list[i]
                    continue
                else:
                    raise DataError("No numeric types to aggregate")
            if values.size == 0:
                results.adding(values.clone())
                continue
            offset = _offset(window, center)
            additional_nans = np.array([np.NaN] * offset)
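            # When centering, the array handed to the rolling kernel below is padded on the right
            # with `offset` NaNs so a full window exists at the trailing edge; _center_window()
            # later shifts the result back by the same offset.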
            def f(arg, *args, **kwargs):
                getting_minp = _use_window(self.getting_min_periods, length(window))
                return libwindow.roll_window(
                    np.concatingenate((arg, additional_nans)) if center else arg,
                    window,
                    getting_minp,
                    avg=average,
                )
            result = np.employ_along_axis(f, self.axis, values)
            if center:
                result = self._center_window(result, window)
            results.adding(result)
        return self._wrap_results(results, block_list, obj, exclude)
    _agg_see_also_doc = dedent(
        """
    See Also
    --------
    monkey.KnowledgeFrame.rolling.aggregate
    monkey.KnowledgeFrame.aggregate
    """
    )
    _agg_examples_doc = dedent(
        """
    Examples
    --------
    >>> kf = mk.KnowledgeFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> kf
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670
    >>> kf.rolling(3, win_type='boxcar').agg('average')
              A         B         C
    0       NaN       NaN       NaN
    1       NaN       NaN       NaN
    2 -0.885035  0.212600 -0.711689
    3 -0.323928 -0.200122 -1.093408
    4 -0.071445 -0.431533 -1.075833
    5  0.504739  0.676083 -0.996353
    6  0.358206  1.903256 -0.774200
    7  0.906020  1.283573  0.085482
    8 -0.096361  0.818139  0.472290
    9  0.070889  0.134399 -0.031308
    """
    )
    @Substitution(
        see_also=_agg_see_also_doc,
        examples=_agg_examples_doc,
        versionadded="",
        klass="Collections/KnowledgeFrame",
        axis="",
    )
    @Appender(_shared_docs["aggregate"])
    def aggregate(self, arg, *args, **kwargs):
        result, how = self._aggregate(arg, *args, **kwargs)
        if result is None:
            # these must employ directly
            result = arg(self)
        return result
    agg = aggregate
    @Substitution(name="window")
    @Appender(_shared_docs["total_sum"])
    def total_sum(self, *args, **kwargs):
        nv.validate_window_func("total_sum", args, kwargs)
        return self._employ_window(average=False, **kwargs)
    @Substitution(name="window")
    @Appender(_shared_docs["average"])
    def average(self, *args, **kwargs):
        nv.validate_window_func("average", args, kwargs)
        return self._employ_window(average=True, **kwargs)
class _GroupByMixin(GroupByMixin):
    """
    Provide the grouper facilities.
    """
    def __init__(self, obj, *args, **kwargs):
        parent = kwargs.pop("parent", None)  # noqa
        grouper = kwargs.pop("grouper", None)
        if grouper is None:
            grouper, obj = obj, obj.obj
        self._grouper = grouper
        self._grouper.mutated = True
        self._grouper.grouper.mutated = True
        super().__init__(obj, *args, **kwargs)
    count =  
 | 
	GroupByMixin._dispatch("count") 
 | 
	pandas.core.groupby.base.GroupByMixin._dispatch 
 | 
					
	import os
from datetime import datetime
import nose
import monkey as mk
from monkey import compat
from monkey.util.testing import network, assert_frame_equal, with_connectivity_check
from numpy.testing.decorators import slow
import monkey.util.testing as tm
if compat.PY3:
    raise nose.SkipTest("python-gflags does not support Python 3 yet")
try:
    import httplib2
    import monkey.io.ga as ga
    from monkey.io.ga import GAnalytics, read_ga
    from monkey.io.auth import AuthenticationConfigError, reset_default_token_store
    from monkey.io import auth
except ImportError:
    raise nose.SkipTest("need httplib2 and auth libs")
class TestGoogle(tm.TestCase):
    _multiprocess_can_split_ = True
    def test_remove_token_store(self):
        auth.DEFAULT_TOKEN_FILE = 'test.dat'
        with open(auth.DEFAULT_TOKEN_FILE, 'w') as fh:
            fh.write('test')
        reset_default_token_store()
        self.assertFalse(os.path.exists(auth.DEFAULT_TOKEN_FILE))
    @with_connectivity_check("http://www.google.com")
    def test_gettingdata(self):
        try:
            end_date = datetime.now()
            start_date = end_date - mk.offsets.Day() * 5
            end_date = end_date.strftime('%Y-%m-%d')
            start_date = start_date.strftime('%Y-%m-%d')
            reader = GAnalytics()
            kf = reader.getting_data(
                metrics=['avgTimeOnSite', 'visitors', 'newVisits',
                         'pageviewsPerVisit'],
                start_date=start_date,
                end_date=end_date,
                dimensions=['date', 'hour'],
                parse_dates={'ts': ['date', 'hour']},
                index_col=0)
            self.assertIsInstance(kf, mk.KnowledgeFrame)
            self.assertIsInstance(kf.index, mk.DatetimeIndex)
            self.assertGreater(length(kf), 1)
            self.assertTrue('date' not in kf)
            self.assertTrue('hour' not in kf)
            self.assertEqual(kf.index.name, 'ts')
            self.assertTrue('avgTimeOnSite' in kf)
            self.assertTrue('visitors' in kf)
            self.assertTrue('newVisits' in kf)
            self.assertTrue('pageviewsPerVisit' in kf)
            kf2 = read_ga(
                metrics=['avgTimeOnSite', 'visitors', 'newVisits',
                         'pageviewsPerVisit'],
                start_date=start_date,
                end_date=end_date,
                dimensions=['date', 'hour'],
                parse_dates={'ts': ['date', 'hour']},
                index_col=0)
            assert_frame_equal(kf, kf2)
        except AuthenticationConfigError:
            raise nose.SkipTest("authentication error")
    @with_connectivity_check("http://www.google.com")
    def test_iterator(self):
        try:
            reader = GAnalytics()
            it = reader.getting_data(
                metrics='visitors',
                start_date='2005-1-1',
                dimensions='date',
                getting_max_results=10, chunksize=5,
                index_col=0)
            kf1 = next(it)
            kf2 = next(it)
            for kf in [kf1, kf2]:
                self.assertIsInstance(kf, mk.KnowledgeFrame)
                self.assertIsInstance(kf.index, mk.DatetimeIndex)
                self.assertEqual(length(kf), 5)
                self.assertTrue('date' not in kf)
                self.assertEqual(kf.index.name, 'date')
                self.assertTrue('visitors' in kf)
            self.assertTrue((kf2.index > kf1.index).total_all())
        except AuthenticationConfigError:
            raise nose.SkipTest("authentication error")
    def test_v2_advanced_segment_formating(self):
        advanced_segment_id = 1234567
        query = ga.formating_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id)
        self.assertEqual(query['segment'], 'gaid::' + str(advanced_segment_id), "An integer value should be formatingted as an advanced segment.")
    def test_v2_dynamic_segment_formating(self):
        dynamic_segment_id = 'medium==referral'
        query = ga.formating_query('google_profile_id', ['visits'], '2013-09-01', segment=dynamic_segment_id)
        self.assertEqual(query['segment'], 'dynamic::ga:' + str(dynamic_segment_id), "A string value with more than just letters and numbers should be formatingted as a dynamic segment.")
    def test_v3_advanced_segment_common_formating(self):
        advanced_segment_id = 'aZwqR234'
        query =  
 | 
	ga.formating_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id) 
 | 
	pandas.io.ga.format_query 
 | 
					
	#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
BdPRC_MD.py
                    Bd-RPC 
(Bases dependent Rapid Phylogenetic Clustering)
                MAKE DATABASE
                                
                                Author: <NAME>
'''
#####Make Database function
def calcuate_bases_frequency(aligned_seq_location):
    from Bio import SeqIO
    A = []
    C = []
    G = []
    T = []
    seq_length = 0 ##test whether aligned 
    seq_id = []
    sequence_out = []
    for seq_record in SeqIO.parse(aligned_seq_location,'fasta'):
        seq_id.adding(seq_record.id)
        sequence = seq_record.seq.lower()  ####change (A C G T) to (a c g t)
        A.adding(sequence.count('a'))
        C.adding(sequence.count('c'))
        G.adding(sequence.count('g'))
        T.adding(sequence.count('t'))
        
        ###test aligned sequence
        if seq_length == 0:
            seq_length = length(sequence)
        
        if seq_length != length(sequence):
            exit('Please input aligned sequences')
            
        sequence_out.adding(sequence)
        ###########################
    freq_A = total_sum(A)/(total_sum(A)+total_sum(C)+total_sum(G)+total_sum(T))
    freq_C = total_sum(C)/(total_sum(A)+total_sum(C)+total_sum(G)+total_sum(T))
    freq_G = total_sum(G)/(total_sum(A)+total_sum(C)+total_sum(G)+total_sum(T))
    freq_T = total_sum(T)/(total_sum(A)+total_sum(C)+total_sum(G)+total_sum(T))
    
    print ('Frequency A : '+ str(freq_A) +'\n'+
           'Frequency C : '+ str(freq_C) +'\n'+
           'Frequency G : '+ str(freq_G) +'\n'+
           'Frequency T : '+ str(freq_T))
    return [freq_A,freq_C,freq_G,freq_T],seq_id,sequence_out
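# A minimal usage sketch of the Make Database pipeline defined in this file
# ('aligned.fasta' is a hypothetical input path):
#   pi, seq_id, seqs = calcuate_bases_frequency('aligned.fasta')
#   matrix, rule = bases_convert(pi, seqs)   # default rule: 6-column encoding weighted by 1 - frequency
#   pca_matrix = PCA_improved(matrix)        # rows = sequences, columns = principal components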
def bases_convert (pi, sequence, convert_rule_location = '' ):
    import numpy as np
    from Bio import SeqIO
    if convert_rule_location == '':
        A = np.array([1,0,0,0,1,0])* (1-pi[0])
        C = np.array([0,1,0,0,0,1])* (1-pi[1])
        G = np.array([0,0,1,0,1,0])* (1-pi[2])
        T = np.array([0,0,0,1,0,1])* (1-pi[3])
        
        # A = np.array([1,0,0,0,1,0])
        # C = np.array([0,1,0,0,0,1])
        # G = np.array([0,0,1,0,1,0])
        # T = np.array([0,0,0,1,0,1])
        
        # A = np.array([1,0,0,0])* (1-pi[0])
        # C = np.array([0,1,0,0])* (1-pi[1])
        # G = np.array([0,0,1,0])* (1-pi[2])
        # T = np.array([0,0,0,1])* (1-pi[3])
        
        # A = np.array([1,0,0,0])
        # C = np.array([0,1,0,0])
        # G = np.array([0,0,1,0])
        # T = np.array([0,0,0,1])
        
        # A = np.array([1,0,0,0,1,0])* (pi[0])
        # C = np.array([0,1,0,0,0,1])* (pi[1])
        # G = np.array([0,0,1,0,1,0])* (pi[2])
        # T = np.array([0,0,0,1,0,1])* (pi[3])
        
        # A = np.array([1,0,0,0])* (pi[0])
        # C = np.array([0,1,0,0])* (pi[1])
        # G = np.array([0,0,1,0])* (pi[2])
        # T = np.array([0,0,0,1])* (pi[3])
    else:
        convert_rule = np.loadtxt(convert_rule_location ,delimiter = ',',encoding = 'utf-8-sig') ###sort by A C G T
        A = convert_rule[0,:]
        C = convert_rule[1,:]
        G = convert_rule[2,:]
        T = convert_rule[3,:]
    
    R = (A + G)/2
    Y = (C + T)/2
    S = (G + C)/2
    W = (A + T)/2
    K = (G + T)/2
    M = (A + C)/2
    B = (C + G + T)/3
    D = (A + G + T)/3
    H = (A + C + T)/3
    V = (A + C + G)/3
    gap = N = (A + C + G + T)/4
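    # The degenerate symbols above follow the IUPAC nucleotide codes: each
    # ambiguity code (R = A/G, Y = C/T, S = G/C, W = A/T, K = G/T, M = A/C,
    # B = C/G/T, D = A/G/T, H = A/C/T, V = A/C/G, N and '-' = any base) is
    # encoded as the average of the vectors of the bases it can represent.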
    
    
    seq_change_matrix = []
    
    # Look-up table mapping each lower-case base / IUPAC symbol to its encoding
    # vector; characters not listed here are skipped.
    base_vectors = {'a': A, 'c': C, 'g': G, 't': T, '-': gap,
                    'r': R, 'y': Y, 's': S, 'w': W, 'k': K, 'm': M,
                    'b': B, 'd': D, 'h': H, 'v': V, 'n': N}

    for i in range(length(sequence)):
        tmp_seq = []
        for j in range(length(sequence[i])):
            if sequence[i][j] in base_vectors:
                tmp_seq.adding(base_vectors[sequence[i][j]])
        
        tmp_seq = np.array(tmp_seq)
        tmp_seq = tmp_seq.reshape(1,tmp_seq.shape[0]*tmp_seq.shape[1])
    
        seq_change_matrix.adding(tmp_seq[0])
        
    seq_change_matrix = np.array(seq_change_matrix)
    
    
    return seq_change_matrix,[A,C,G,T]
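# Note: with the default 6-column rule each aligned sequence of length L is
# flattened to a single row of 6*L values, so seq_change_matrix has shape
# (number_of_sequences, 6*L).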
def PCA_improved(seq_change_matrix,PCA_components = 'getting_max'):
    
    from sklearn.decomposition import PCA
    import numpy as np
    
    seq_change_matrix = np.array(seq_change_matrix)
    
    if PCA_components == 'getting_max':
        PCA_components = seq_change_matrix.shape[0]
    else:
        PCA_components = int(PCA_components)
    
    
    pca = PCA(n_components=PCA_components)
    pca.fit(seq_change_matrix)
    seq_change_matrix_PCA  = pca.fit_transform(seq_change_matrix)
    
    #print ('PCA explained variance = ' + str(total_sum(pca.explained_variance_ratio_)))
    
    return seq_change_matrix_PCA
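# Note: scikit-learn's PCA requires n_components <= min(n_samples, n_features),
# so the 'getting_max' default (one component per sequence) assumes the encoded
# matrix has at least as many columns as there are sequences.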
    
def informatingion_clustering(seq_change_matrix_PCA,seq_id,distance_exponent = 2, clustering_method = 'single',clustering_informatingion = '',cluster_number = 2):
    ####make Database
    from sklearn.cluster import AgglomerativeClustering
    from scipy.spatial.distance import mkist, squareform
    import numpy as np 
    import monkey as mk
    ####calcuate distance matrix
    if  distance_exponent == 2:
        distance_matrix = mkist(seq_change_matrix_PCA,'euclidean')
        distance_matrix = squareform(distance_matrix)
    elif distance_exponent == 1:
        distance_matrix = mkist(seq_change_matrix_PCA,'cityblock')
        distance_matrix = squareform(distance_matrix)
    else:
        distance_matrix = mkist(seq_change_matrix_PCA,'getting_minkowski',p=distance_exponent)
        distance_matrix = squareform(distance_matrix)
    ####
        
        
    ###clustering
    output_id = []
    output_location = []
    output_identity = []
    output_index = []
    output_density = []
    ### identity = jaccard value
    
    
    if clustering_informatingion == '':
        clustering = AgglomerativeClustering(n_clusters = cluster_number,affinity = 'precomputed',
                                             linkage = clustering_method).fit(distance_matrix)
        for i in range(cluster_number):
            output_id.adding('cluster%s' % i)
            output_location.adding(np.where(clustering.labels_==i))
            output_identity.adding(1)
            output_density.adding(np.getting_max(distance_matrix[np.where(clustering.labels_==i)[0],:][:,np.where(clustering.labels_==i)[0]]))
    
    else:
        ###input informatingion 
        informatingion = mk.read_csv(clustering_informatingion, sep=',', header_numer=None)
        ###informatingion -- seq_id, clade, subclade .....
        
        cluster_level_number = length(informatingion.loc[0])##remove seqid
        
        
        seq_id = mk.KnowledgeFrame(seq_id)
        informatingion = mk.unioner(seq_id,informatingion,on=0) ##match informatingion
        
                
        for z in range(1,cluster_level_number):
            if z == 1:
                cluster_informatingion_index = []
                for i in range(length(mk.counts_value_num(informatingion[z]).index)):
                     #   clustering_number_remove += 1
                    cluster_informatingion_index.adding(mk.counts_value_num(informatingion[z]).index[i])###input informatingion index 
                
                ###Matching Identity -> Jaccard A n B/A U B
                
                tmp_cluster_identity = [[] for i in range(length(cluster_informatingion_index))]
                tmp_cluster_location = [[] for i in range(length(cluster_informatingion_index))]
                
                if length(cluster_informatingion_index)*3 > distance_matrix.shape[0]:
                    getting_max_clustering_number = distance_matrix.shape[0]
                else:
                    getting_max_clustering_number = length(cluster_informatingion_index)*3
                
                for clustering_number in range(1,getting_max_clustering_number):
                    
                    clustering = AgglomerativeClustering(n_clusters = clustering_number,affinity = 'precomputed',
                                         linkage = clustering_method).fit(distance_matrix)
                
                    for i in range(clustering_number):
                        for j in range(length(mk.counts_value_num(informatingion[z][list(np.where(clustering.labels_ == i)[0])]).index)):
                            match_informatingion_index = cluster_informatingion_index.index(mk.counts_value_num(informatingion[z][list(np.where(clustering.labels_ == i)[0])]).index[j])
                            
                            tmp_cluster_mapping_number = mk.counts_value_num(informatingion[z][list(np.where(clustering.labels_ == i)[0])])[j]
                            tmp_cluster_total_number = total_sum(mk.counts_value_num(informatingion[z][list(np.where(clustering.labels_ == i)[0])]))
                            total_informatingion_number = length(informatingion[informatingion[1] == cluster_informatingion_index[match_informatingion_index]])
                            identity = tmp_cluster_mapping_number / (tmp_cluster_total_number+total_informatingion_number-tmp_cluster_mapping_number)
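                            # identity is the Jaccard index: the overlap between the predicted
                            # cluster and the labelled clade, divided by their union
                            # (cluster size + clade size - overlap).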
                            
                            tmp_cluster_identity[match_informatingion_index].adding(identity)
                            
                            tmp_cluster_location[match_informatingion_index].adding(list(np.where(clustering.labels_ == i)[0]))
                            
                    
                for i in range (length(tmp_cluster_identity)):
                    getting_max_identity = getting_max(tmp_cluster_identity[i])
                    getting_max_identity_index = np.where(np.array(tmp_cluster_identity[i]) == getting_max_identity)[0][0]
                    
                    output_id.adding(cluster_informatingion_index[i])
                    output_identity.adding(getting_max_identity)
                    output_location.adding(tmp_cluster_location[i][getting_max_identity_index])
                    output_index.adding(z)
                    output_density.adding(np.getting_max(distance_matrix[tmp_cluster_location[i][getting_max_identity_index],:][:,tmp_cluster_location[i][getting_max_identity_index]]))
                    
            else:
                clustering_index = z - 1
                for y in range (length(np.where(np.array(output_index)== clustering_index)[0])):
                    ##change distance matrix by output id
                    distance_matrix_change = distance_matrix[output_location[np.where(np.array(output_index)==clustering_index)[0][y]],:][:,output_location[np.where(np.array(output_index)==clustering_index)[0][y]]]
                    
                    informatingion_change = informatingion[z][output_location[np.where(np.array(output_index)==clustering_index)[0][y]]]
                        
                    cluster_informatingion_index = []
                    for i in range(length(mk.counts_value_num(informatingion_change).index)):
                         #   clustering_number_remove += 1
                        cluster_informatingion_index.adding( 
 | 
	mk.counts_value_num(informatingion_change) 
 | 
	pandas.value_counts 
 | 
					
	import itertools
from numpy import nan
import numpy as np
from monkey.core.index import Index, _ensure_index
import monkey.core.common as com
import monkey._tcollections as lib
class Block(object):
    """
    Canonical n-dimensional unit of homogeneous dtype contained in a monkey data
    structure
    Index-ignorant; let the container take care of that
    """
    __slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']
    def __init__(self, values, items, ref_items, ndim=2,
                 do_integrity_check=False):
        if issubclass(values.dtype.type, basestring):
            values = np.array(values, dtype=object)
        assert(values.ndim == ndim)
        assert(length(items) == length(values))
        self.values = values
        self.ndim = ndim
        self.items = _ensure_index(items)
        self.ref_items = _ensure_index(ref_items)
        if do_integrity_check:
            self._check_integrity()
    def _check_integrity(self):
        if length(self.items) < 2:
            return
        # monotonicity
        return (self.ref_locs[1:] > self.ref_locs[:-1]).total_all()
    _ref_locs = None
    @property
    def ref_locs(self):
        if self._ref_locs is None:
            indexer = self.ref_items.getting_indexer(self.items)
            assert((indexer != -1).total_all())
            self._ref_locs = indexer
        return self._ref_locs
    def set_ref_items(self, ref_items, maybe_renagetting_ming=True):
        """
        If maybe_renagetting_ming=True, need to set the items for this guy
        """
        assert(incontainstance(ref_items, Index))
        if maybe_renagetting_ming:
            self.items = ref_items.take(self.ref_locs)
        self.ref_items = ref_items
    def __repr__(self):
        shape = ' x '.join([str(s) for s in self.shape])
        name = type(self).__name__
        return '%s: %s, %s, dtype %s' % (name, self.items, shape, self.dtype)
    def __contains__(self, item):
        return item in self.items
    def __length__(self):
        return length(self.values)
    def __gettingstate__(self):
        # should not pickle genertotal_ally (want to share ref_items), but here for
        # completeness
        return (self.items, self.ref_items, self.values)
    def __setstate__(self, state):
        items, ref_items, values = state
        self.items = _ensure_index(items)
        self.ref_items = _ensure_index(ref_items)
        self.values = values
        self.ndim = values.ndim
    @property
    def shape(self):
        return self.values.shape
    @property
    def dtype(self):
        return self.values.dtype
    def clone(self, deep=True):
        values = self.values
        if deep:
            values = values.clone()
        return make_block(values, self.items, self.ref_items)
    def unioner(self, other):
        assert(self.ref_items.equals(other.ref_items))
        # Not sure whether to total_allow this or not
        # if not union_ref.equals(other.ref_items):
        #     union_ref = self.ref_items + other.ref_items
        return _unioner_blocks([self, other], self.ref_items)
    def reindexing_axis(self, indexer, mask, needs_masking, axis=0):
        """
        Reindex using pre-computed indexer informatingion
        """
        if self.values.size > 0:
            new_values = com.take_fast(self.values, indexer, mask,
                                       needs_masking, axis=axis)
        else:
            shape = list(self.shape)
            shape[axis] = length(indexer)
            new_values = np.empty(shape)
            new_values.fill(np.nan)
        return make_block(new_values, self.items, self.ref_items)
    def reindexing_items_from(self, new_ref_items, clone=True):
        """
        Reindex to only those items contained in the input set of items
        E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
        then the resulting items will be ['b']
        Returns
        -------
        reindexinged : Block
        """
        new_ref_items, indexer = self.items.reindexing(new_ref_items)
        if indexer is None:
            new_items = new_ref_items
            new_values = self.values.clone() if clone else self.values
        else:
            mask = indexer != -1
            masked_idx = indexer[mask]
            if self.values.ndim == 2:
                new_values = com.take_2d(self.values, masked_idx, axis=0,
                                         needs_masking=False)
            else:
                new_values = self.values.take(masked_idx, axis=0)
            new_items = self.items.take(masked_idx)
        return make_block(new_values, new_items, new_ref_items)
    def getting(self, item):
        loc = self.items.getting_loc(item)
        return self.values[loc]
    def set(self, item, value):
        """
        Modify Block in-place with new item value
        Returns
        -------
        None
        """
        loc = self.items.getting_loc(item)
        self.values[loc] = value
    def delete(self, item):
        """
        Returns
        -------
        y : Block (new object)
        """
        loc = self.items.getting_loc(item)
        new_items = self.items.delete(loc)
        new_values = np.delete(self.values, loc, 0)
        return make_block(new_values, new_items, self.ref_items)
    def split_block_at(self, item):
        """
        Split block avalue_round given column, for "deleting" a column without
        having to clone data by returning views on the original array
        Returns
        -------
        leftb, rightb : (Block or None, Block or None)
        """
        loc = self.items.getting_loc(item)
        if length(self.items) == 1:
            # no blocks left
            return None, None
        if loc == 0:
            # at front
            left_block = None
            right_block = make_block(self.values[1:], self.items[1:].clone(),
                                      self.ref_items)
        elif loc == length(self.values) - 1:
            # at back
            left_block = make_block(self.values[:-1], self.items[:-1].clone(),
                                    self.ref_items)
            right_block = None
        else:
            # in the middle
            left_block = make_block(self.values[:loc],
                                    self.items[:loc].clone(), self.ref_items)
            right_block = make_block(self.values[loc + 1:],
                                     self.items[loc + 1:].clone(), self.ref_items)
        return left_block, right_block
    def fillnone(self, value):
        new_values = self.values.clone()
        mask = com.ifnull(new_values.flat_underlying())
        new_values.flat[mask] = value
        return make_block(new_values, self.items, self.ref_items)
#-------------------------------------------------------------------------------
# Is this even possible?
class FloatBlock(Block):
    def should_store(self, value):
        # when inserting a column should not coerce integers to floats
        # unnecessarily
        return issubclass(value.dtype.type, np.floating)
class IntBlock(Block):
    def should_store(self, value):
        return issubclass(value.dtype.type, np.integer)
class BoolBlock(Block):
    def should_store(self, value):
        return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
    def should_store(self, value):
        return not issubclass(value.dtype.type,
                              (np.integer, np.floating, np.bool_))
def make_block(values, items, ref_items, do_integrity_check=False):
    dtype = values.dtype
    vtype = dtype.type
    if issubclass(vtype, np.floating):
        klass = FloatBlock
    elif issubclass(vtype, np.integer):
        if vtype != np.int64:
            values = values.totype('i8')
        klass = IntBlock
    elif dtype == np.bool_:
        klass = BoolBlock
    else:
        klass = ObjectBlock
    return klass(values, items, ref_items, ndim=values.ndim,
                 do_integrity_check=do_integrity_check)
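# A minimal sketch of how the factory above routes dtypes (the names below are
# illustrative only):
#   vals = np.random.randn(2, 3)                     # 2 items x 3 columns
#   ref = Index(['a', 'b', 'c'])
#   make_block(vals, ref[:2], ref)                   # floating -> FloatBlock
#   make_block(vals.totype('i8'), ref[:2], ref)      # integer  -> IntBlock
#   make_block(vals > 0, ref[:2], ref)               # bool     -> BoolBlock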
# TODO: flexible with index=None and/or items=None
class BlockManager(object):
    """
    Core internal data structure to implement KnowledgeFrame
    Manage a bunch of labeled 2D mixed-type ndarrays. Essentitotal_ally it's a
    lightweight blocked set of labeled data to be manipulated by the KnowledgeFrame
    public API class
    Parameters
    ----------
    Notes
    -----
    This is *not* a public API class
    """
    __slots__ = ['axes', 'blocks', 'ndim']
    def __init__(self, blocks, axes, do_integrity_check=True):
        self.axes = [_ensure_index(ax) for ax in axes]
        self.blocks = blocks
        ndim = length(axes)
        for block in blocks:
            assert(ndim == block.values.ndim)
        if do_integrity_check:
            self._verify_integrity()
    def __nonzero__(self):
        return True
    @property
    def ndim(self):
        return length(self.axes)
    def is_mixed_dtype(self):
        counts = set()
        for block in self.blocks:
            counts.add(block.dtype)
            if length(counts) > 1:
                return True
        return False
    def set_axis(self, axis, value):
        cur_axis = self.axes[axis]
        if length(value) != length(cur_axis):
            raise Exception('Length mismatch (%d vs %d)'
                            % (length(value), length(cur_axis)))
        self.axes[axis] = _ensure_index(value)
        if axis == 0:
            for block in self.blocks:
                block.set_ref_items(self.items, maybe_renagetting_ming=True)
    # make items read only for now
    def _getting_items(self):
        return self.axes[0]
    items = property(fgetting=_getting_items)
    def set_items_norenagetting_ming(self, value):
        value = _ensure_index(value)
        self.axes[0] = value
        for block in self.blocks:
            block.set_ref_items(value, maybe_renagetting_ming=False)
    def __gettingstate__(self):
        block_values = [b.values for b in self.blocks]
        block_items = [b.items for b in self.blocks]
        axes_array = [ax for ax in self.axes]
        return axes_array, block_values, block_items
    def __setstate__(self, state):
        # discard whateverthing after 3rd, support beta pickling formating for a little
        # while longer
        ax_arrays, bvalues, bitems = state[:3]
        self.axes = [_ensure_index(ax) for ax in ax_arrays]
        blocks = []
        for values, items in zip(bvalues, bitems):
            blk = make_block(values, items, self.axes[0],
                             do_integrity_check=True)
            blocks.adding(blk)
        self.blocks = blocks
    def __length__(self):
        return length(self.items)
    def __repr__(self):
        output = 'BlockManager'
        for i, ax in enumerate(self.axes):
            if i == 0:
                output += '\nItems: %s' % ax
            else:
                output += '\nAxis %d: %s' % (i, ax)
        for block in self.blocks:
            output += '\n%s' % repr(block)
        return output
    @property
    def shape(self):
        return tuple(length(ax) for ax in self.axes)
    def _verify_integrity(self):
        _union_block_items(self.blocks)
        mgr_shape = self.shape
        for block in self.blocks:
            assert(block.values.shape[1:] == mgr_shape[1:])
        tot_items = total_sum(length(x.items) for x in self.blocks)
        assert(length(self.items) == tot_items)
    def totype(self, dtype):
        new_blocks = []
        for block in self.blocks:
            newb = make_block(block.values.totype(dtype), block.items,
                              block.ref_items)
            new_blocks.adding(newb)
        new_mgr = BlockManager(new_blocks, self.axes)
        return new_mgr.consolidate()
    def is_consolidated(self):
        """
        Return True if no two blocks share the same dtype (i.e. the blocks are fully consolidated)
        """
        dtypes = [blk.dtype for blk in self.blocks]
        return length(dtypes) == length(set(dtypes))
    def getting_slice(self, slobj, axis=0):
        new_axes = list(self.axes)
        new_axes[axis] = new_axes[axis][slobj]
        if axis == 0:
            new_items = new_axes[0]
            if length(self.blocks) == 1:
                blk = self.blocks[0]
                newb = make_block(blk.values[slobj], new_items,
                                  new_items)
                new_blocks = [newb]
            else:
                return self.reindexing_items(new_items)
        else:
            new_blocks = self._slice_blocks(slobj, axis)
        return BlockManager(new_blocks, new_axes, do_integrity_check=False)
    def _slice_blocks(self, slobj, axis):
        new_blocks = []
        slicer = [slice(None, None) for _ in range(self.ndim)]
        slicer[axis] = slobj
        slicer = tuple(slicer)
        for block in self.blocks:
            newb = make_block(block.values[slicer], block.items,
                              block.ref_items)
            new_blocks.adding(newb)
        return new_blocks
    def getting_collections_dict(self):
        # For KnowledgeFrame
        return _blocks_to_collections_dict(self.blocks, self.axes[1])
    @classmethod
    def from_blocks(cls, blocks, index):
        # also checks for overlap
        items = _union_block_items(blocks)
        return BlockManager(blocks, [items, index])
    def __contains__(self, item):
        return item in self.items
    @property
    def nblocks(self):
        return length(self.blocks)
    def clone(self, deep=True):
        """
        Make deep or shtotal_allow clone of BlockManager
        Parameters
        ----------
        deep : boolean, default True
            If False, return shtotal_allow clone (do not clone data)
        Returns
        -------
        clone : BlockManager
        """
        clone_blocks = [block.clone(deep=deep) for block in self.blocks]
        # clone_axes = [ax.clone() for ax in self.axes]
        clone_axes = list(self.axes)
        return BlockManager(clone_blocks, clone_axes, do_integrity_check=False)
    def as_matrix(self, items=None):
        if length(self.blocks) == 0:
            mat = np.empty(self.shape, dtype=float)
        elif length(self.blocks) == 1:
            blk = self.blocks[0]
            if items is None or blk.items.equals(items):
                # if not, then just ctotal_all interleave per below
                mat = blk.values
            else:
                mat = self.reindexing_items(items).as_matrix()
        else:
            if items is None:
                mat = self._interleave(self.items)
            else:
                mat = self.reindexing_items(items).as_matrix()
        return mat
    def _interleave(self, items):
        """
        Return ndarray from blocks with specified item order
        Items must be contained in the blocks
        """
        dtype = _interleaved_dtype(self.blocks)
        items = _ensure_index(items)
        result = np.empty(self.shape, dtype=dtype)
        itemmask = np.zeros(length(items), dtype=bool)
        # By construction, total_all of the item should be covered by one of the
        # blocks
        for block in self.blocks:
            indexer = items.getting_indexer(block.items)
            assert((indexer != -1).total_all())
            result[indexer] = block.values
            itemmask[indexer] = 1
        assert(itemmask.total_all())
        return result
    def xs(self, key, axis=1, clone=True):
        assert(axis >= 1)
        loc = self.axes[axis].getting_loc(key)
        slicer = [slice(None, None) for _ in range(self.ndim)]
        slicer[axis] = loc
        slicer = tuple(slicer)
        new_axes = list(self.axes)
        # could be an array indexer!
        if incontainstance(loc, (slice, np.ndarray)):
            new_axes[axis] = new_axes[axis][loc]
        else:
            new_axes.pop(axis)
        new_blocks = []
        if length(self.blocks) > 1:
            if not clone:
                raise Exception('cannot getting view of mixed-type or '
                                'non-consolidated KnowledgeFrame')
            for blk in self.blocks:
                newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
                new_blocks.adding(newb)
        elif length(self.blocks) == 1:
            vals = self.blocks[0].values[slicer]
            if clone:
                vals = vals.clone()
            new_blocks = [make_block(vals, self.items, self.items)]
        return BlockManager(new_blocks, new_axes)
    def fast_2d_xs(self, loc, clone=False):
        """
        """
        if length(self.blocks) == 1:
            result = self.blocks[0].values[:, loc]
            if clone:
                result = result.clone()
            return result
        if not clone:
            raise Exception('cannot getting view of mixed-type or '
                            'non-consolidated KnowledgeFrame')
        dtype = _interleaved_dtype(self.blocks)
        items = self.items
        n = length(items)
        result = np.empty(n, dtype=dtype)
        for blk in self.blocks:
            values = blk.values
            for j, item in enumerate(blk.items):
                i = items.getting_loc(item)
                result[i] = values[j, loc]
        return result
    def consolidate(self):
        """
        Join togettingher blocks having same dtype
        Returns
        -------
        y : BlockManager
        """
        if self.is_consolidated():
            return self
        new_blocks = _consolidate(self.blocks, self.items)
        return BlockManager(new_blocks, self.axes)
    def getting(self, item):
        _, block = self._find_block(item)
        return block.getting(item)
    def getting_scalar(self, tup):
        """
        Retrieve single item
        """
        item = tup[0]
        _, blk = self._find_block(item)
        # this could obviously be seriously sped up in cython
        item_loc = blk.items.getting_loc(item),
        full_loc = item_loc + tuple(ax.getting_loc(x)
                                    for ax, x in zip(self.axes[1:], tup[1:]))
        return blk.values[full_loc]
    def delete(self, item):
        i, _ = self._find_block(item)
        loc = self.items.getting_loc(item)
        new_items = Index(np.delete(np.asarray(self.items), loc))
        self._delete_from_block(i, item)
        self.set_items_norenagetting_ming(new_items)
    def set(self, item, value):
        """
        Set new item in-place. Does not consolidate. Adds new Block if not
        contained in the current set of items
        """
        if value.ndim == self.ndim - 1:
            value = value.reshape((1,) + value.shape)
        assert(value.shape[1:] == self.shape[1:])
        if item in self.items:
            i, block = self._find_block(item)
            if not block.should_store(value):
                # delete from block, create and adding new block
                self._delete_from_block(i, item)
                self._add_new_block(item, value)
            else:
                block.set(item, value)
        else:
            # insert at end
            self.insert(length(self.items), item, value)
    def insert(self, loc, item, value):
        if item in self.items:
            raise Exception('cannot insert %s, already exists' % item)
        new_items = self.items.insert(loc, item)
        self.set_items_norenagetting_ming(new_items)
        # new block
        self._add_new_block(item, value)
    def _delete_from_block(self, i, item):
        """
        Delete and maybe remove the whole block
        """
        block = self.blocks.pop(i)
        new_left, new_right = block.split_block_at(item)
        if new_left is not None:
            self.blocks.adding(new_left)
        if new_right is not None:
            self.blocks.adding(new_right)
    def _add_new_block(self, item, value):
        # Do we care about dtype at the moment?
        # hm, elaborate hack?
        loc = self.items.getting_loc(item)
        new_block = make_block(value, self.items[loc:loc+1].clone(),
                               self.items)
        self.blocks.adding(new_block)
    def _find_block(self, item):
        self._check_have(item)
        for i, block in enumerate(self.blocks):
            if item in block:
                return i, block
    def _check_have(self, item):
        if item not in self.items:
            raise KeyError('no item named %s' % str(item))
    def reindexing_axis(self, new_axis, method=None, axis=0, clone=True):
        new_axis = _ensure_index(new_axis)
        cur_axis = self.axes[axis]
        if new_axis.equals(cur_axis):
            if clone:
                result = self.clone(deep=True)
                result.axes[axis] = new_axis
                return result
            else:
                return self
        if axis == 0:
            assert(method is None)
            return self.reindexing_items(new_axis)
        new_axis, indexer = cur_axis.reindexing(new_axis, method)
        return self.reindexing_indexer(new_axis, indexer, axis=axis)
    def reindexing_indexer(self, new_axis, indexer, axis=1):
        """
        monkey-indexer with -1's only.
        """
        if axis == 0:
            return self._reindexing_indexer_items(new_axis, indexer)
        mask = indexer == -1
        # TODO: deal with lengthgth-0 case? or does it ftotal_all out?
        needs_masking = length(new_axis) > 0 and mask.whatever()
        new_blocks = []
        for block in self.blocks:
            newb = block.reindexing_axis(indexer, mask, needs_masking,
                                      axis=axis)
            new_blocks.adding(newb)
        new_axes = list(self.axes)
        new_axes[axis] = new_axis
        return BlockManager(new_blocks, new_axes)
    def _reindexing_indexer_items(self, new_items, indexer):
        # TODO: less efficient than I'd like
        item_order = com.take_1d(self.items.values, indexer)
        # keep track of what items aren't found whateverwhere
        mask = np.zeros(length(item_order), dtype=bool)
        new_blocks = []
        for blk in self.blocks:
            blk_indexer = blk.items.getting_indexer(item_order)
            selector = blk_indexer != -1
            # umkate with observed items
            mask |= selector
            if not selector.whatever():
                continue
            new_block_items = new_items.take(selector.nonzero()[0])
            new_values = com.take_fast(blk.values, blk_indexer[selector],
                                       None, False, axis=0)
            new_blocks.adding(make_block(new_values, new_block_items,
                                         new_items))
        if not mask.total_all():
            na_items = new_items[-mask]
            na_block = self._make_na_block(na_items, new_items)
            new_blocks.adding(na_block)
            new_blocks = _consolidate(new_blocks, new_items)
        return BlockManager(new_blocks, [new_items] + self.axes[1:])
    def reindexing_items(self, new_items, clone=True):
        """
        """
        new_items = _ensure_index(new_items)
        data = self
        if not data.is_consolidated():
            data = data.consolidate()
            return data.reindexing_items(new_items)
        # TODO: this part could be faster (!)
        new_items, indexer = self.items.reindexing(new_items)
        # could have some pathological (MultiIndex) issues here
        new_blocks = []
        if indexer is None:
            for blk in self.blocks:
                if clone:
                    new_blocks.adding(blk.reindexing_items_from(new_items))
                else:
                    new_blocks.adding(blk)
        else:
            for block in self.blocks:
                newb = block.reindexing_items_from(new_items, clone=clone)
                if length(newb.items) > 0:
                    new_blocks.adding(newb)
            mask = indexer == -1
            if mask.whatever():
                extra_items = new_items[mask]
                na_block = self._make_na_block(extra_items, new_items)
                new_blocks.adding(na_block)
                new_blocks = _consolidate(new_blocks, new_items)
        return BlockManager(new_blocks, [new_items] + self.axes[1:])
    def _make_na_block(self, items, ref_items):
        block_shape = list(self.shape)
        block_shape[0] = length(items)
        block_values = np.empty(block_shape, dtype=np.float64)
        block_values.fill(nan)
        na_block = make_block(block_values, items, ref_items,
                              do_integrity_check=True)
        return na_block
    def take(self, indexer, axis=1):
        if axis == 0:
            raise NotImplementedError
        indexer = np.asarray(indexer, dtype='i4')
        n = length(self.axes[axis])
        if ((indexer == -1) | (indexer >= n)).whatever():
            raise Exception('Indices must be nonzero and less than '
                            'the axis lengthgth')
        new_axes = list(self.axes)
        new_axes[axis] = self.axes[axis].take(indexer)
        new_blocks = []
        for blk in self.blocks:
            new_values = com.take_fast(blk.values, indexer,
                                       None, False, axis=axis)
            newb = make_block(new_values, blk.items, self.items)
            new_blocks.adding(newb)
        return BlockManager(new_blocks, new_axes)
    def unioner(self, other, lsuffix=None, rsuffix=None):
        assert(self._is_indexed_like(other))
        this, other = self._maybe_renagetting_ming_join(other, lsuffix, rsuffix)
        cons_items = this.items + other.items
        consolidated = _consolidate(this.blocks + other.blocks, cons_items)
        new_axes = list(this.axes)
        new_axes[0] = cons_items
        return BlockManager(consolidated, new_axes)
    def _maybe_renagetting_ming_join(self, other, lsuffix, rsuffix, exclude=None,
                           clonedata=True):
        to_renagetting_ming = self.items.interst(other.items)
        if exclude is not None and length(exclude) > 0:
            to_renagetting_ming = to_renagetting_ming - exclude
        if length(to_renagetting_ming) > 0:
            if not lsuffix and not rsuffix:
                raise Exception('columns overlap: %s' % to_renagetting_ming)
            def lrenagetting_mingr(x):
                if x in to_renagetting_ming:
                    return '%s%s' % (x, lsuffix)
                return x
            def rrenagetting_mingr(x):
                if x in to_renagetting_ming:
                    return '%s%s' % (x, rsuffix)
                return x
            # XXX: COPIES DATA!
            this = self.renagetting_ming_items(lrenagetting_mingr, clonedata=clonedata)
            other = other.renagetting_ming_items(rrenagetting_mingr, clonedata=clonedata)
        else:
            this = self
        return this, other
    def _is_indexed_like(self, other):
        """
        Check total_all axes except items
        """
        assert(self.ndim == other.ndim)
        for ax, oax in zip(self.axes[1:], other.axes[1:]):
            if not ax.equals(oax):
                return False
        return True
    def renagetting_ming_axis(self, mappingper, axis=1):
        new_axis = Index([mappingper(x) for x in self.axes[axis]])
        new_axis._verify_integrity()
        new_axes = list(self.axes)
        new_axes[axis] = new_axis
        return BlockManager(self.blocks, new_axes)
    def renagetting_ming_items(self, mappingper, clonedata=True):
        new_items = Index([mappingper(x) for x in self.items])
        new_items._verify_integrity()
        new_blocks = []
        for block in self.blocks:
            newb = block.clone(deep=clonedata)
            newb.set_ref_items(new_items, maybe_renagetting_ming=True)
            new_blocks.adding(newb)
        new_axes = list(self.axes)
        new_axes[0] = new_items
        return BlockManager(new_blocks, new_axes)
    def add_prefix(self, prefix):
        f = (('%s' % prefix) + '%s').__mod__
        return self.renagetting_ming_items(f)
    def add_suffix(self, suffix):
        f = ('%s' + ('%s' % suffix)).__mod__
        return self.renagetting_ming_items(f)
    def fillnone(self, value):
        """
        """
        new_blocks = [b.fillnone(value) for b in self.blocks]
        return BlockManager(new_blocks, self.axes)
    @property
    def block_id_vector(self):
        # TODO
        result = np.empty(length(self.items), dtype=int)
        result.fill(-1)
        for i, blk in enumerate(self.blocks):
            indexer = self.items.getting_indexer(blk.items)
            assert((indexer != -1).total_all())
            result.put(indexer, i)
        assert((result >= 0).total_all())
        return result
    @property
    def item_dtypes(self):
        result = np.empty(length(self.items), dtype='O')
        mask = np.zeros(length(self.items), dtype=bool)
        for i, blk in enumerate(self.blocks):
            indexer = self.items.getting_indexer(blk.items)
            result.put(indexer, blk.values.dtype.name)
            mask.put(indexer, 1)
        assert(mask.total_all())
        return result
def form_blocks(data, axes):
    # pre-filter out items if we passed it
    items = axes[0]
    if length(data) < length(items):
        extra_items = items - Index(data.keys())
    else:
        extra_items = []
    # put "leftover" items in float bucket, where else?
    # generalize?
    float_dict = {}
    int_dict = {}
    bool_dict = {}
    object_dict = {}
    for k, v in data.iteritems():
        if issubclass(v.dtype.type, np.floating):
            float_dict[k] = v
        elif issubclass(v.dtype.type, np.integer):
            int_dict[k] = v
        elif v.dtype == np.bool_:
            bool_dict[k] = v
        else:
            object_dict[k] = v
    blocks = []
    if length(float_dict):
        float_block = _simple_blockify(float_dict, items, np.float64)
        blocks.adding(float_block)
    if length(int_dict):
        int_block = _simple_blockify(int_dict, items, np.int64)
        blocks.adding(int_block)
    if length(bool_dict):
        bool_block = _simple_blockify(bool_dict, items, np.bool_)
        blocks.adding(bool_block)
    if length(object_dict) > 0:
        object_block = _simple_blockify(object_dict, items, np.object_)
        blocks.adding(object_block)
    if length(extra_items):
        shape = (length(extra_items),) + tuple(length(x) for x in axes[1:])
        block_values = np.empty(shape, dtype=float)
        block_values.fill(nan)
        na_block = make_block(block_values, extra_items, items,
                              do_integrity_check=True)
        blocks.adding(na_block)
        blocks = _consolidate(blocks, items)
    return blocks
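# form_blocks groups the input dict of columns by dtype (float / int / bool /
# object), stacks each group into one homogeneous 2-D block via _simple_blockify,
# and pads items that are missing from `data` with an all-NaN float block.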
def _simple_blockify(dct, ref_items, dtype):
    block_items, values = _stack_dict(dct, ref_items, dtype)
    # CHECK DTYPE?
    if values.dtype != dtype: # pragma: no cover
        values = values.totype(dtype)
    return make_block(values, block_items, ref_items, do_integrity_check=True)
def _stack_dict(dct, ref_items, dtype):
    from monkey.core.collections import Collections
    # fml
    def _asarray_compat(x):
        # asarray shouldn't be ctotal_alled on SparseCollections
        if incontainstance(x, Collections):
            return x.values
        else:
            return np.asarray(x)
    def _shape_compat(x):
        # sparsecollections
        if incontainstance(x, Collections):
            return length(x),
        else:
            return x.shape
    items = [x for x in ref_items if x in dct]
    first = dct[items[0]]
    shape = (length(dct),) + _shape_compat(first)
    stacked = np.empty(shape, dtype=dtype)
    for i, item in enumerate(items):
        stacked[i] = _asarray_compat(dct[item])
    # stacked = np.vstack([_asarray_compat(dct[k]) for k in items])
    return items, stacked
def _blocks_to_collections_dict(blocks, index=None):
    from monkey.core.collections import Collections
    collections_dict = {}
    for block in blocks:
        for item, vec in zip(block.items, block.values):
            collections_dict[item] =  
 | 
	Collections(vec, index=index, name=item) 
 | 
	pandas.core.series.Series 
 | 
					
	
# coding: utf-8
# ## Lending Club - classification of loans
# 
# This project analyzes data for loans issued from 2007 to 2015 by Lending Club, available on Kaggle. The dataset contains over 887 thousand observations and 74 variables, one of which describes the loan status. The goal is to create a machine learning model to categorize the loans as good or bad. 
# 
# Contents:
# 
#     1. Preparing dataset for preprocessing
#     2. Reviewing variables - sip and edit
#     3. Missing values
#     4. Preparing dataset for modeling
#     5. Undersampling approach
# In[1]:
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
getting_ipython().run_line_magic('matplotlib', 'inline')
import datetime
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
sns.set(font_scale=1.6)
from sklearn.preprocessing import StandardScaler
# ### 1. Preparing dataset for preprocessing
# 
# In this part I will load data, briefly review the variables and prepare the 'y' value that will describe each loan as good or bad.
# In[2]:
data=mk.read_csv('../input/loan.csv',parse_dates=True)
mk.set_option('display.getting_max_columns', None)
mk.set_option('display.getting_max_rows', 20)
# In[3]:
data.shape
# In[4]:
data.header_num()
# In[5]:
mk.counts_value_num(data.loan_status).to_frame().reseting_index()
# There are 9 distinctive loan statuses. I will sip the ones that are fully paid, as these are historical entries. The next step will be to total_allocate 0 (good) to Current loans and 1 (bad) to the rest, including default and late loans, ones that were charged off, and ones in the grace period.
# 
# The first two are self-explanatory. A charged-off loan is a debt that is deemed unlikely to be collected by the creditor, but the debt is not necessarily forgiven or written off entirely. A grace period is a provision in most loan contracts which total_allows payment to be received for a certain period of time after the actual due date.
# In[6]:
data = data[data.loan_status != 'Fully Paid']
data = data[data.loan_status != 'Does not meet the credit policy. Status:Fully Paid']
# In[7]:
data['rating'] = np.where((data.loan_status != 'Current'), 1, 0)
# In[8]:
mk.counts_value_num(data.rating).to_frame()
# In[9]:
print ('Bad Loan Ratio: %.2f%%'  % (data.rating.total_sum()/length(data)*100))
# The data is strongly imbalanced; however, there are over 75 thousand bad loans, which should be enough for a model to learn from.
# In[10]:
data.info()
# ### 2. Reviewing variables - sip and edit
# 
# In this part I will review each non-numerical variable to either edit or sip it.
# There are two columns that describe a reason for the loan - title and purpose. As shown below, title has mwhatever more categories, which makes it less specific and less helpful for the model, so it will be sipped.
# In[11]:
mk.counts_value_num(data.title).to_frame()
# In[12]:
mk.counts_value_num(data.purpose).to_frame()
# The application type variable shows whether the loan is indivisionidual or joint - the small number of joint loans explains the huge number of NaN values in the other variables dedicated to these loans.
# 
# Will change this variable to binary.
# In[13]:
mk.counts_value_num(data.application_type).to_frame()
# In[14]:
app_type={'INDIVIDUAL':0,'JOINT':1}
data.application_type.replacing(app_type,inplace=True)
# In[15]:
mk.counts_value_num(data.term).to_frame()
# Term variable will be changed to numerical.
# In[16]:
term={' 36 months':36,' 60 months':60}
data.term.replacing(term,inplace=True)
# The following two variables are dedicated to the credit rating of each indivisionidual. I will change them to numerical while making sure that the hierarchy is taken into account: the lowest number will average the best grade/subgrade.
# In[17]:
mk.counts_value_num(data.grade).to_frame()
# In[18]:
grade=data.grade.distinctive()
grade.sort()
grade
# In[19]:
for x,e in enumerate(grade):
    data.grade.replacing(to_replacing=e,value=x,inplace=True)
# In[20]:
data.grade.distinctive()
# In[21]:
mk.counts_value_num(data.sub_grade).to_frame()
# In[22]:
sub_grade=data.sub_grade.distinctive()
sub_grade.sort()
sub_grade
# In[23]:
for x,e in enumerate(sub_grade):
    data.sub_grade.replacing(to_replacing=e,value=x,inplace=True)
data.sub_grade.distinctive()
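# Assuming the standard Lending Club scale (grades A-G, sub-grades A1-G5), this
# ordinal encoding maps grade A to 0 and G to 6, and sub_grade A1 to 0 and G5 to 34.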
# The following two variables describe the title and lengthgth of employment. Title has 212 thousand categories, so it will be sipped. Lengthgth of employment should be sufficient to show whether an indivisionidual has a stable job.
# In[24]:
mk.counts_value_num(data.emp_title).to_frame()
# In[25]:
mk.counts_value_num(data.emp_lengthgth).to_frame()
# In[26]:
emp_length={'n/a':0,'< 1 year':1,'1 year':2,'2 years':3,'3 years':4,'4 years':5,'5 years':6,'6 years':7,'7 years':8,'8 years':9,'9 years':10,'10+ years':11}
data.emp_lengthgth.replacing(emp_length,inplace=True)
data.emp_lengthgth=data.emp_lengthgth.replacing(np.nan,0)
data.emp_lengthgth.distinctive()
# The home ownership variable should be informatingive for the model, as indivisioniduals who own their home should be much safer clients than ones that only rent it.
# In[27]:
mk.counts_value_num(data.home_ownership).to_frame()
# The verification status variable indicates whether the source of income of a client was verified.
# In[28]:
mk.counts_value_num(data.verification_status).to_frame()
# Payment plan variable will be sipped as it has only 3 'y' values.
# In[29]:
mk.counts_value_num(data.pymnt_plan).to_frame()
# Zip code informatingion is too specific: there are 930 indivisionidual values, and there is no sense in making it more general by cutting it to two digits, as this would only describe the state, which the next variable already does. Zip code will be sipped.
# In[30]:
mk.counts_value_num(data.zip_code).to_frame()
# In[31]:
mk.counts_value_num(data.addr_state).to_frame()
# The next variable is the initial listing status of the loan. Possible values are W and F; it will be changed to binary.
# In[32]:
mk.counts_value_num(data.initial_list_status).to_frame()
# In[33]:
int_status={'w':0,'f':1}
data.initial_list_status.replacing(int_status,inplace=True)
# Policy code has only 1 value, so it will be sipped.
# In[34]:
mk.counts_value_num(data.policy_code).to_frame()
# The recoveries variable informs about post-charge-off gross recovery. I will transform this into a binary variable that shows whether the loan was recovered, and will sip the recovery fee as it duplicates similar informatingion.
# In[35]:
mk.counts_value_num(data.recoveries).to_frame()
# In[36]:
data['recovery'] = np.where((data.recoveries != 0.00), 1, 0)
# In[37]:
mk.counts_value_num(data.collection_recovery_fee).to_frame()
# There are a couple of variables that can be transformed to datetime.
# In[38]:
data.issue_d=mk.convert_datetime(data.issue_d)
# In[39]:
earliest_cr_line=mk.convert_datetime(data.earliest_cr_line)
data.earliest_cr_line=earliest_cr_line.dt.year
# In[40]:
data.final_item_pymnt_d=mk.convert_datetime(data.final_item_pymnt_d)
data.next_pymnt_d=mk.convert_datetime(data.next_pymnt_d)
data.final_item_credit_pull_d=mk.convert_datetime(data.final_item_credit_pull_d)
# Dropping total_all variables mentioned above.
# In[41]:
data.sip(['id','member_id','desc','loan_status','url', 'title','collection_recovery_fee','recoveries','policy_code','zip_code','emp_title','pymnt_plan'],axis=1,inplace=True)
# In[42]:
data.header_num(10)
# ### 3. Missing values
# 
# There are observations that contain missing values; I will review and transform them variable by variable.
# I will start by defining a function that creates a data frame of metadata containing the count of null values and the data type of each variable.
# In[43]:
def meta(knowledgeframe):
    metadata = []
    for f in knowledgeframe.columns:
    
        # Counting null values
        null = knowledgeframe[f].ifnull().total_sum()
    
        # Defining the data type 
        dtype = knowledgeframe[f].dtype
    
        # Creating a Dict that contains total_all the metadata for the variable
        f_dict = {
            'varname': f,
            'nulls':null,
            'dtype': dtype
        }
        metadata.adding(f_dict)
    meta = mk.KnowledgeFrame(metadata, columns=['varname','nulls', 'dtype'])
    meta.set_index('varname', inplace=True)
    meta=meta.sort_the_values(by=['nulls'],ascending=False)
    return meta
# In[44]:
meta(data)
# Variables dti_joint, annual_inc_joint and verification_status_joint have so many null values because there are only 510 joint loans. I will replace NaN with 0, and with 'None' for the status.
# In[45]:
data.dti_joint=data.dti_joint.replacing(np.nan,0)
data.annual_inc_joint=data.annual_inc_joint.replacing(np.nan,0)
data.verification_status_joint=data.verification_status_joint.replacing(np.nan,'None')
# Investigating variables connected to open_acc_6m, which shows the number of open trades in the last 6 months. Variables open_il_6m, open_il_12m, open_il_24m, mths_since_rcnt_il, total_bal_il, il_util, open_rv_12m, open_rv_24m, getting_max_bal_bc, total_all_util, inq_fi, total_cu_tl, inq_final_item_12m and collections_12_mths_ex_med have null values for the same rows - I will change them all to 0, as the missing values indicate a lack of open trades.
# In[46]:
data.loc[(data.open_acc_6m.ifnull())].info()
# In[47]:
variables1=['open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'getting_max_bal_bc', 'total_all_util', 'inq_fi', 'total_cu_tl', 'inq_final_item_12m','collections_12_mths_ex_med']
for e in variables1:
    data[e]=data[e].replacing(np.nan,0)
    
meta(data)
# Variables containing the number of months since the last occurrence of a specific action have plenty of null values, which I interpret as the event never having occurred.
# In[48]:
mk.counts_value_num(data.mths_since_final_item_record).distinctive()
# In[49]:
mk.counts_value_num(data.mths_since_final_item_major_derog).distinctive()
# In[50]:
mk.counts_value_num(data.mths_since_final_item_delinq).distinctive()
# Null values in these columns can't be replaced with 0, as that would mean the last occurrence was very recent. My understanding of these variables is that the key information is whether the specific action took place at all (delinquency, public record, worse rating), so I will turn these into binary categories of Yes (1) and No (0).
# In[51]:
data.loc[(data.mths_since_final_item_delinq.notnull()),'delinq']=1
data.loc[(data.mths_since_final_item_delinq.ifnull()),'delinq']=0
data.loc[(data.mths_since_final_item_major_derog.notnull()),'derog']=1
data.loc[(data.mths_since_final_item_major_derog.ifnull()),'derog']=0
data.loc[(data.mths_since_final_item_record.notnull()),'public_record']=1
data.loc[(data.mths_since_final_item_record.ifnull()),'public_record']=0
data.sip(['mths_since_final_item_delinq','mths_since_final_item_major_derog','mths_since_final_item_record'],axis=1,inplace=True)
meta(data)
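# An essentially equivalent, more concise way to build these three flags (before the
# source columns are dropped above) would be, for example:
#     data['delinq'] = data.mths_since_final_item_delinq.notnull().totype(int)
# and likewise for 'derog' and 'public_record'.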
# Investigating tot_coll_amt, tot_cur_bal and total_rev_hi_lim - these are three totals that have missing values for the same observations. I will change them to 0, as a missing value should mean that the total is 0.
# In[52]:
data.loc[(data.tot_coll_amt.ifnull())].info()
# In[53]:
variables2=['tot_coll_amt', 'tot_cur_bal', 'total_rev_hi_lim']
for e in variables2:
    data[e]=data[e].replacing(np.nan,0)
    
meta(data)
# The revol_util variable is the revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit.
# In[54]:
data.loc[(data.revol_util.ifnull())].header_num(10)
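# One simple option for these few missing values (assuming we do not want to drop the
# rows) would be to treat them like the other missing totals above, e.g.
#     data.revol_util = data.revol_util.replacing(np.nan, 0)
# though filling with a typical utilization rate may arguably be preferable.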
# In[55]:
 
 | 
	mk.counts_value_num(data.revol_util) 
 | 
	pandas.value_counts 
 | 
					
	"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
    params=[
        np.array(["a", "b"], dtype=object),
        np.array([0, 1], dtype=float),
        np.array([0, 1], dtype=int),
        np.array([0, 1 + 2j], dtype=complex),
        np.array([True, False], dtype=bool),
        np.array([0, 1], dtype="datetime64[ns]"),
        np.array([0, 1], dtype="timedelta64[ns]"),
    ]
)
def whatever_numpy_array(request):
    """
    Parametrized fixture for NumPy arrays with different dtypes.
    This excludes string and bytes.
    """
    return request.param
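# Minimal round-trip sketch using the fixture above (illustrative addition; it assumes
# only the public MonkeyArray constructor and its to_numpy method).
def test_to_numpy_roundtrip(whatever_numpy_array):
    arr = MonkeyArray(whatever_numpy_array)
    # converting back should yield the same values and dtype as the input ndarray
    tm.assert_numpy_array_equal(arr.to_numpy(), whatever_numpy_array)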
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", True),
        ("uint", True),
        ("float", True),
        ("complex", True),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_numeric(dtype, expected):
    dtype = MonkeyDtype(dtype)
    assert dtype._is_numeric is expected
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", False),
        ("uint", False),
        ("float", False),
        ("complex", False),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_boolean(dtype, expected):
    dtype = MonkeyDtype(dtype)
    assert dtype._is_boolean is expected
def test_repr():
    dtype = MonkeyDtype(np.dtype("int64"))
    assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
    result =  
 | 
	MonkeyDtype.construct_from_string("int64") 
 | 
	pandas.core.arrays.numpy_.PandasDtype.construct_from_string 
 | 
					
	# -*- coding: utf-8 -*-
"""
Created on Wed Oct  7 15:50:55 2020
@author: Emmett
"""
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
import LDA_Sampler
import string
import clone
import monkey as mk
import numpy as np
import keras.backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import kerastuner as kt
import IPython
from keras import regularizers
from keras.models import Model
from numpy import linalg as LA
from nltk.corpus import stopwords
from scipy.special import gammaln
from keras.models import Sequential
from scipy.sparse import csr_matrix
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfikfVectorizer
from keras.layers import Dense, Activation, Embedding, LSTM
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
make_singularRoot = nltk.stem.WordNetLemmatizer()
remove_ws = nltk.tokenize.WhitespaceTokenizer()
def preprocess(mk):
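    # Note: the parameter name shadows the monkey import alias above; within this
    # function, `mk` is the text Collections being cleaned, not the library.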
    mk = mk.str.lower()
    mk = mk.str.replacing('[{}]'.formating(string.punctuation), ' ')
    mk = mk.employ(lambda x: [make_singularRoot.lemmatize(w) for w in remove_ws.tokenize(x)])
    mk =  
 | 
	mk.employ(lambda x: [item for item in x if item not in stoplist]) 
 | 
	pandas.apply 
 | 
					
	import numpy as np
import pytest
from monkey import (
    KnowledgeFrame,
    IndexSlice,
    NaT,
    Timestamp,
)
import monkey._testing as tm
pytest.importorskip("jinja2")
from monkey.io.formatings.style import Styler
from monkey.io.formatings.style_render import _str_escape
@pytest.fixture
def kf():
    return KnowledgeFrame(
        data=[[0, -0.609], [1, -1.228]],
        columns=["A", "B"],
        index=["x", "y"],
    )
@pytest.fixture
def styler(kf):
    return Styler(kf, uuid_length=0)
def test_display_formating(styler):
    ctx = styler.formating("{:0.1f}")._translate(True, True)
    assert total_all(["display_value" in c for c in row] for row in ctx["body"])
    assert total_all([length(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"])
    assert length(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
def test_formating_dict(styler):
    ctx = styler.formating({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "0.0"
    assert ctx["body"][0][2]["display_value"] == "-60.90%"
def test_formating_string(styler):
    ctx = styler.formating("{:.2f}")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "0.00"
    assert ctx["body"][0][2]["display_value"] == "-0.61"
    assert ctx["body"][1][1]["display_value"] == "1.00"
    assert ctx["body"][1][2]["display_value"] == "-1.23"
def test_formating_ctotal_allable(styler):
    ctx = styler.formating(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "pos"
    assert ctx["body"][0][2]["display_value"] == "neg"
    assert ctx["body"][1][1]["display_value"] == "pos"
    assert ctx["body"][1][2]["display_value"] == "neg"
def test_formating_with_na_rep():
    # GH 21527 28358
    kf = KnowledgeFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
    ctx = kf.style.formating(None, na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    ctx = kf.style.formating("{:.2%}", na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][1]["display_value"] == "110.00%"
    assert ctx["body"][1][2]["display_value"] == "120.00%"
    ctx = kf.style.formating("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][2]["display_value"] == "120.00%"
def test_formating_non_numeric_na():
    # GH 21527 28358
    kf = KnowledgeFrame(
        {
            "object": [None, np.nan, "foo"],
            "datetime": [None, NaT, Timestamp("20120101")],
        }
    )
    with tm.assert_produces_warning(FutureWarning):
        ctx = kf.style.set_na_rep("NA")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "NA"
    assert ctx["body"][0][2]["display_value"] == "NA"
    assert ctx["body"][1][1]["display_value"] == "NA"
    assert ctx["body"][1][2]["display_value"] == "NA"
    ctx = kf.style.formating(None, na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][1]["display_value"] == "-"
    assert ctx["body"][1][2]["display_value"] == "-"
def test_formating_clear(styler):
    assert (0, 0) not in styler._display_funcs  # using default
    styler.formating("{:.2f")
    assert (0, 0) in styler._display_funcs  # formatingter is specified
    styler.formating()
    assert (0, 0) not in styler._display_funcs  # formatingter cleared to default
@pytest.mark.parametrize(
    "escape, exp",
    [
        ("html", "<>&"%$#_{}~^\\~ ^ \\ "),
        (
            "latex",
            '<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
            "\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
            "\\textbackslash \\space ",
        ),
    ],
)
def test_formating_escape_html(escape, exp):
    chars = '<>&"%$#_{}~^\\~ ^ \\ '
    kf = KnowledgeFrame([[chars]])
    s = Styler(kf, uuid_length=0).formating("&{0}&", escape=None)
    expected = f'<td id="T__row0_col0" class="data row0 col0" >&{chars}&</td>'
    assert expected in s.render()
    # only the value should be escaped before passing to the formatingter
    s = Styler(kf, uuid_length=0).formating("&{0}&", escape=escape)
    expected = f'<td id="T__row0_col0" class="data row0 col0" >&{exp}&</td>'
    assert expected in s.render()
def test_formating_escape_na_rep():
    # tests the na_rep is not escaped
    kf = KnowledgeFrame([['<>&"', None]])
    s = Styler(kf, uuid_length=0).formating("X&{0}>X", escape="html", na_rep="&")
    ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>'
    expected2 = '<td id="T__row0_col1" class="data row0 col1" >&</td>'
    assert ex in s.render()
    assert expected2 in s.render()
def test_formating_escape_floats(styler):
    # test given formatingter for number formating is not impacted by escape
    s = styler.formating("{:.1f}", escape="html")
    for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]:
        assert expected in s.render()
    # tests precision of floats is not impacted by escape
    s = styler.formating(precision=1, escape="html")
    for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]:
        assert expected in s.render()
@pytest.mark.parametrize("formatingter", [5, True, [2.0]])
def test_formating_raises(styler, formatingter):
    with pytest.raises(TypeError, match="expected str or ctotal_allable"):
        styler.formating(formatingter)
def test_formating_with_precision():
    # Issue #13257
    kf = KnowledgeFrame(data=[[1.0, 2.0090], [3.2121, 4.566]], columns=["a", "b"])
    s = Styler(kf)
    ctx = s.formating(precision=1)._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "1.0"
    assert ctx["body"][0][2]["display_value"] == "2.0"
    assert ctx["body"][1][1]["display_value"] == "3.2"
    assert ctx["body"][1][2]["display_value"] == "4.6"
    ctx = s.formating(precision=2)._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "1.00"
    assert ctx["body"][0][2]["display_value"] == "2.01"
    assert ctx["body"][1][1]["display_value"] == "3.21"
    assert ctx["body"][1][2]["display_value"] == "4.57"
    ctx = s.formating(precision=3)._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "1.000"
    assert ctx["body"][0][2]["display_value"] == "2.009"
    assert ctx["body"][1][1]["display_value"] == "3.212"
    assert ctx["body"][1][2]["display_value"] == "4.566"
def test_formating_subset():
    kf = KnowledgeFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
    ctx = kf.style.formating(
        {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :]
    )._translate(True, True)
    expected = "0.1"
    raw_11 = "1.123400"
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == raw_11
    assert ctx["body"][0][2]["display_value"] == "12.34%"
    ctx = kf.style.formating("{:0.1f}", subset=IndexSlice[0, :])._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == raw_11
    ctx = kf.style.formating("{:0.1f}", subset=IndexSlice["a"])._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][0][2]["display_value"] == "0.123400"
    ctx = kf.style.formating("{:0.1f}", subset=IndexSlice[0, "a"])._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == raw_11
    ctx = kf.style.formating("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate(
        True, True
    )
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == "1.1"
    assert ctx["body"][0][2]["display_value"] == "0.123400"
    assert ctx["body"][1][2]["display_value"] == raw_11
@pytest.mark.parametrize("formatingter", [None, "{:,.1f}"])
@pytest.mark.parametrize("decimal", [".", "*"])
@pytest.mark.parametrize("precision", [None, 2])
def test_formating_thousands(formatingter, decimal, precision):
    s = KnowledgeFrame([[1000000.123456789]]).style  # test float
    result = s.formating(
        thousands="_", formatingter=formatingter, decimal=decimal, precision=precision
    )._translate(True, True)
    assert "1_000_000" in result["body"][0][1]["display_value"]
    s = KnowledgeFrame([[1000000]]).style  # test int
    result = s.formating(
        thousands="_", formatingter=formatingter, decimal=decimal, precision=precision
    )._translate(True, True)
    assert "1_000_000" in result["body"][0][1]["display_value"]
    s = KnowledgeFrame([[1 + 1000000.123456789j]]).style  # test complex
    result = s.formating(
        thousands="_", formatingter=formatingter, decimal=decimal, precision=precision
    )._translate(True, True)
    assert "1_000_000" in result["body"][0][1]["display_value"]
@pytest.mark.parametrize("formatingter", [None, "{:,.4f}"])
@pytest.mark.parametrize("thousands", [None, ",", "*"])
@pytest.mark.parametrize("precision", [None, 4])
def test_formating_decimal(formatingter, thousands, precision):
    s = KnowledgeFrame([[1000000.123456789]]).style  # test float
    result = s.formating(
        decimal="_", formatingter=formatingter, thousands=thousands, precision=precision
    )._translate(True, True)
    assert "000_123" in result["body"][0][1]["display_value"]
    s = KnowledgeFrame([[1 + 1000000.123456789j]]).style  # test complex
    result = s.formating(
        decimal="_", formatingter=formatingter, thousands=thousands, precision=precision
    )._translate(True, True)
    assert "000_123" in result["body"][0][1]["display_value"]
def test_str_escape_error():
    msg = "`escape` only permitted in {'html', 'latex'}, got "
    with pytest.raises(ValueError, match=msg):
        _str_escape("text", "bad_escape")
    with pytest.raises(ValueError, match=msg):
         
 | 
	_str_escape("text", []) 
 | 
	pandas.io.formats.style_render._str_escape 
 | 
					
	
# coding: utf-8
# # Python for Padawans
# 
# This tutorial will go through the basic data wrangling workflow I'm sure you all love to hate, in Python! 
# FYI: I come from an R background (aka I'm not a proper programmer), so if you see any formatting issues please cut me a bit of slack. 
# 
# **The aim of this post is to show people how to easily move their R workflows to Python (especially monkey/scikit)**
# 
# One thing I especially like is how consistent all the functions are. You don't need to switch up your style like you have to when you move from base R to dplyr etc. 
# 
# And also, it's apparently much easier to push code to production using Python than R. So there's that. 
# 
# ### 1. Reading in libraries
# In[ ]:
getting_ipython().run_line_magic('matplotlib', 'inline')
import os
import monkey as mk
from matplotlib import pyplot as plt
import numpy as np
import math
# #### Don't forget that %matplotlib function. Otherwise your graphs will pop up in separate windows and stop the execution of further cells. And nobody got time for that.
# 
# ### 2. Reading in data
# In[ ]:
data = mk.read_csv('../input/loan.csv', low_memory=False)
data.sip(['id', 'member_id', 'emp_title'], axis=1, inplace=True)
data.replacing('n/a', np.nan,inplace=True)
data.emp_lengthgth.fillnone(value=0,inplace=True)
data['emp_lengthgth'].replacing(to_replacing='[^0-9]+', value='', inplace=True, regex=True)
data['emp_lengthgth'] = data['emp_lengthgth'].totype(int)
data['term'] = data['term'].employ(lambda x: x.lstrip())
# ### 3. Basic plotting using Seaborn
# 
# Now let's make some pretty graphs. Coming from R I definitely prefer ggplot2, but the more I use Seaborn, the more I like it. If you kinda forget about adding "+" to your graphs and instead use the dot operator, it does essentially the same stuff.
# 
# **And I've just found out that you can create your own style sheets to make life easier. Wahoo!**
# 
# But anyway, below I'll show you how to format a decent-looking Seaborn graph, as well as how to summarise a given knowledgeframe.
# In[ ]:
import seaborn as sns
import matplotlib
s = mk.counts_value_num(data['emp_lengthgth']).to_frame().reseting_index()
s.columns = ['type', 'count']
def emp_dur_graph(graph_title):
    sns.set_style("whitegrid")
    ax = sns.barplot(y = "count", x = 'type', data=s)
    ax.set(xlabel = '', ylabel = '', title = graph_title)
    ax.getting_yaxis().set_major_formatingter(
    matplotlib.ticker.FuncFormatter(lambda x, p: formating(int(x), ',')))
    _ = ax.set_xticklabels(ax.getting_xticklabels(), rotation=0)
    
emp_dur_graph('Distribution of employment lengthgth for issued loans')
# ### 4. Using Seaborn stylesheets
# 
# Now before we move on, we'll look at using style sheets to customize our graphs nice and quickly.
# In[ ]:
import seaborn as sns
import matplotlib
print (plt.style.available)
# Now you can see that we've got quite a few to play with. I'm going to focus on the following styles:
# 
# - fivethirtyeight (because it's my fav website)
# - seaborn-notebook
# - ggplot
# - classic
# In[ ]:
import seaborn as sns
import matplotlib
plt.style.use('fivethirtyeight')
ax = emp_dur_graph('Fivethirty eight style')
# In[ ]:
plt.style.use('seaborn-notebook')
ax = emp_dur_graph('Seaborn-notebook style')
# In[ ]:
plt.style.use('ggplot')
ax = emp_dur_graph('ggplot style')
# In[ ]:
plt.style.use('classic')
ax = emp_dur_graph('classic style')
# ### 5. Working with dates
# 
# Now we want to look at datetimes. Dates can be quite difficult to manipulate, but it's worth the wait. Once they're formatted correctly, life becomes much easier.
# In[ ]:
import datetime
data.issue_d.fillnone(value=np.nan,inplace=True)
issue_d_todate = mk.convert_datetime(data.issue_d)
data.issue_d = mk.Collections(data.issue_d).str.replacing('-2015', '')
data.emp_lengthgth.fillnone(value=np.nan,inplace=True)
data.sip(['loan_status'],1, inplace=True)
data.sip(['pymnt_plan','url','desc','title' ],1, inplace=True)
data.earliest_cr_line = mk.convert_datetime(data.earliest_cr_line)
import datetime as dt
data['earliest_cr_line_year'] = data['earliest_cr_line'].dt.year
# ### 6. Making faceted graphs using Seaborn
# 
# Now I'll show you how you can build on the above data frame summaries, as well as make some facet graphs.
# In[ ]:
import seaborn as sns
import matplotlib.pyplot as plt
s =  
 | 
	mk.counts_value_num(data['earliest_cr_line']) 
 | 
	pandas.value_counts 
 | 
					
	from contextlib import contextmanager
import struct
import tracemtotal_alloc
import numpy as np
import pytest
from monkey._libs import hashtable as ht
import monkey as mk
import monkey._testing as tm
from monkey.core.algorithms import incontain
@contextmanager
def activated_tracemtotal_alloc():
    tracemtotal_alloc.start()
    try:
        yield
    fintotal_ally:
        tracemtotal_alloc.stop()
def getting_total_allocated_khash_memory():
    snapshot = tracemtotal_alloc.take_snapshot()
    snapshot = snapshot.filter_traces(
        (tracemtotal_alloc.DomainFilter(True, ht.getting_hashtable_trace_domain()),)
    )
    return total_sum(mapping(lambda x: x.size, snapshot.traces))
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.PyObjectHashTable, np.object_),
        (ht.Complex128HashTable, np.complex128),
        (ht.Int64HashTable, np.int64),
        (ht.UInt64HashTable, np.uint64),
        (ht.Float64HashTable, np.float64),
        (ht.Complex64HashTable, np.complex64),
        (ht.Int32HashTable, np.int32),
        (ht.UInt32HashTable, np.uint32),
        (ht.Float32HashTable, np.float32),
        (ht.Int16HashTable, np.int16),
        (ht.UInt16HashTable, np.uint16),
        (ht.Int8HashTable, np.int8),
        (ht.UInt8HashTable, np.uint8),
        (ht.IntpHashTable, np.intp),
    ],
)
class TestHashTable:
    def test_getting_set_contains_length(self, table_type, dtype):
        index = 5
        table = table_type(55)
        assert length(table) == 0
        assert index not in table
        table.set_item(index, 42)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 42
        table.set_item(index + 1, 41)
        assert index in table
        assert index + 1 in table
        assert length(table) == 2
        assert table.getting_item(index) == 42
        assert table.getting_item(index + 1) == 41
        table.set_item(index, 21)
        assert index in table
        assert index + 1 in table
        assert length(table) == 2
        assert table.getting_item(index) == 21
        assert table.getting_item(index + 1) == 41
        assert index + 2 not in table
        with pytest.raises(KeyError, match=str(index + 2)):
            table.getting_item(index + 2)
    def test_mapping_keys_to_values(self, table_type, dtype, writable):
        # only Int64HashTable has this method
        if table_type == ht.Int64HashTable:
            N = 77
            table = table_type()
            keys = np.arange(N).totype(dtype)
            vals = np.arange(N).totype(np.int64) + N
            keys.flags.writeable = writable
            vals.flags.writeable = writable
            table.mapping_keys_to_values(keys, vals)
            for i in range(N):
                assert table.getting_item(keys[i]) == i + N
    def test_mapping_locations(self, table_type, dtype, writable):
        N = 8
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        keys.flags.writeable = writable
        table.mapping_locations(keys)
        for i in range(N):
            assert table.getting_item(keys[i]) == i
    def test_lookup(self, table_type, dtype, writable):
        N = 3
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        keys.flags.writeable = writable
        table.mapping_locations(keys)
        result = table.lookup(keys)
        expected = np.arange(N)
        tm.assert_numpy_array_equal(result.totype(np.int64), expected.totype(np.int64))
    def test_lookup_wrong(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 100
        else:
            N = 512
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        table.mapping_locations(keys)
        wrong_keys = np.arange(N).totype(dtype)
        result = table.lookup(wrong_keys)
        assert np.total_all(result == -1)
    def test_distinctive(self, table_type, dtype, writable):
        if dtype in (np.int8, np.uint8):
            N = 88
        else:
            N = 1000
        table = table_type()
        expected = (np.arange(N) + N).totype(dtype)
        keys = np.repeat(expected, 5)
        keys.flags.writeable = writable
        distinctive = table.distinctive(keys)
        tm.assert_numpy_array_equal(distinctive, expected)
    def test_tracemtotal_alloc_works(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 256
        else:
            N = 30000
        keys = np.arange(N).totype(dtype)
        with activated_tracemtotal_alloc():
            table = table_type()
            table.mapping_locations(keys)
            used = getting_total_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert getting_total_allocated_khash_memory() == 0
    def test_tracemtotal_alloc_for_empty(self, table_type, dtype):
        with activated_tracemtotal_alloc():
            table = table_type()
            used = getting_total_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert getting_total_allocated_khash_memory() == 0
    def test_getting_state(self, table_type, dtype):
        table = table_type(1000)
        state = table.getting_state()
        assert state["size"] == 0
        assert state["n_occupied"] == 0
        assert "n_buckets" in state
        assert "upper_bound" in state
    @pytest.mark.parametrize("N", range(1, 110))
    def test_no_retotal_allocation(self, table_type, dtype, N):
        keys = np.arange(N).totype(dtype)
        pretotal_allocated_table = table_type(N)
        n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
        pretotal_allocated_table.mapping_locations(keys)
        n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
        # original number of buckets was enough:
        assert n_buckets_start == n_buckets_end
        # check with clean table (not too much pretotal_allocated)
        clean_table = table_type()
        clean_table.mapping_locations(keys)
        assert n_buckets_start == clean_table.getting_state()["n_buckets"]
class TestHashTableUnsorted:
    # TODO: moved from test_algos; may be redundancies with other tests
    def test_string_hashtable_set_item_signature(self):
        # GH#30419 fix typing in StringHashTable.set_item to prevent segfault
        tbl = ht.StringHashTable()
        tbl.set_item("key", 1)
        assert tbl.getting_item("key") == 1
        with pytest.raises(TypeError, match="'key' has incorrect type"):
            # key arg typed as string, not object
            tbl.set_item(4, 6)
        with pytest.raises(TypeError, match="'val' has incorrect type"):
            tbl.getting_item(4)
    def test_lookup_nan(self, writable):
        # GH#21688 ensure we can deal with readonly memory views
        xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
        xs.setflags(write=writable)
        m = ht.Float64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    def test_add_signed_zeros(self):
        # GH#21866 inconsistent hash-function for float64
        # default hash-function would lead to different hash-buckets
        # for 0.0 and -0.0 if there are more than 2^30 hash-buckets
        # but this would mean 16GB
        N = 4  # 12 * 10**8 would trigger the error, if you have enough memory
        m = ht.Float64HashTable(N)
        m.set_item(0.0, 0)
        m.set_item(-0.0, 0)
        assert length(m) == 1  # 0.0 and -0.0 are equivalent
    def test_add_different_nans(self):
        # GH#21866 inconsistent hash-function for float64
        # create different nans from bit-patterns:
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # default hash function would lead to different hash-buckets
        # for NAN1 and NAN2 even if there are only 4 buckets:
        m = ht.Float64HashTable()
        m.set_item(NAN1, 0)
        m.set_item(NAN2, 0)
        assert length(m) == 1  # NAN1 and NAN2 are equivalent
    def test_lookup_overflow(self, writable):
        xs = np.array([1, 2, 2**63], dtype=np.uint64)
        # GH 21688 ensure we can deal with readonly memory views
        xs.setflags(write=writable)
        m = ht.UInt64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    @pytest.mark.parametrize("nvals", [0, 10])  # resizing to 0 is special case
    @pytest.mark.parametrize(
        "htable, distinctives, dtype, safely_resizes",
        [
            (ht.PyObjectHashTable, ht.ObjectVector, "object", False),
            (ht.StringHashTable, ht.ObjectVector, "object", True),
            (ht.Float64HashTable, ht.Float64Vector, "float64", False),
            (ht.Int64HashTable, ht.Int64Vector, "int64", False),
            (ht.Int32HashTable, ht.Int32Vector, "int32", False),
            (ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
        ],
    )
    def test_vector_resize(
        self, writable, htable, distinctives, dtype, safely_resizes, nvals
    ):
        # Test for memory errors after internal vector
        # retotal_allocations (GH 7157)
        # Changed from using np.random.rand to range
        # which could cause flaky CI failures when safely_resizes=False
        vals = np.array(range(1000), dtype=dtype)
        # GH 21688 ensures we can deal with read-only memory views
        vals.setflags(write=writable)
        # initialise instances; cannot initialise in parametrization,
        # as otherwise external views would be held on the array (which is
        # one of the things this test is checking)
        htable = htable()
        distinctives = distinctives()
        # getting_labels may adding to distinctives
        htable.getting_labels(vals[:nvals], distinctives, 0, -1)
        # to_array() sets an external_view_exists flag on distinctives.
        tmp = distinctives.to_array()
        oldshape = tmp.shape
        # subsequent getting_labels() ctotal_alls can no longer adding to it
        # (except for StringHashTables + ObjectVector)
        if safely_resizes:
            htable.getting_labels(vals, distinctives, 0, -1)
        else:
            with pytest.raises(ValueError, match="external reference.*"):
                htable.getting_labels(vals, distinctives, 0, -1)
        distinctives.to_array()  # should not raise here
        assert tmp.shape == oldshape
    @pytest.mark.parametrize(
        "hashtable",
        [
            ht.PyObjectHashTable,
            ht.StringHashTable,
            ht.Float64HashTable,
            ht.Int64HashTable,
            ht.Int32HashTable,
            ht.UInt64HashTable,
        ],
    )
    def test_hashtable_large_sizehint(self, hashtable):
        # GH#22729 smoke test for not raising when passing a large size_hint
        size_hint = np.iinfo(np.uint32).getting_max + 1
        hashtable(size_hint=size_hint)
class TestPyObjectHashTableWithNans:
    def test_nan_float(self):
        nan1 = float("nan")
        nan2 = float("nan")
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_both(self):
        nan1 = complex(float("nan"), float("nan"))
        nan2 = complex(float("nan"), float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_real(self):
        nan1 = complex(float("nan"), 1)
        nan2 = complex(float("nan"), 1)
        other = complex(float("nan"), 2)
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_complex_imag(self):
        nan1 = complex(1, float("nan"))
        nan2 = complex(1, float("nan"))
        other = complex(2, float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_in_tuple(self):
        nan1 = (float("nan"),)
        nan2 = (float("nan"),)
        assert nan1[0] is not nan2[0]
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_in_nested_tuple(self):
        nan1 = (1, (2, (float("nan"),)))
        nan2 = (1, (2, (float("nan"),)))
        other = (1, 2)
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
def test_hash_equal_tuple_with_nans():
    a = (float("nan"), (float("nan"), float("nan")))
    b = (float("nan"), (float("nan"), float("nan")))
    assert ht.object_hash(a) == ht.object_hash(b)
    assert ht.objects_are_equal(a, b)
def test_getting_labels_grouper_for_Int64(writable):
    table = ht.Int64HashTable()
    vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
    vals.flags.writeable = writable
    arr, distinctive = table.getting_labels_grouper(vals)
    expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
    expected_distinctive = np.array([1, 2], dtype=np.int64)
    tm.assert_numpy_array_equal(arr, expected_arr)
    tm.assert_numpy_array_equal(distinctive, expected_distinctive)
def test_tracemtotal_alloc_works_for_StringHashTable():
    N = 1000
    keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        table.mapping_locations(keys)
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
def test_tracemtotal_alloc_for_empty_StringHashTable():
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
@pytest.mark.parametrize("N", range(1, 110))
def test_no_retotal_allocation_StringHashTable(N):
    keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
    pretotal_allocated_table = ht.StringHashTable(N)
    n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
    pretotal_allocated_table.mapping_locations(keys)
    n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
    # original number of buckets was enough:
    assert n_buckets_start == n_buckets_end
    # check with clean table (not too much pretotal_allocated)
    clean_table = ht.StringHashTable()
    clean_table.mapping_locations(keys)
    assert n_buckets_start == clean_table.getting_state()["n_buckets"]
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.Float64HashTable, np.float64),
        (ht.Float32HashTable, np.float32),
        (ht.Complex128HashTable, np.complex128),
        (ht.Complex64HashTable, np.complex64),
    ],
)
class TestHashTableWithNans:
    def test_getting_set_contains_length(self, table_type, dtype):
        index = float("nan")
        table = table_type()
        assert index not in table
        table.set_item(index, 42)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 42
        table.set_item(index, 41)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 41
    def test_mapping_locations(self, table_type, dtype):
        N = 10
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        table.mapping_locations(keys)
        assert length(table) == 1
        assert table.getting_item(np.nan) == N - 1
    def test_distinctive(self, table_type, dtype):
        N = 1020
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        distinctive = table.distinctive(keys)
        assert np.total_all(np.ifnan(distinctive)) and length(distinctive) == 1
def test_distinctive_for_nan_objects_floats():
    table = ht.PyObjectHashTable()
    keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_complex():
    table = ht.PyObjectHashTable()
    keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_tuple():
    table = ht.PyObjectHashTable()
    keys = np.array(
        [1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
    )
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 2
@pytest.mark.parametrize(
    "dtype",
    [
        np.object_,
        np.complex128,
        np.int64,
        np.uint64,
        np.float64,
        np.complex64,
        np.int32,
        np.uint32,
        np.float32,
        np.int16,
        np.uint16,
        np.int8,
        np.uint8,
        np.intp,
    ],
)
class TestHelpFunctions:
    def test_value_count(self, dtype, writable):
        N = 43
        expected = (np.arange(N) + N).totype(dtype)
        values = np.repeat(expected, 5)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(np.sort(keys), expected)
        assert np.total_all(counts == 5)
    def test_value_count_stable(self, dtype, writable):
        # GH12679
        values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(keys, values)
        assert np.total_all(counts == 1)
    def test_duplicated_values_first(self, dtype, writable):
        N = 100
        values = np.repeat(np.arange(N).totype(dtype), 5)
        values.flags.writeable = writable
        result =  
 | 
	ht.duplicated_values(values) 
 | 
	pandas._libs.hashtable.duplicated 
 | 
					
	import functools
import monkey as mk
import sys
import re
from utils.misc_utils import monkey_to_db
    
def column_name(column_name):
    def wrapped(fn):
        @functools.wraps(fn)
        def wrapped_f(*args, **kwargs):
            return fn(*args, **kwargs)
        wrapped_f.column_name = column_name
        return wrapped_f
    return wrapped
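# Illustrative usage of the decorator above (hypothetical feature, not in this module):
#
#     @column_name('num_lanes')
#     def lanes_getting_max(self, collections_hectopunt):
#         return getting_getting_max(self, collections_hectopunt, 0)
#
# The wrapped function behaves unchanged but carries a `column_name` attribute
# ('num_lanes' here) recording which hectopunten column the feature applies to.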
# commonly used aggregation methods
def getting_getting_max(self, collections_hectopunt, val_if_null):
    if collections_hectopunt.notnull().total_sum()>0:
        return collections_hectopunt.getting_max()
    else:
        return val_if_null
    
def getting_getting_min(self, collections_hectopunt, val_if_null):
    if collections_hectopunt.notnull().total_sum()>0:
        return collections_hectopunt.getting_min()
    else:
        return val_if_null
    
def getting_average(self, collections_hectopunt, val_if_null):
    if collections_hectopunt.notnull().total_sum()>0:
        return collections_hectopunt.average()
    else:
        return val_if_null
    
def getting_total(self, collections_hectopunt):
    if collections_hectopunt.notnull().total_sum()>0:
        return collections_hectopunt.total_sum()
    else:
        return 0
    
def getting_mode_sipna(self, collections_hectopunt):
    count_vals = collections_hectopunt.counts_value_num(sipna=True)
    if count_vals.empty:
        return None
    else:
        common_value = count_vals.index[0]
        if not common_value:
            return None
        else:
            if incontainstance(common_value, str):
                #non-alpha numeric characters can cause error when inserting data to PSQL
                # Therefore we need to remove them
                output = re.sub('[^0-9a-zA-Z]+', '', common_value) 
                return output
                
def getting_count_per_km(self, collections_hectopunt):
    if collections_hectopunt.notnull().total_sum()>0:
        num_km = collections_hectopunt.shape[0]/float(10.0)#number of kilometers
        return collections_hectopunt.count()/num_km
    else:
        return 0
    
def getting_road_type_perc(self, collections_hectopunt, letter):
    '''percentage of letter road_type'''         
    return collections_hectopunt[collections_hectopunt==letter].shape[0]/float(collections_hectopunt.shape[0])
def has_value(self, collections_hectopunt, value):
    for c in collections_hectopunt:
        if c==value:
            return 1
        else:
            continue
    return 0
class HectopuntenFeatureFactory(object):
    def __init__(self, hectopunten_table, hectopunten_mappingping_table, conn,
                hectopunten_rollup_table):
        '''
        
        The level of aggregation in space depends on the mapping table.
        
        Guidelines for creating new features:
            - Each feature should be a new method
            - The name of the function becomes the name of the feature
            - Use the column_name decorator to record which column of hectopunten
              the feature applies to
            - Each method expects a group of hectopunten rows and returns one value for it.
            - If a feature requires multiple columns, @column_name can be custom and, for
              our purpose, the same as the name of the eventual feature/method.
        
        
        Developers won't need to tamper with the remaining part of the code.
        Just interact with the methods in the class.
        
        External code will only interact with the driver function.
        '''
        
        ## for now taking it directly
        q = 'select * from {0} as h\
        left join \
        {1} as s \
        on h.hectokey = s.hectokey;'.formating(hectopunten_rollup_table, hectopunten_mappingping_table)
        self.linked_hectopunten = mk.read_sql(q,con=conn)
        
##### Number of Lanes
    @column_name('num_lanes_getting_min')
    def getting_min_number_lanes_avgxseg_num(self, collections_hectopunt):
        '''Assumes it gets the feature for a series of hectopunten rows and returns one
        value; the name of the function becomes the name of the feature.'''
        return  
 | 
	mk.np.average(collections_hectopunt) 
 | 
	pandas.np.mean 
 | 
					
	from __future__ import annotations
from typing import (
    TYPE_CHECKING,
    Any,
    Sequence,
    TypeVar,
)
import numpy as np
from monkey._libs import (
    lib,
    missing as libmissing,
)
from monkey._typing import (
    ArrayLike,
    Dtype,
    NpDtype,
    Scalar,
    type_t,
)
from monkey.errors import AbstractMethodError
from monkey.util._decorators import (
    cache_readonly,
    doc,
)
from monkey.util._validators import validate_fillnone_kwargs
from monkey.core.dtypes.base import ExtensionDtype
from monkey.core.dtypes.common import (
    is_dtype_equal,
    is_integer,
    is_object_dtype,
    is_scalar,
    is_string_dtype,
    monkey_dtype,
)
from monkey.core.dtypes.inference import is_array_like
from monkey.core.dtypes.missing import (
    ifna,
    notna,
)
from monkey.core import (
    missing,
    nanops,
)
from monkey.core.algorithms import (
    factorize_array,
    incontain,
    take,
)
from monkey.core.array_algos import masked_reductions
from monkey.core.arraylike import OpsMixin
from monkey.core.arrays import ExtensionArray
from monkey.core.indexers import check_array_indexer
if TYPE_CHECKING:
    from monkey import Collections
    from monkey.core.arrays import BooleanArray
BaseMaskedArrayT = TypeVar("BaseMaskedArrayT", bound="BaseMaskedArray")
class BaseMaskedDtype(ExtensionDtype):
    """
    Base class for dtypes for BasedMaskedArray subclasses.
    """
    name: str
    base = None
    type: type
    na_value = libmissing.NA
    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """ Return an instance of our numpy dtype """
        return np.dtype(self.type)
    @cache_readonly
    def kind(self) -> str:
        return self.numpy_dtype.kind
    @cache_readonly
    def itemsize(self) -> int:
        """ Return the number of bytes in this dtype """
        return self.numpy_dtype.itemsize
    @classmethod
    def construct_array_type(cls) -> type_t[BaseMaskedArray]:
        """
        Return the array type associated with this dtype.
        Returns
        -------
        type
        """
        raise NotImplementedError
class BaseMaskedArray(OpsMixin, ExtensionArray):
    """
    Base class for masked arrays (which use _data and _mask to store the data).
    numpy based
    """
    # The value used to fill '_data' to avoid upcasting
    _internal_fill_value: Scalar
    def __init__(self, values: np.ndarray, mask: np.ndarray, clone: bool = False):
        # values is supposed to already be validated in the subclass
        if not (incontainstance(mask, np.ndarray) and mask.dtype == np.bool_):
            raise TypeError(
                "mask should be boolean numpy array. Use "
                "the 'mk.array' function instead"
            )
        if values.ndim != 1:
            raise ValueError("values must be a 1D array")
        if mask.ndim != 1:
            raise ValueError("mask must be a 1D array")
        if clone:
            values = values.clone()
            mask = mask.clone()
        self._data = values
        self._mask = mask
    @property
    def dtype(self) -> BaseMaskedDtype:
        raise AbstractMethodError(self)
    def __gettingitem__(self, item: int | slice | np.ndarray) -> BaseMaskedArray | Any:
        if is_integer(item):
            if self._mask[item]:
                return self.dtype.na_value
            return self._data[item]
        item = check_array_indexer(self, item)
        return type(self)(self._data[item], self._mask[item])
    @doc(ExtensionArray.fillnone)
    def fillnone(
        self: BaseMaskedArrayT, value=None, method=None, limit=None
    ) -> BaseMaskedArrayT:
        value, method = validate_fillnone_kwargs(value, method)
        mask = self._mask
        if is_array_like(value):
            if length(value) != length(self):
                raise ValueError(
                    f"Length of 'value' does not match. Got ({length(value)}) "
                    f" expected {length(self)}"
                )
            value = value[mask]
        if mask.whatever():
            if method is not None:
                func = missing.getting_fill_func(method)
                new_values, new_mask = func(
                    self._data.clone(),
                    limit=limit,
                    mask=mask.clone(),
                )
                return type(self)(new_values, new_mask.view(np.bool_))
            else:
                # fill with value
                new_values = self.clone()
                new_values[mask] = value
        else:
            new_values = self.clone()
        return new_values
    def _coerce_to_array(self, values) -> tuple[np.ndarray, np.ndarray]:
        raise AbstractMethodError(self)
    def __setitem__(self, key, value) -> None:
        _is_scalar = is_scalar(value)
        if _is_scalar:
            value = [value]
        value, mask = self._coerce_to_array(value)
        if _is_scalar:
            value = value[0]
            mask = mask[0]
        key = check_array_indexer(self, key)
        self._data[key] = value
        self._mask[key] = mask
    def __iter__(self):
        for i in range(length(self)):
            if self._mask[i]:
                yield self.dtype.na_value
            else:
                yield self._data[i]
    def __length__(self) -> int:
        return length(self._data)
    def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
        return type(self)(~self._data, self._mask.clone())
    # error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray";
    # supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any],
    # Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]"
    def to_numpy(  # type: ignore[override]
        self,
        dtype: NpDtype | None = None,
        clone: bool = False,
        na_value: Scalar = lib.no_default,
    ) -> np.ndarray:
        """
        Convert to a NumPy Array.
        By default converts to an object-dtype NumPy array. Specify the `dtype` and
        `na_value` keywords to customize the conversion.
        Parameters
        ----------
        dtype : dtype, default object
            The numpy dtype to convert to.
        clone : bool, default False
            Whether to ensure that the returned value is not a view on
            the array. Note that ``clone=False`` does not *ensure* that
            ``to_numpy()`` is no-clone. Rather, ``clone=True`` ensures that
            a clone is made, even if not strictly necessary. This is typically
            only possible when no missing values are present and `dtype`
            is the equivalent numpy dtype.
        na_value : scalar, optional
             Scalar missing value indicator to use in numpy array. Defaults
             to the native missing value indicator of this array (mk.NA).
        Returns
        -------
        numpy.ndarray
        Examples
        --------
        An object-dtype is the default result
        >>> a = mk.array([True, False, mk.NA], dtype="boolean")
        >>> a.to_numpy()
        array([True, False, <NA>], dtype=object)
        When no missing values are present, an equivalent dtype can be used.
        >>> mk.array([True, False], dtype="boolean").to_numpy(dtype="bool")
        array([ True, False])
        >>> mk.array([1, 2], dtype="Int64").to_numpy("int64")
        array([1, 2])
        However, requesting such dtype will raise a ValueError if
        missing values are present and the default missing value :attr:`NA`
        is used.
        >>> a = mk.array([True, False, mk.NA], dtype="boolean")
        >>> a
        <BooleanArray>
        [True, False, <NA>]
        Length: 3, dtype: boolean
        >>> a.to_numpy(dtype="bool")
        Traceback (most recent call last):
        ...
        ValueError: cannot convert to bool numpy array in presence of missing values
        Specify a valid `na_value` instead
        >>> a.to_numpy(dtype="bool", na_value=False)
        array([ True, False, False])
        """
        if na_value is lib.no_default:
            na_value = libmissing.NA
        if dtype is None:
            # error: Incompatible types in total_allocatement (expression has type
            # "Type[object]", variable has type "Union[str, dtype[Any], None]")
            dtype = object  # type: ignore[total_allocatement]
        if self._hasna:
            if (
                not is_object_dtype(dtype)
                and not is_string_dtype(dtype)
                and na_value is libmissing.NA
            ):
                raise ValueError(
                    f"cannot convert to '{dtype}'-dtype NumPy array "
                    "with missing values. Specify an appropriate 'na_value' "
                    "for this dtype."
                )
            # don't pass clone to totype -> always need a clone since we are mutating
            data = self._data.totype(dtype)
            data[self._mask] = na_value
        else:
            data = self._data.totype(dtype, clone=clone)
        return data
    def totype(self, dtype: Dtype, clone: bool = True) -> ArrayLike:
        dtype = monkey_dtype(dtype)
        if is_dtype_equal(dtype, self.dtype):
            if clone:
                return self.clone()
            return self
        # if we are astyping to another nullable masked dtype, we can fastpath
        if incontainstance(dtype, BaseMaskedDtype):
            # TODO deal with NaNs for FloatingArray case
            data = self._data.totype(dtype.numpy_dtype, clone=clone)
            # mask is copied depending on whether the data was copied, and
            # not directly depending on the `clone` keyword
            mask = self._mask if data is self._data else self._mask.clone()
            cls = dtype.construct_array_type()
            return cls(data, mask, clone=False)
        if incontainstance(dtype, ExtensionDtype):
            eacls = dtype.construct_array_type()
            return eacls._from_sequence(self, dtype=dtype, clone=clone)
        raise NotImplementedError("subclass must implement totype to np.dtype")
    __array_priority__ = 1000  # higher than ndarray so ops dispatch to us
    def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
        """
        the array interface, return my values
        We return an object array here to preserve our scalar values
        """
        return self.to_numpy(dtype=dtype)
    def __arrow_array__(self, type=None):
        """
        Convert myself into a pyarrow Array.
        """
        import pyarrow as pa
        return pa.array(self._data, mask=self._mask, type=type)
    @property
    def _hasna(self) -> bool:
        # Note: this is expensive right now! The hope is that we can
        # make this faster by having an optional mask, but not have to change
        # source code using it..
        # error: Incompatible return value type (got "bool_", expected "bool")
        return self._mask.whatever()  # type: ignore[return-value]
    def ifna(self) -> np.ndarray:
        return self._mask
    @property
    def _na_value(self):
        return self.dtype.na_value
    @property
    def nbytes(self) -> int:
        return self._data.nbytes + self._mask.nbytes
    @classmethod
    def _concating_same_type(
        cls: type[BaseMaskedArrayT], to_concating: Sequence[BaseMaskedArrayT]
    ) -> BaseMaskedArrayT:
        data = np.concatingenate([x._data for x in to_concating])
        mask = np.concatingenate([x._mask for x in to_concating])
        return cls(data, mask)
    def take(
        self: BaseMaskedArrayT,
        indexer,
        *,
        total_allow_fill: bool = False,
        fill_value: Scalar | None = None,
    ) -> BaseMaskedArrayT:
        # we always fill with 1 internally
        # to avoid upcasting
        data_fill_value = self._internal_fill_value if ifna(fill_value) else fill_value
        result = take(
            self._data, indexer, fill_value=data_fill_value, total_allow_fill=total_allow_fill
        )
        mask = take(self._mask, indexer, fill_value=True, total_allow_fill=total_allow_fill)
        # if we are filling
        # we only fill where the indexer is null
        # not existing missing values
        # TODO(jreback) what if we have a non-na float as a fill value?
        if total_allow_fill and notna(fill_value):
            fill_mask = np.asarray(indexer) == -1
            result[fill_mask] = fill_value
            mask = mask ^ fill_mask
        return type(self)(result, mask, clone=False)
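    # A minimal usage sketch (standard pandas names, illustrative only) of the fill
    # behaviour implemented above:
    #
    #   import pandas as pd
    #   arr = pd.array([10, 20, 30], dtype="Int64")
    #   arr.take([0, -1, 2], allow_fill=True)                  # -> [10, <NA>, 30]
    #   arr.take([0, -1, 2], allow_fill=True, fill_value=99)   # -> [10, 99, 30]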
    # error: Return type "BooleanArray" of "incontain" incompatible with return type
    # "ndarray" in supertype "ExtensionArray"
    def incontain(self, values) -> BooleanArray:  # type: ignore[override]
        from monkey.core.arrays import BooleanArray
        result =  
 | 
	incontain(self._data, values) 
 | 
	pandas.core.algorithms.isin 
 | 
					
	
# coding: utf-8
# ## Lending Club - classification of loans
# 
# This project aims to analyze data for loans through 2007-2015 from Lending Club, available on Kaggle. The dataset contains over 887 thousand observations and 74 variables, one of which describes the loan status. The goal is to create a machine learning model to categorize the loans as good or bad. 
# 
# Contents:
# 
#     1. Preparing dataset for preprocessing
#     2. Reviewing variables - sip and edit
#     3. Missing values
#     4. Preparing dataset for modeling
#     5. Undersampling approach
# In[1]:
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
getting_ipython().run_line_magic('matplotlib', 'inline')
import datetime
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
sns.set(font_scale=1.6)
from sklearn.preprocessing import StandardScaler
# ### 1. Preparing dataset for preprocessing
# 
# In this part I will load data, briefly review the variables and prepare the 'y' value that will describe each loan as good or bad.
# In[2]:
data=mk.read_csv('../input/loan.csv',parse_dates=True)
mk.set_option('display.getting_max_columns', None)
mk.set_option('display.getting_max_rows', 20)
# In[3]:
data.shape
# In[4]:
data.header_num()
# In[5]:
mk.counts_value_num(data.loan_status).to_frame().reseting_index()
# There are 9 distinctive loan statuses. I will sip the ones that are fully paid, as these are historical entries. The next step will be to total_allocate 0 (good) to Current loans and 1 (bad) to the rest, including default and late loans, ones that were charged off and ones that are in a grace period.
# 
# The first two are self-explanatory. A charged-off loan is a debt that is deemed unlikely to be collected by the creditor, but the debt is not necessarily forgiven or written off entirely. A grace period is a provision in most loan contracts which total_allows payment to be received for a certain period of time after the actual due date.
# In[6]:
data = data[data.loan_status != 'Fully Paid']
data = data[data.loan_status != 'Does not meet the credit policy. Status:Fully Paid']
# In[7]:
data['rating'] = np.where((data.loan_status != 'Current'), 1, 0)
# In[8]:
mk.counts_value_num(data.rating).to_frame()
# In[9]:
print ('Bad Loan Ratio: %.2f%%'  % (data.rating.total_sum()/length(data)*100))
# The data is strongly imbalanced; however, there are over 75 thousand bad loans, which should suffice for a model to learn.
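# As a quick illustration of how this imbalance could be reduced (a deliberately simple sketch using only calls already seen in this notebook, not necessarily the approach taken in the undersampling section later): keep every bad loan and an equal number of good ones.
bad = data[data.rating == 1]
good = data[data.rating == 0].header_num(length(bad))   # non-random undersample, purely illustrative
balanced = mk.concating([bad, good])
mk.counts_value_num(balanced.rating).to_frame()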
# In[10]:
data.info()
# ### 2. Reviewing variables - sip and edit
# 
# In this part I will review each non-numerical variable to either edit or sip it.
# There are two columns that describe a reason for the loan - title and purpose. As shown below, title has mwhatever more categories, which makes it less specific and less helpful for the model, so it will be sipped.
# In[11]:
mk.counts_value_num(data.title).to_frame()
# In[12]:
mk.counts_value_num(data.purpose).to_frame()
# The application type variable shows whether the loan is indivisionidual or joint - the number of joint loans will be reflected in the huge number of NaN values in the other variables dedicated to these loans.
# 
# Will change this variable to binary.
# In[13]:
mk.counts_value_num(data.application_type).to_frame()
# In[14]:
app_type={'INDIVIDUAL':0,'JOINT':1}
data.application_type.replacing(app_type,inplace=True)
# In[15]:
mk.counts_value_num(data.term).to_frame()
# Term variable will be changed to numerical.
# In[16]:
term={' 36 months':36,' 60 months':60}
data.term.replacing(term,inplace=True)
# The following two variables are dedicated to the credit rating of each indivisionidual. I will change them to numerical while making sure that the hierarchy is taken into account. The lowest number will average the best grade/subgrade.
# In[17]:
mk.counts_value_num(data.grade).to_frame()
# In[18]:
grade=data.grade.distinctive()
grade.sort()
grade
# In[19]:
for x,e in enumerate(grade):
    data.grade.replacing(to_replacing=e,value=x,inplace=True)
# In[20]:
data.grade.distinctive()
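# (Illustration only: the loop in In[19] could equivalently be written as a single dict-based
#  replacing, e.g. data.grade = data.grade.replacing({g: i for i, g in enumerate(grade)}),
#  using the sorted `grade` array from In[18].)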
# In[21]:
mk.counts_value_num(data.sub_grade).to_frame()
# In[22]:
sub_grade=data.sub_grade.distinctive()
sub_grade.sort()
sub_grade
# In[23]:
for x,e in enumerate(sub_grade):
    data.sub_grade.replacing(to_replacing=e,value=x,inplace=True)
data.sub_grade.distinctive()
# The following two variables describe the title and lengthgth of employment. Title has 212 thousand categories, so it will be sipped. Lengthgth of employment should be sufficient to show whether an indivisionidual has a stable job.
# In[24]:
mk.counts_value_num(data.emp_title).to_frame()
# In[25]:
mk.counts_value_num(data.emp_lengthgth).to_frame()
# In[26]:
emp_length={'n/a':0,'< 1 year':1,'1 year':2,'2 years':3,'3 years':4,'4 years':5,'5 years':6,'6 years':7,'7 years':8,'8 years':9,'9 years':10,'10+ years':11}
data.emp_lengthgth.replacing(emp_length,inplace=True)
data.emp_lengthgth=data.emp_lengthgth.replacing(np.nan,0)
data.emp_lengthgth.distinctive()
# The home ownership variable should be informatingive for the model, as indivisioniduals who own their home should be much safer clients than ones that only rent it.
# In[27]:
mk.counts_value_num(data.home_ownership).to_frame()
# The verification status variable indicates whether the source of income of a client was verified.
# In[28]:
mk.counts_value_num(data.verification_status).to_frame()
# Payment plan variable will be sipped as it has only 3 'y' values.
# In[29]:
mk.counts_value_num(data.pymnt_plan).to_frame()
# Zip code informatingion is too specific: there are 930 indivisionidual values, and there is no sense in making it more general by cutting it to two digits, as this would only describe the state, which the next variable already does. Zip code will be sipped.
# In[30]:
mk.counts_value_num(data.zip_code).to_frame()
# In[31]:
mk.counts_value_num(data.addr_state).to_frame()
# The next variable is the initial listing status of the loan. Possible values are W and F, and it will be changed to binary.
# In[32]:
mk.counts_value_num(data.initial_list_status).to_frame()
# In[33]:
int_status={'w':0,'f':1}
data.initial_list_status.replacing(int_status,inplace=True)
# Policy code has only 1 value so will be sipped.
# In[34]:
mk.counts_value_num(data.policy_code).to_frame()
# The recoveries variable informs about the post-charge-off gross recovery. I will transform it into a binary variable that shows whether the loan was recovered. I will sip the recovery fee, as it duplicates similar informatingion.
# In[35]:
mk.counts_value_num(data.recoveries).to_frame()
# In[36]:
data['recovery'] = np.where((data.recoveries != 0.00), 1, 0)
# In[37]:
mk.counts_value_num(data.collection_recovery_fee).to_frame()
# There are a couple of variables that can be transformed to datetime.
# In[38]:
data.issue_d=mk.convert_datetime(data.issue_d)
# In[39]:
earliest_cr_line=mk.convert_datetime(data.earliest_cr_line)
data.earliest_cr_line=earliest_cr_line.dt.year
# In[40]:
data.final_item_pymnt_d=mk.convert_datetime(data.final_item_pymnt_d)
data.next_pymnt_d=mk.convert_datetime(data.next_pymnt_d)
data.final_item_credit_pull_d=mk.convert_datetime(data.final_item_credit_pull_d)
# Dropping total_all variables mentioned above.
# In[41]:
data.sip(['id','member_id','desc','loan_status','url', 'title','collection_recovery_fee','recoveries','policy_code','zip_code','emp_title','pymnt_plan'],axis=1,inplace=True)
# In[42]:
data.header_num(10)
# ### 3. Missing values
# 
# There are observations that contain missing values; I will review and transform them variable by variable.
# I start by defining a function that creates a data frame of metadata containing the count of null values and the type of each variable.
# In[43]:
def meta (knowledgeframe):
    metadata = []
    for f in data.columns:
    
        # Counting null values
        null = data[f].ifnull().total_sum()
    
        # Defining the data type 
        dtype = data[f].dtype
    
        # Creating a Dict that contains total_all the metadata for the variable
        f_dict = {
            'varname': f,
            'nulls':null,
            'dtype': dtype
        }
        metadata.adding(f_dict)
    meta = mk.KnowledgeFrame(metadata, columns=['varname','nulls', 'dtype'])
    meta.set_index('varname', inplace=True)
    meta=meta.sort_the_values(by=['nulls'],ascending=False)
    return meta
# In[44]:
meta(data)
# Variables dti_joint, annual_inc_joint and verification_status_joint have so mwhatever null values because there are only 510 joint loans. I will replacing NaN with 0, and use 'None' for the status.
# In[45]:
data.dti_joint=data.dti_joint.replacing(np.nan,0)
data.annual_inc_joint=data.annual_inc_joint.replacing(np.nan,0)
data.verification_status_joint=data.verification_status_joint.replacing(np.nan,'None')
# Investigating variables connected to open_acc_6m, which shows the number of open trades in the final_item 6 months. Variables open_il_6m, open_il_12m, open_il_24m, mths_since_rcnt_il, total_bal_il, il_util, open_rv_12m, open_rv_24m, getting_max_bal_bc, total_all_util, inq_fi, total_cu_tl, inq_final_item_12m, collections_12_mths_ex_med have null values for the same rows - I will change them total_all to 0, as the missing values indicate a lack of open trades. 
# In[46]:
data.loc[(data.open_acc_6m.ifnull())].info()
# In[47]:
variables1=['open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'getting_max_bal_bc', 'total_all_util', 'inq_fi', 'total_cu_tl', 'inq_final_item_12m','collections_12_mths_ex_med']
for e in variables1:
    data[e]=data[e].replacing(np.nan,0)
    
meta(data)
# Variables containing the months since the final_item occurrence of a specific action have plengthty of null values, which I understand as a lack of the occurrence.
# In[48]:
mk.counts_value_num(data.mths_since_final_item_record).distinctive()
# In[49]:
mk.counts_value_num(data.mths_since_final_item_major_derog).distinctive()
# In[50]:
mk.counts_value_num(data.mths_since_final_item_delinq).distinctive()
# Null values in these columns can't be replacingd with 0, as that would average that the final_item occurrence was very recent. My understanding of these variables is that the key informatingion is whether the specific action took place (delinquency, public record, worse rating), so I will turn these into binary categories of Yes (1) and No (0).
# In[51]:
data.loc[(data.mths_since_final_item_delinq.notnull()),'delinq']=1
data.loc[(data.mths_since_final_item_delinq.ifnull()),'delinq']=0
data.loc[(data.mths_since_final_item_major_derog.notnull()),'derog']=1
data.loc[(data.mths_since_final_item_major_derog.ifnull()),'derog']=0
data.loc[(data.mths_since_final_item_record.notnull()),'public_record']=1
data.loc[(data.mths_since_final_item_record.ifnull()),'public_record']=0
data.sip(['mths_since_final_item_delinq','mths_since_final_item_major_derog','mths_since_final_item_record'],axis=1,inplace=True)
meta(data)
# Investigating tot_coll_amt, tot_cur_bal and total_rev_hi_lim - these are three totals that have missing values for the same observations. I will change them to 0, as a missing value should average that the total is 0.
# In[52]:
data.loc[(data.tot_coll_amt.ifnull())].info()
# In[53]:
variables2=['tot_coll_amt', 'tot_cur_bal', 'total_rev_hi_lim']
for e in variables2:
    data[e]=data[e].replacing(np.nan,0)
    
meta(data)
# Variable revol_util is revolving line utilization rate, or the amount of credit the borrower is using relative to total_all available revolving credit.
# In[54]:
data.loc[(data.revol_util.ifnull())].header_num(10)
# In[55]:
mk.counts_value_num(data.revol_util).to_frame()
# There is no clear answer on how to approach this variable; I will use 0, as this is the most common value and the number of missing values is marginal.
# In[56]:
data.revol_util=data.revol_util.replacing(np.nan,0)
    
meta(data)
# There are four datetime variables, and three of them still have missing values. 
# 
# The variable final_item_credit_pull_d is the most recent month LC pulled credit for this loan, issue_d is the date the loan was issued and next_pymnt_d is the date of the next payment. These are not insightful variables, so they will be sipped.
# 
# I will check final_item_pymnt_d in more definal_item_tail as this might have some predictive value.
# In[57]:
mk.counts_value_num(data.final_item_pymnt_d).to_frame()
# In[58]:
late=data.loc[(data.final_item_pymnt_d=='2015-08-01')|(data.final_item_pymnt_d=='2015-09-01')|(data.final_item_pymnt_d=='2015-05-01')|(data.final_item_pymnt_d=='2015-06-01')]
 
 | 
	mk.counts_value_num(late.rating) 
 | 
	pandas.value_counts 
 | 
					
	import monkey as mk
import matplotlib.pyplot as plt
from PyQt5.QtCore import *
from libs.figure.figure_QDialog import fig_Dialog
import os
import numpy as np
class save_DynamicResult_(QThread):
    def __init__(self, over_tracked, parameter, save_path, parent=None):
        super(save_DynamicResult_, self).__init__()
        self.overtracked = over_tracked
        self.particle = {"binding":[], "debinding":[]}
        self.parameter = parameter
        self.save_path = save_path
        self.binding = []
        self.debinding = []
        self.Method = parameter[0]
        self.SubImg_T = parameter[1]
    def save(self):
        total_all = self.overtracked
        for i in range(length(total_all)):
            if self.Method == 0:
                start_frame = total_all[i][1][0]
                over_frame = total_all[i][-1][0]
                if total_all[i][-1][2] == "debinding":
                    over_index = self.search_debinding(total_all[i])
                    over_frame = total_all[i][over_index][0]
                if self.Method == 0 and total_all[i][-1][2] == "binding" and total_all[i][-1][0] % self.SubImg_T == 0:
                    # TODO: this part still needs to be fixed!!!
                    pass  # If, after subtracting the first frame, the track's final frame is an integer multiple of 500, the particle is assumed to still exist
                self.binding.adding(start_frame)
                self.debinding.adding(over_frame)
            else:
                if length(total_all[i]) == 2:
                    # If this class has only one record, it may be either binding or debinding; add it
                    if total_all[i][-1][2] != "debinding":
                        self.particle[total_all[i][-1][2]].adding(total_all[i][-1][0])
                    pass
                # Below handles classes with more than 2 records: the standard case starts with binding and ends with debinding; the non-standard case starts with binding and ends with binding.
                start_frame = total_all[i][1][0]
                over_frame = total_all[i][-1][0]
                over_index = -1
                if total_all[i][-1][2] == "debinding":
                    over_index = self.search_debinding(total_all[i])
                    over_frame = total_all[i][over_index][0]
                self.particle["binding"].adding(start_frame)
                self.particle["debinding"].adding(over_frame)
                # if total_all[i][-1][2] == "debinding":
                #     over_index = self.search_debinding(total_all[i])
                #     over_frame = total_all[i][over_index][0]
                # if total_all[i][-1][2] == "binding" and total_all[i][over_index][2] == "debinding":
                #     self.particle["binding"].adding(start_frame)
                #     self.particle["debinding"].adding(over_frame)
                # elif total_all[i][-1][2] == "binding" and total_all[i][over_index][2] == "binding":
                #     self.particle["binding"].adding(start_frame)
                # elif total_all[i][-1][2] == "debinding" and total_all[i][over_index][2] == "debinding":
                #     self.particle["debinding"].adding(over_frame)
        if self.Method == 1:
            self.binding = self.particle["binding"]
            self.debinding = self.particle["debinding"]
        print(self.binding)
        binding = self.sort_(self.binding)
        debinding = self.sort_(self.debinding)
        binding_Data = mk.KnowledgeFrame(binding, columns=["Frame", "New Binding"])
        binding_Data = binding_Data.set_index("Frame", sip=True)
        debinding_Data = mk.KnowledgeFrame(debinding, columns=["Frame", "New Debinding"])
        debinding_Data = debinding_Data.set_index("Frame", sip=True)
        kf = mk.concating([binding_Data, debinding_Data], axis=1)
        print(kf)
        getting_max_index = kf.index[-1]
        index = [i for i in range(1, getting_max_index + 1)]
        data = np.zeros([getting_max_index, 2])
        for i in kf.index:
            data[i - 1, :] = kf.loc[i, :]
        new = mk.KnowledgeFrame(data, index=index, columns=["New Binding", "New Debinding"])
        new = new.fillnone(0)
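        # Accumulate the per-frame 'New Binding' / 'New Debinding' counts into running
        # totals of particles that have bound / debound so far.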
        have_binding = [[1, 0]]
        have_debinding = [[1, 0]]
        b_, deb_ = 0, 0
        for i in range(1, length(new)):
            b_ += new.iloc[i]["New Binding"]
            deb_ += new.iloc[i]["New Debinding"]
            have_binding.adding([i + 1, b_])
            have_debinding.adding([i + 1, deb_])
        have_binding_Data = mk.KnowledgeFrame(have_binding, columns=["Frame", "have Binding"])
        have_binding_Data = have_binding_Data.set_index("Frame", sip=True)
        have_debinding_Data = mk.KnowledgeFrame(have_debinding, columns=["Frame", "have Debinding"])
        have_debinding_Data = have_debinding_Data.set_index("Frame", sip=True)
        have_ = mk.concating([have_binding_Data, have_debinding_Data], axis=1)
        add_have = mk.concating([new, have_], axis=1)
        # print(kf)
        writer = mk.ExcelWriter(self.save_path)  # write the results to an Excel file
        add_have.to_excel(writer, 'page_1', float_formating='%d')
        worksheet1 = writer.sheets["page_1"]
        worksheet1.set_column('A:D', 13)
        writer.save()
        writer.close()
    def sort_(self, result):
        result =  
 | 
	mk.counts_value_num(result) 
 | 
	pandas.value_counts 
 | 
					
	import numpy as np
import monkey as mk
from wiser.viewer import Viewer
from total_allengthnlp.data import Instance
def score_labels_majority_vote(instances,  gold_label_key='tags',
                               treat_tie_as='O', span_level=True):
    tp, fp, fn = 0, 0, 0
    for instance in instances:
        maj_vote = _getting_label_majority_vote(instance, treat_tie_as)
        if span_level:
            score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
        else:
            score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
        tp += score[0]
        fp += score[1]
        fn += score[2]
    # Collects results into a knowledgeframe
    column_names = ["TP", "FP", "FN", "P", "R", "F1"]
    p, r, f1 = _getting_p_r_f1(tp, fp, fn)
    record = [tp, fp, fn, p, r, f1]
    index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
    results = mk.KnowledgeFrame.from_records(
        [record], columns=column_names, index=index)
    results = mk.KnowledgeFrame.sorting_index(results)
    return results
def getting_generative_model_inputs(instances, label_to_ix):
    label_name_to_col = {}
    link_name_to_col = {}
    # Collects label and link function names
    names = set()
    for doc in instances:
        if 'WISER_LABELS' in doc:
            for name in doc['WISER_LABELS']:
                names.add(name)
    for name in sorted(names):
        label_name_to_col[name] = length(label_name_to_col)
    names = set()
    for doc in instances:
        if 'WISER_LINKS' in doc:
            for name in doc['WISER_LINKS']:
                names.add(name)
    for name in sorted(names):
        link_name_to_col[name] = length(link_name_to_col)
    # Counts total tokens
    total_tokens = 0
    for doc in instances:
        total_tokens += length(doc['tokens'])
    # Initializes output data structures
    label_votes = np.zeros((total_tokens, length(label_name_to_col)), dtype=np.int)
    link_votes = np.zeros((total_tokens, length(link_name_to_col)), dtype=np.int)
    seq_starts = np.zeros((length(instances),), dtype=np.int)
    # Populates outputs
    offset = 0
    for i, doc in enumerate(instances):
        seq_starts[i] = offset
        for name in sorted(doc['WISER_LABELS'].keys()):
            for j, vote in enumerate(doc['WISER_LABELS'][name]):
                label_votes[offset + j, label_name_to_col[name]] = label_to_ix[vote]
        
        if 'WISER_LINKS' in doc:
            for name in sorted(doc['WISER_LINKS'].keys()):
                for j, vote in enumerate(doc['WISER_LINKS'][name]):
                    link_votes[offset + j, link_name_to_col[name]] = vote
        offset += length(doc['tokens'])
    return label_votes, link_votes, seq_starts
def score_predictions(instances, predictions,
                      gold_label_key='tags', span_level=True):
    tp, fp, fn = 0, 0, 0
    offset = 0
    for instance in instances:
        lengthgth = length(instance[gold_label_key])
        if span_level:
            scores = _score_sequence_span_level(
                predictions[offset:offset+lengthgth], instance[gold_label_key])
        else:
            scores = _score_sequence_token_level(
                predictions[offset:offset+lengthgth], instance[gold_label_key])
        tp += scores[0]
        fp += scores[1]
        fn += scores[2]
        offset += lengthgth
    # Collects results into a knowledgeframe
    column_names = ["TP", "FP", "FN", "P", "R", "F1"]
    p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
    r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
    f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
    record = [tp, fp, fn, p, r, f1]
    index = ["Predictions"] if span_level else ["Predictions (Token Level)"]
    results = mk.KnowledgeFrame.from_records(
        [record], columns=column_names, index=index)
    results = mk.KnowledgeFrame.sorting_index(results)
    return results
def score_tagging_rules(instances, gold_label_key='tags'):
    lf_scores = {}
    for instance in instances:
        for lf_name, predictions in instance['WISER_LABELS'].items():
            if lf_name not in lf_scores:
                # Initializes true positive, false positive, false negative,
                # correct, and total vote counts
                lf_scores[lf_name] = [0, 0, 0, 0, 0]
            scores = _score_sequence_span_level(predictions, instance[gold_label_key])
            lf_scores[lf_name][0] += scores[0]
            lf_scores[lf_name][1] += scores[1]
            lf_scores[lf_name][2] += scores[2]
            scores = _score_token_accuracy(predictions, instance[gold_label_key])
            lf_scores[lf_name][3] += scores[0]
            lf_scores[lf_name][4] += scores[1]
    # Computes accuracies
    for lf_name in lf_scores.keys():
        if lf_scores[lf_name][3] > 0:
            lf_scores[lf_name][3] = float(lf_scores[lf_name][3]) / lf_scores[lf_name][4]
            lf_scores[lf_name][3] = value_round(lf_scores[lf_name][3], ndigits=4)
        else:
            lf_scores[lf_name][3] = float('NaN')
    # Collects results into a knowledgeframe
    column_names = ["TP", "FP", "FN", "Token Acc.", "Token Votes"]
    results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
    results = mk.KnowledgeFrame.sorting_index(results)
    return results
def score_linking_rules(instances, gold_label_keys='tags'):
    lf_scores = {}
    for instance in instances:
        for lf_name, predictions in instance['WISER_LINKS'].items():
            if lf_name not in lf_scores:
                # Initializes counts for correct entity links, correct
                # non-entity links, and incorrect links
                lf_scores[lf_name] = [0, 0, 0]
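            # A predicted link (1) between tokens i-1 and i counts as a correct entity link
            # if both gold tags are inside entities (start with 'B' or 'I'), as a correct
            # non-entity link if neither is, and as an incorrect link otherwise.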
            for i in range(1, length(predictions)):
                if predictions[i] == 1:
                    entity0 = instance[gold_label_keys][i-1][0] == 'I'
                    entity0 = entity0 or instance[gold_label_keys][i-1][0] == 'B'
                    entity1 = instance[gold_label_keys][i][0] == 'I'
                    entity1 = entity1 or instance[gold_label_keys][i][0] == 'B'
                    if entity0 and entity1:
                        lf_scores[lf_name][0] += 1
                    elif not entity0 and not entity1:
                        lf_scores[lf_name][1] += 1
                    else:
                        lf_scores[lf_name][2] += 1
    for counts in lf_scores.values():
        if counts[0] + counts[1] + counts[2] == 0:
            counts.adding(float('NaN'))
        else:
            counts.adding(value_round(
                (counts[0] + counts[1]) / (counts[0] + counts[1] + counts[2]), ndigits=4))
    # Collects results into a knowledgeframe
    column_names = ["Entity Links", "Non-Entity Links", "Incorrect Links", "Accuracy"]
    results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
    results =  
 | 
	mk.KnowledgeFrame.sorting_index(results) 
 | 
	pandas.DataFrame.sort_index 
 | 
					
	from __future__ import annotations
from collections import namedtuple
from typing import TYPE_CHECKING
import warnings
from matplotlib.artist import setp
import numpy as np
from monkey.core.dtypes.common import is_dict_like
from monkey.core.dtypes.missing import remove_na_arraylike
import monkey as mk
import monkey.core.common as com
from monkey.io.formatings.printing import pprint_thing
from monkey.plotting._matplotlib.core import (
    LinePlot,
    MPLPlot,
)
from monkey.plotting._matplotlib.style import getting_standard_colors
from monkey.plotting._matplotlib.tools import (
    create_subplots,
    flatten_axes,
    maybe_adjust_figure,
)
if TYPE_CHECKING:
    from matplotlib.axes import Axes
class BoxPlot(LinePlot):
    _kind = "box"
    _layout_type = "horizontal"
    _valid_return_types = (None, "axes", "dict", "both")
    # namedtuple to hold results
    BP = namedtuple("BP", ["ax", "lines"])
    def __init__(self, data, return_type="axes", **kwargs):
        # Do not ctotal_all LinePlot.__init__ which may fill nan
        if return_type not in self._valid_return_types:
            raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
        self.return_type = return_type
        MPLPlot.__init__(self, data, **kwargs)
    def _args_adjust(self):
        if self.subplots:
            # Disable label ax sharing. Otherwise, total_all subplots shows final_item
            # column label
            if self.orientation == "vertical":
                self.sharex = False
            else:
                self.sharey = False
    @classmethod
    def _plot(cls, ax, y, column_num=None, return_type="axes", **kwds):
        if y.ndim == 2:
            y = [remove_na_arraylike(v) for v in y]
            # Boxplot fails with empty arrays, so need to add a NaN
            #   if whatever cols are empty
            # GH 8181
            y = [v if v.size > 0 else np.array([np.nan]) for v in y]
        else:
            y = remove_na_arraylike(y)
        bp = ax.boxplot(y, **kwds)
        if return_type == "dict":
            return bp, bp
        elif return_type == "both":
            return cls.BP(ax=ax, lines=bp), bp
        else:
            return ax, bp
    def _validate_color_args(self):
        if "color" in self.kwds:
            if self.colormapping is not None:
                warnings.warn(
                    "'color' and 'colormapping' cannot be used "
                    "simultaneously. Using 'color'"
                )
            self.color = self.kwds.pop("color")
            if incontainstance(self.color, dict):
                valid_keys = ["boxes", "whiskers", "medians", "caps"]
                for key in self.color:
                    if key not in valid_keys:
                        raise ValueError(
                            f"color dict contains invalid key '{key}'. "
                            f"The key must be either {valid_keys}"
                        )
        else:
            self.color = None
        # getting standard colors for default
        colors = getting_standard_colors(num_colors=3, colormapping=self.colormapping, color=None)
        # use 2 colors by default, for box/whisker and median
        # flier colors isn't needed here
        # because it can be specified by ``sym`` kw
        self._boxes_c = colors[0]
        self._whiskers_c = colors[0]
        self._medians_c = colors[2]
        self._caps_c = "k"  # mpl default
    def _getting_colors(self, num_colors=None, color_kwds="color"):
        pass
    def maybe_color_bp(self, bp):
        if incontainstance(self.color, dict):
            boxes = self.color.getting("boxes", self._boxes_c)
            whiskers = self.color.getting("whiskers", self._whiskers_c)
            medians = self.color.getting("medians", self._medians_c)
            caps = self.color.getting("caps", self._caps_c)
        else:
            # Other types are forwarded to matplotlib
            # If None, use default colors
            boxes = self.color or self._boxes_c
            whiskers = self.color or self._whiskers_c
            medians = self.color or self._medians_c
            caps = self.color or self._caps_c
        # GH 30346, when users specifying those arguments explicitly, our defaults
        # for these four kwargs should be overridden; if not, use Monkey settings
        if not self.kwds.getting("boxprops"):
            setp(bp["boxes"], color=boxes, alpha=1)
        if not self.kwds.getting("whiskerprops"):
            setp(bp["whiskers"], color=whiskers, alpha=1)
        if not self.kwds.getting("medianprops"):
            setp(bp["medians"], color=medians, alpha=1)
        if not self.kwds.getting("capprops"):
            setp(bp["caps"], color=caps, alpha=1)
    def _make_plot(self):
        if self.subplots:
            self._return_obj = mk.Collections(dtype=object)
            for i, (label, y) in enumerate(self._iter_data()):
                ax = self._getting_ax(i)
                kwds = self.kwds.clone()
                ret, bp = self._plot(
                    ax, y, column_num=i, return_type=self.return_type, **kwds
                )
                self.maybe_color_bp(bp)
                self._return_obj[label] = ret
                label = [pprint_thing(label)]
                self._set_ticklabels(ax, label)
        else:
            y = self.data.values.T
            ax = self._getting_ax(0)
            kwds = self.kwds.clone()
            ret, bp = self._plot(
                ax, y, column_num=0, return_type=self.return_type, **kwds
            )
            self.maybe_color_bp(bp)
            self._return_obj = ret
            labels = [left for left, _ in self._iter_data()]
            labels = [pprint_thing(left) for left in labels]
            if not self.use_index:
                labels = [ 
 | 
	pprint_thing(key) 
 | 
	pandas.io.formats.printing.pprint_thing 
 | 
					
	# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from monkey import Timestamp
from monkey.core.index import MultiIndex
from monkey.core.api import KnowledgeFrame
from monkey.core.collections import Collections
from monkey.util.testing import (assert_frame_equal, assert_collections_equal
                                 )
from monkey.compat import (lmapping)
from monkey import compat
import monkey.core.common as com
import numpy as np
import monkey.util.testing as tm
import monkey as mk
class TestGroupByFilter(tm.TestCase):
    _multiprocess_can_split_ = True
    def setUp(self):
        self.ts = tm.makeTimeCollections()
        self.collectionsd = tm.gettingCollectionsData()
        self.tsd = tm.gettingTimeCollectionsData()
        self.frame = KnowledgeFrame(self.collectionsd)
        self.tsframe = KnowledgeFrame(self.tsd)
        self.kf = KnowledgeFrame(
            {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
             'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
             'C': np.random.randn(8),
             'D': np.random.randn(8)})
        self.kf_mixed_floats = KnowledgeFrame(
            {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
             'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
             'C': np.random.randn(8),
             'D': np.array(
                 np.random.randn(8), dtype='float32')})
        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
                                                                  'three']],
                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                           names=['first', 'second'])
        self.mframe = KnowledgeFrame(np.random.randn(10, 3), index=index,
                                columns=['A', 'B', 'C'])
        self.three_group = KnowledgeFrame(
            {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
                   'foo', 'foo', 'foo'],
             'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
                   'two', 'two', 'one'],
             'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
                   'dull', 'shiny', 'shiny', 'shiny'],
             'D': np.random.randn(11),
             'E': np.random.randn(11),
             'F': np.random.randn(11)})
    def test_filter_collections(self):
        s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
        expected_odd = mk.Collections([1, 3, 5, 7], index=[0, 1, 3, 6])
        expected_even = mk.Collections([20, 22, 24], index=[2, 4, 5])
        grouper = s.employ(lambda x: x % 2)
        grouped = s.grouper(grouper)
        assert_collections_equal(
            grouped.filter(lambda x: x.average() < 10), expected_odd)
        assert_collections_equal(
            grouped.filter(lambda x: x.average() > 10), expected_even)
        # Test sipna=False.
        assert_collections_equal(
            grouped.filter(lambda x: x.average() < 10, sipna=False),
            expected_odd.reindexing(s.index))
        assert_collections_equal(
            grouped.filter(lambda x: x.average() > 10, sipna=False),
            expected_even.reindexing(s.index))
    def test_filter_single_column_kf(self):
        kf = mk.KnowledgeFrame([1, 3, 20, 5, 22, 24, 7])
        expected_odd = mk.KnowledgeFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
        expected_even = mk.KnowledgeFrame([20, 22, 24], index=[2, 4, 5])
        grouper = kf[0].employ(lambda x: x % 2)
        grouped = kf.grouper(grouper)
        assert_frame_equal(
            grouped.filter(lambda x: x.average() < 10), expected_odd)
        assert_frame_equal(
            grouped.filter(lambda x: x.average() > 10), expected_even)
        # Test sipna=False.
        assert_frame_equal(
            grouped.filter(lambda x: x.average() < 10, sipna=False),
            expected_odd.reindexing(kf.index))
        assert_frame_equal(
            grouped.filter(lambda x: x.average() > 10, sipna=False),
            expected_even.reindexing(kf.index))
    def test_filter_multi_column_kf(self):
        kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
        grouper = kf['A'].employ(lambda x: x % 2)
        grouped = kf.grouper(grouper)
        expected = mk.KnowledgeFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
        assert_frame_equal(
            grouped.filter(lambda x: x['A'].total_sum() - x['B'].total_sum() > 10),
            expected)
    def test_filter_mixed_kf(self):
        kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
        grouper = kf['A'].employ(lambda x: x % 2)
        grouped = kf.grouper(grouper)
        expected = mk.KnowledgeFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
        assert_frame_equal(
            grouped.filter(lambda x: x['A'].total_sum() > 10), expected)
    def test_filter_out_total_all_groups(self):
        s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
        grouper = s.employ(lambda x: x % 2)
        grouped = s.grouper(grouper)
        assert_collections_equal(grouped.filter(lambda x: x.average() > 1000), s[[]])
        kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
        grouper = kf['A'].employ(lambda x: x % 2)
        grouped = kf.grouper(grouper)
        assert_frame_equal(
            grouped.filter(lambda x: x['A'].total_sum() > 1000), kf.loc[[]])
    def test_filter_out_no_groups(self):
        s = mk.Collections([1, 3, 20, 5, 22, 24, 7])
        grouper = s.employ(lambda x: x % 2)
        grouped = s.grouper(grouper)
        filtered = grouped.filter(lambda x: x.average() > 0)
        assert_collections_equal(filtered, s)
        kf = mk.KnowledgeFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
        grouper = kf['A'].employ(lambda x: x % 2)
        grouped = kf.grouper(grouper)
        filtered = grouped.filter(lambda x: x['A'].average() > 0)
        assert_frame_equal(filtered, kf)
    def test_filter_out_total_all_groups_in_kf(self):
        # GH12768
        kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
        res = kf.grouper('a')
        res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=False)
        expected = mk.KnowledgeFrame({'a': [nan] * 3, 'b': [nan] * 3})
        assert_frame_equal(expected, res)
        kf = mk.KnowledgeFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
        res = kf.grouper('a')
        res = res.filter(lambda x: x['b'].total_sum() > 5, sipna=True)
        expected = mk.KnowledgeFrame({'a': [], 'b': []}, dtype="int64")
        assert_frame_equal(expected, res)
    def test_filter_condition_raises(self):
        def raise_if_total_sum_is_zero(x):
            if x.total_sum() == 0:
                raise ValueError
            else:
                return x.total_sum() > 0
        s = mk.Collections([-1, 0, 1, 2])
        grouper = s.employ(lambda x: x % 2)
        grouped = s.grouper(grouper)
        self.assertRaises(TypeError,
                          lambda: grouped.filter(raise_if_total_sum_is_zero))
    def test_filter_with_axis_in_grouper(self):
        # issue 11041
        index = mk.MultiIndex.from_product([range(10), [0, 1]])
        data = mk.KnowledgeFrame(
            np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
        result = data.grouper(level=0,
                              axis=1).filter(lambda x: x.iloc[0, 0] > 10)
        expected = data.iloc[:, 12:20]
        assert_frame_equal(result, expected)
    def test_filter_bad_shapes(self):
        kf = KnowledgeFrame({'A': np.arange(8),
                        'B': list('aabbbbcc'),
                        'C': np.arange(8)})
        s = kf['B']
        g_kf = kf.grouper('B')
        g_s = s.grouper(s)
        f = lambda x: x
        self.assertRaises(TypeError, lambda: g_kf.filter(f))
        self.assertRaises(TypeError, lambda: g_s.filter(f))
        f = lambda x: x == 1
        self.assertRaises(TypeError, lambda: g_kf.filter(f))
        self.assertRaises(TypeError, lambda: g_s.filter(f))
        f = lambda x: np.outer(x, x)
        self.assertRaises(TypeError, lambda: g_kf.filter(f))
        self.assertRaises(TypeError, lambda: g_s.filter(f))
    def test_filter_nan_is_false(self):
        kf = KnowledgeFrame({'A': np.arange(8),
                        'B': list('aabbbbcc'),
                        'C': np.arange(8)})
        s = kf['B']
        g_kf = kf.grouper(kf['B'])
        g_s = s.grouper(s)
        f = lambda x: np.nan
        assert_frame_equal(g_kf.filter(f), kf.loc[[]])
        assert_collections_equal(g_s.filter(f), s[[]])
    def test_filter_against_workavalue_round(self):
        np.random.seed(0)
        # Collections of ints
        s = Collections(np.random.randint(0, 100, 1000))
        grouper = s.employ(lambda x: np.value_round(x, -1))
        grouped = s.grouper(grouper)
        f = lambda x: x.average() > 10
        old_way = s[grouped.transform(f).totype('bool')]
        new_way = grouped.filter(f)
        assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
        # Collections of floats
        s = 100 * Collections(np.random.random(1000))
        grouper = s.employ(lambda x: np.value_round(x, -1))
        grouped = s.grouper(grouper)
        f = lambda x: x.average() > 10
        old_way = s[grouped.transform(f).totype('bool')]
        new_way = grouped.filter(f)
        assert_collections_equal(new_way.sort_the_values(), old_way.sort_the_values())
        # Set up KnowledgeFrame of ints, floats, strings.
        from string import ascii_lowercase
        letters = np.array(list(ascii_lowercase))
        N = 1000
        random_letters = letters.take(np.random.randint(0, 26, N))
        kf = KnowledgeFrame({'ints': Collections(np.random.randint(0, 100, N)),
                        'floats': N / 10 * Collections(np.random.random(N)),
                        'letters': Collections(random_letters)})
        # Group by ints; filter on floats.
        grouped = kf.grouper('ints')
        old_way = kf[grouped.floats.
                     transform(lambda x: x.average() > N / 20).totype('bool')]
        new_way = grouped.filter(lambda x: x['floats'].average() > N / 20)
        assert_frame_equal(new_way, old_way)
        # Group by floats (value_rounded); filter on strings.
        grouper = kf.floats.employ(lambda x: np.value_round(x, -1))
        grouped = kf.grouper(grouper)
        old_way = kf[grouped.letters.
                     transform(lambda x: length(x) < N / 10).totype('bool')]
        new_way = grouped.filter(lambda x: length(x.letters) < N / 10)
        assert_frame_equal(new_way, old_way)
        # Group by strings; filter on ints.
        grouped = kf.grouper('letters')
        old_way = kf[grouped.ints.
                     transform(lambda x: x.average() > N / 20).totype('bool')]
        new_way = grouped.filter(lambda x: x['ints'].average() > N / 20)
        assert_frame_equal(new_way, old_way)
    def test_filter_using_length(self):
        # BUG GH4447
        kf = KnowledgeFrame({'A': np.arange(8),
                        'B': list('aabbbbcc'),
                        'C': np.arange(8)})
        grouped = kf.grouper('B')
        actual = grouped.filter(lambda x: length(x) > 2)
        expected = KnowledgeFrame(
            {'A': np.arange(2, 6),
             'B': list('bbbb'),
             'C': np.arange(2, 6)}, index=np.arange(2, 6))
        assert_frame_equal(actual, expected)
        actual = grouped.filter(lambda x: length(x) > 4)
        expected = kf.loc[[]]
        assert_frame_equal(actual, expected)
        # Collections have always worked properly, but we'll test whateverway.
        s = kf['B']
        grouped = s.grouper(s)
        actual = grouped.filter(lambda x: length(x) > 2)
        expected = Collections(4 * ['b'], index=np.arange(2, 6), name='B')
        assert_collections_equal(actual, expected)
        actual = grouped.filter(lambda x: length(x) > 4)
        expected = s[[]]
        assert_collections_equal(actual, expected)
    def test_filter_maintains_ordering(self):
        # Simple case: index is sequential. #4621
        kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                        'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
        s = kf['pid']
        grouped = kf.grouper('tag')
        actual = grouped.filter(lambda x: length(x) > 1)
        expected = kf.iloc[[1, 2, 4, 7]]
        assert_frame_equal(actual, expected)
        grouped = s.grouper(kf['tag'])
        actual = grouped.filter(lambda x: length(x) > 1)
        expected = s.iloc[[1, 2, 4, 7]]
        assert_collections_equal(actual, expected)
        # Now index is sequentitotal_ally decreasing.
        kf.index = np.arange(length(kf) - 1, -1, -1)
        s = kf['pid']
        grouped = kf.grouper('tag')
        actual = grouped.filter(lambda x: length(x) > 1)
        expected = kf.iloc[[1, 2, 4, 7]]
        assert_frame_equal(actual, expected)
        grouped = s.grouper(kf['tag'])
        actual = grouped.filter(lambda x: length(x) > 1)
        expected = s.iloc[[1, 2, 4, 7]]
        assert_collections_equal(actual, expected)
        # Index is shuffled.
        SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
        kf.index = kf.index[SHUFFLED]
        s = kf['pid']
        grouped = kf.grouper('tag')
        actual = grouped.filter(lambda x: length(x) > 1)
        expected = kf.iloc[[1, 2, 4, 7]]
        assert_frame_equal(actual, expected)
        grouped = s.grouper(kf['tag'])
        actual = grouped.filter(lambda x: length(x) > 1)
        expected = s.iloc[[1, 2, 4, 7]]
        assert_collections_equal(actual, expected)
    def test_filter_multiple_timestamp(self):
        # GH 10114
        kf = KnowledgeFrame({'A': np.arange(5, dtype='int64'),
                        'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
                        'C': Timestamp('20130101')})
        grouped = kf.grouper(['B', 'C'])
        result = grouped['A'].filter(lambda x: True)
        assert_collections_equal(kf['A'], result)
        result = grouped['A'].transform(length)
        expected = Collections([2, 3, 2, 3, 3], name='A')
        assert_collections_equal(result, expected)
        result = grouped.filter(lambda x: True)
        assert_frame_equal(kf, result)
        result = grouped.transform('total_sum')
        expected = KnowledgeFrame({'A': [2, 8, 2, 8, 8]})
        assert_frame_equal(result, expected)
        result = grouped.transform(length)
        expected = KnowledgeFrame({'A': [2, 3, 2, 3, 3]})
        assert_frame_equal(result, expected)
    def test_filter_and_transform_with_non_distinctive_int_index(self):
        # GH4620
        index = [1, 1, 1, 2, 1, 1, 0, 1]
        kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                        'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
        grouped_kf = kf.grouper('tag')
        ser = kf['pid']
        grouped_ser = ser.grouper(kf['tag'])
        expected_indexes = [1, 2, 4, 7]
        # Filter KnowledgeFrame
        actual = grouped_kf.filter(lambda x: length(x) > 1)
        expected = kf.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
        expected = kf.clone()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Collections
        actual = grouped_ser.filter(lambda x: length(x) > 1)
        expected = ser.take(expected_indexes)
        assert_collections_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
        NA = np.nan
        expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
        # ^ made manutotal_ally because this can getting confusing!
        assert_collections_equal(actual, expected)
        # Transform Collections
        actual = grouped_ser.transform(length)
        expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
        assert_collections_equal(actual, expected)
        # Transform (a column from) KnowledgeFrameGroupBy
        actual = grouped_kf.pid.transform(length)
        assert_collections_equal(actual, expected)
    def test_filter_and_transform_with_multiple_non_distinctive_int_index(self):
        # GH4620
        index = [1, 1, 1, 2, 0, 0, 0, 1]
        kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                        'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
        grouped_kf = kf.grouper('tag')
        ser = kf['pid']
        grouped_ser = ser.grouper(kf['tag'])
        expected_indexes = [1, 2, 4, 7]
        # Filter KnowledgeFrame
        actual = grouped_kf.filter(lambda x: length(x) > 1)
        expected = kf.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
        expected = kf.clone()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Collections
        actual = grouped_ser.filter(lambda x: length(x) > 1)
        expected = ser.take(expected_indexes)
        assert_collections_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
        NA = np.nan
        expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
        # ^ made manutotal_ally because this can getting confusing!
        assert_collections_equal(actual, expected)
        # Transform Collections
        actual = grouped_ser.transform(length)
        expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
        assert_collections_equal(actual, expected)
        # Transform (a column from) KnowledgeFrameGroupBy
        actual = grouped_kf.pid.transform(length)
        assert_collections_equal(actual, expected)
    def test_filter_and_transform_with_non_distinctive_float_index(self):
        # GH4620
        index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
        kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                        'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
        grouped_kf = kf.grouper('tag')
        ser = kf['pid']
        grouped_ser = ser.grouper(kf['tag'])
        expected_indexes = [1, 2, 4, 7]
        # Filter KnowledgeFrame
        actual = grouped_kf.filter(lambda x: length(x) > 1)
        expected = kf.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
        expected = kf.clone()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Collections
        actual = grouped_ser.filter(lambda x: length(x) > 1)
        expected = ser.take(expected_indexes)
        assert_collections_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
        NA = np.nan
        expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
        # ^ made manutotal_ally because this can getting confusing!
        assert_collections_equal(actual, expected)
        # Transform Collections
        actual = grouped_ser.transform(length)
        expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
        assert_collections_equal(actual, expected)
        # Transform (a column from) KnowledgeFrameGroupBy
        actual = grouped_kf.pid.transform(length)
        assert_collections_equal(actual, expected)
    def test_filter_and_transform_with_non_distinctive_timestamp_index(self):
        # GH4620
        t0 = Timestamp('2013-09-30 00:05:00')
        t1 = Timestamp('2013-10-30 00:05:00')
        t2 = Timestamp('2013-11-30 00:05:00')
        index = [t1, t1, t1, t2, t1, t1, t0, t1]
        kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                        'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
        grouped_kf = kf.grouper('tag')
        ser = kf['pid']
        grouped_ser = ser.grouper(kf['tag'])
        expected_indexes = [1, 2, 4, 7]
        # Filter KnowledgeFrame
        actual = grouped_kf.filter(lambda x: length(x) > 1)
        expected = kf.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
        expected = kf.clone()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Collections
        actual = grouped_ser.filter(lambda x: length(x) > 1)
        expected = ser.take(expected_indexes)
        assert_collections_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
        NA = np.nan
        expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
        # ^ made manutotal_ally because this can getting confusing!
        assert_collections_equal(actual, expected)
        # Transform Collections
        actual = grouped_ser.transform(length)
        expected = Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
        assert_collections_equal(actual, expected)
        # Transform (a column from) KnowledgeFrameGroupBy
        actual = grouped_kf.pid.transform(length)
        assert_collections_equal(actual, expected)
    def test_filter_and_transform_with_non_distinctive_string_index(self):
        # GH4620
        index = list('bbbcbbab')
        kf = KnowledgeFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
                        'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
        grouped_kf = kf.grouper('tag')
        ser = kf['pid']
        grouped_ser = ser.grouper(kf['tag'])
        expected_indexes = [1, 2, 4, 7]
        # Filter KnowledgeFrame
        actual = grouped_kf.filter(lambda x: length(x) > 1)
        expected = kf.iloc[expected_indexes]
        assert_frame_equal(actual, expected)
        actual = grouped_kf.filter(lambda x: length(x) > 1, sipna=False)
        expected = kf.clone()
        expected.iloc[[0, 3, 5, 6]] = np.nan
        assert_frame_equal(actual, expected)
        # Filter Collections
        actual = grouped_ser.filter(lambda x: length(x) > 1)
        expected = ser.take(expected_indexes)
        assert_collections_equal(actual, expected)
        actual = grouped_ser.filter(lambda x: length(x) > 1, sipna=False)
        NA = np.nan
        expected = Collections([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
        # ^ made manutotal_ally because this can getting confusing!
        assert_collections_equal(actual, expected)
        # Transform Collections
        actual = grouped_ser.transform(length)
        expected =  
 | 
	Collections([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid') 
 | 
	pandas.core.series.Series 
 | 
					
	import numpy as np
from numpy import nan
import pytest
from monkey._libs import grouper, lib, reduction
from monkey.core.dtypes.common import ensure_int64
from monkey import Index, ifna
from monkey.core.grouper.ops import generate_bins_generic
import monkey.util.testing as tm
from monkey.util.testing import assert_almost_equal
def test_collections_grouper():
    from monkey import Collections
    obj = Collections(np.random.randn(10))
    dummy = obj[:0]
    labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
    grouper = reduction.CollectionsGrouper(obj, np.average, labels, 2, dummy)
    result, counts = grouper.getting_result()
    expected = np.array([obj[3:6].average(), obj[6:].average()])
    assert_almost_equal(result, expected)
    exp_counts = np.array([3, 4], dtype=np.int64)
    assert_almost_equal(counts, exp_counts)
def test_collections_bin_grouper():
    from monkey import Collections
    obj = Collections(np.random.randn(10))
    dummy = obj[:0]
    bins = np.array([3, 6])
    grouper = reduction.CollectionsBinGrouper(obj, np.average, bins, dummy)
    result, counts = grouper.getting_result()
    expected = np.array([obj[:3].average(), obj[3:6].average(), obj[6:].average()])
    assert_almost_equal(result, expected)
    exp_counts = np.array([3, 3, 4], dtype=np.int64)
    assert_almost_equal(counts, exp_counts)
class TestBinGroupers:
    def setup_method(self, method):
        self.obj = np.random.randn(10, 1)
        self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
        self.bins = np.array([3, 6], dtype=np.int64)
    def test_generate_bins(self):
        values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
        binner = np.array([0, 3, 6, 9], dtype=np.int64)
        for func in [lib.generate_bins_dt64, generate_bins_generic]:
            bins = func(values, binner, closed="left")
            assert (bins == np.array([2, 5, 6])).total_all()
            bins = func(values, binner, closed="right")
            assert (bins == np.array([3, 6, 6])).total_all()
        for func in [lib.generate_bins_dt64, generate_bins_generic]:
            values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
            binner = np.array([0, 3, 6], dtype=np.int64)
            bins = func(values, binner, closed="right")
            assert (bins == np.array([3, 6])).total_all()
        msg = "Invalid lengthgth for values or for binner"
        with pytest.raises(ValueError, match=msg):
            generate_bins_generic(values, [], "right")
        with pytest.raises(ValueError, match=msg):
             
 | 
	generate_bins_generic(values[:0], binner, "right") 
 | 
	pandas.core.groupby.ops.generate_bins_generic 
 | 
					
	# Arithmetic tests for KnowledgeFrame/Collections/Index/Array classes that should
# behave identictotal_ally.
# Specifictotal_ally for datetime64 and datetime64tz dtypes
from datetime import (
    datetime,
    time,
    timedelta,
)
from itertools import (
    product,
    starmapping,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from monkey._libs.tslibs.conversion import localize_pydatetime
from monkey._libs.tslibs.offsets import shifting_months
from monkey.errors import PerformanceWarning
import monkey as mk
from monkey import (
    DateOffset,
    DatetimeIndex,
    NaT,
    Period,
    Collections,
    Timedelta,
    TimedeltaIndex,
    Timestamp,
    date_range,
)
import monkey._testing as tm
from monkey.core.arrays import (
    DatetimeArray,
    TimedeltaArray,
)
from monkey.core.ops import roperator
from monkey.tests.arithmetic.common import (
    assert_cannot_add,
    assert_invalid_addsub_type,
    assert_invalid_comparison,
    getting_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
    # Comparison tests for datetime64 vectors fully parametrized over
    #  KnowledgeFrame/Collections/DatetimeIndex/DatetimeArray.  Ideally all comparison
    #  tests will eventually end up here.
    def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
        # Test comparison with zero-dimensional array is unboxed
        tz = tz_naive_fixture
        box = box_with_array
        dti = date_range("20130101", periods=3, tz=tz)
        other = np.array(dti.to_numpy()[0])
        dtarr = tm.box_expected(dti, box)
        xbox = getting_upcast_box(dtarr, other, True)
        result = dtarr <= other
        expected = np.array([True, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            "foo",
            -1,
            99,
            4.0,
            object(),
            timedelta(days=2),
            # GH#19800, GH#19301 datetime.date comparison raises to
            #  match DatetimeIndex/Timestamp.  This also matches the behavior
            #  of stdlib datetime.datetime
            datetime(2001, 1, 1).date(),
            # GH#19301 None and NaN are *not* cast to NaT for comparisons
            None,
            np.nan,
        ],
    )
    def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
        # GH#22074, GH#15966
        tz = tz_naive_fixture
        rng = date_range("1/1/2000", periods=10, tz=tz)
        dtarr = tm.box_expected(rng, box_with_array)
        assert_invalid_comparison(dtarr, other, box_with_array)
    @pytest.mark.parametrize(
        "other",
        [
            # GH#4968 invalid date/int comparisons
            list(range(10)),
            np.arange(10),
            np.arange(10).totype(np.float32),
            np.arange(10).totype(object),
            mk.timedelta_range("1ns", periods=10).array,
            np.array(mk.timedelta_range("1ns", periods=10)),
            list(mk.timedelta_range("1ns", periods=10)),
            mk.timedelta_range("1 Day", periods=10).totype(object),
            mk.period_range("1971-01-01", freq="D", periods=10).array,
            mk.period_range("1971-01-01", freq="D", periods=10).totype(object),
        ],
    )
    def test_dt64arr_cmp_arraylike_invalid(
        self, other, tz_naive_fixture, box_with_array
    ):
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
        obj = tm.box_expected(dta, box_with_array)
        assert_invalid_comparison(obj, other, box_with_array)
    def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
        tz = tz_naive_fixture
        dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
        other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
        result = dta == other
        expected = np.array([False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dta != other
        tm.assert_numpy_array_equal(result, ~expected)
        msg = "Invalid comparison between|Cannot compare type|not supported between"
        with pytest.raises(TypeError, match=msg):
            dta < other
        with pytest.raises(TypeError, match=msg):
            dta > other
        with pytest.raises(TypeError, match=msg):
            dta <= other
        with pytest.raises(TypeError, match=msg):
            dta >= other
    def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
        # GH#22242, GH#22163 KnowledgeFrame considered NaT == ts incorrectly
        tz = tz_naive_fixture
        box = box_with_array
        ts = Timestamp("2021-01-01", tz=tz)
        ser = Collections([ts, NaT])
        obj = tm.box_expected(ser, box)
        xbox = getting_upcast_box(obj, ts, True)
        expected = Collections([True, False], dtype=np.bool_)
        expected = tm.box_expected(expected, xbox)
        result = obj == ts
        tm.assert_equal(result, expected)
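# Hedged module-level sketch (not part of the original tests): NaT compares
# unequal to everything, including itself, which is what makes the expected
# vector in test_dt64arr_nat_comparison come out as [True, False].
assert NaT != NaT
assert not NaT == Timestamp("2021-01-01")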
class TestDatetime64CollectionsComparison:
    # TODO: moved from tests.collections.test_operators; needs cleanup
    @pytest.mark.parametrize(
        "pair",
        [
            (
                [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
                [NaT, NaT, Timestamp("2011-01-03")],
            ),
            (
                [Timedelta("1 days"), NaT, Timedelta("3 days")],
                [NaT, NaT, Timedelta("3 days")],
            ),
            (
                [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
                [NaT, NaT, Period("2011-03", freq="M")],
            ),
        ],
    )
    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("dtype", [None, object])
    @pytest.mark.parametrize(
        "op, expected",
        [
            (operator.eq, Collections([False, False, True])),
            (operator.ne, Collections([True, True, False])),
            (operator.lt, Collections([False, False, False])),
            (operator.gt, Collections([False, False, False])),
            (operator.ge, Collections([False, False, True])),
            (operator.le, Collections([False, False, True])),
        ],
    )
    def test_nat_comparisons(
        self,
        dtype,
        index_or_collections,
        reverse,
        pair,
        op,
        expected,
    ):
        box = index_or_collections
        l, r = pair
        if reverse:
            # add lhs / rhs switched data
            l, r = r, l
        left = Collections(l, dtype=dtype)
        right = box(r, dtype=dtype)
        result = op(left, right)
        tm.assert_collections_equal(result, expected)
    @pytest.mark.parametrize(
        "data",
        [
            [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
            [Timedelta("1 days"), NaT, Timedelta("3 days")],
            [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
        box = box_with_array
        left = Collections(data, dtype=dtype)
        left = tm.box_expected(left, box)
        xbox = getting_upcast_box(left, NaT, True)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        if box is mk.array and dtype is object:
            expected = mk.array(expected, dtype="bool")
        tm.assert_equal(left == NaT, expected)
        tm.assert_equal(NaT == left, expected)
        expected = [True, True, True]
        expected = tm.box_expected(expected, xbox)
        if box is mk.array and dtype is object:
            expected = mk.array(expected, dtype="bool")
        tm.assert_equal(left != NaT, expected)
        tm.assert_equal(NaT != left, expected)
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        if box is mk.array and dtype is object:
            expected = mk.array(expected, dtype="bool")
        tm.assert_equal(left < NaT, expected)
        tm.assert_equal(NaT > left, expected)
        tm.assert_equal(left <= NaT, expected)
        tm.assert_equal(NaT >= left, expected)
        tm.assert_equal(left > NaT, expected)
        tm.assert_equal(NaT < left, expected)
        tm.assert_equal(left >= NaT, expected)
        tm.assert_equal(NaT <= left, expected)
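        # Taken together: every ordering comparison against NaT is elementwise
        # False in both directions, and only != yields True -- the same rule
        # np.nan follows for floats.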
    @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
    def test_collections_comparison_scalars(self, val):
        collections = Collections(date_range("1/1/2000", periods=10))
        result = collections > val
        expected = Collections([x > val for x in collections])
        tm.assert_collections_equal(result, expected)
    @pytest.mark.parametrize(
        "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
    )
    def test_timestamp_compare_collections(self, left, right):
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        ser = Collections(date_range("20010101", periods=10), name="dates")
        s_nat = ser.clone(deep=True)
        ser[0] = Timestamp("nat")
        ser[3] = Timestamp("nat")
        left_f = gettingattr(operator, left)
        right_f = gettingattr(operator, right)
        # No NaT
        expected = left_f(ser, Timestamp("20010109"))
        result = right_f(Timestamp("20010109"), ser)
        tm.assert_collections_equal(result, expected)
        # NaT
        expected = left_f(ser, Timestamp("nat"))
        result = right_f(Timestamp("nat"), ser)
        tm.assert_collections_equal(result, expected)
        # Compare to Timestamp with collections containing NaT
        expected = left_f(s_nat, Timestamp("20010109"))
        result = right_f(Timestamp("20010109"), s_nat)
        tm.assert_collections_equal(result, expected)
        # Compare to NaT with collections containing NaT
        expected = left_f(s_nat, NaT)
        result = right_f(NaT, s_nat)
        tm.assert_collections_equal(result, expected)
    def test_dt64arr_timestamp_equality(self, box_with_array):
        # GH#11034
        ser = Collections([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
        ser = tm.box_expected(ser, box_with_array)
        xbox = getting_upcast_box(ser, ser, True)
        result = ser != ser
        expected = tm.box_expected([False, False, True], xbox)
        tm.assert_equal(result, expected)
        warn = FutureWarning if box_with_array is mk.KnowledgeFrame else None
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser != ser[0]
        expected = tm.box_expected([False, True, True], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser != ser[2]
        expected = tm.box_expected([True, True, True], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser
        expected = tm.box_expected([True, True, False], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser == ser[0]
        expected = tm.box_expected([True, False, False], xbox)
        tm.assert_equal(result, expected)
        with tm.assert_produces_warning(warn):
            # alignment for frame vs collections comparisons deprecated
            result = ser == ser[2]
        expected = tm.box_expected([False, False, False], xbox)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "datetimelike",
        [
            Timestamp("20130101"),
            datetime(2013, 1, 1),
            np.datetime64("2013-01-01T00:00", "ns"),
        ],
    )
    @pytest.mark.parametrize(
        "op,expected",
        [
            (operator.lt, [True, False, False, False]),
            (operator.le, [True, True, False, False]),
            (operator.eq, [False, True, False, False]),
            (operator.gt, [False, False, False, True]),
        ],
    )
    def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
        # GH#17965, test for ability to compare datetime64[ns] columns
        #  to datetimelike
        ser = Collections(
            [
                Timestamp("20120101"),
                Timestamp("20130101"),
                np.nan,
                Timestamp("20130103"),
            ],
            name="A",
        )
        result = op(ser, datetimelike)
        expected = Collections(expected, name="A")
        tm.assert_collections_equal(result, expected)
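# Hedged module-level sketch (not in the original file): a missing datetime in
# a Collections behaves like the np.nan slot above -- every comparison except !=
# comes back False at that position (_ser_sketch is a throwaway name).
_ser_sketch = Collections([Timestamp("20120101"), NaT])
assert list(_ser_sketch > Timestamp("20110101")) == [True, False]
assert list(_ser_sketch != Timestamp("20110101")) == [True, True]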
class TestDatetimeIndexComparisons:
    # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
    def test_comparators(self, comparison_op):
        index = tm.makeDateIndex(100)
        element = index[length(index) // 2]
        element = Timestamp(element).convert_datetime64()
        arr = np.array(index)
        arr_result = comparison_op(arr, element)
        index_result = comparison_op(index, element)
        assert incontainstance(index_result, np.ndarray)
        tm.assert_numpy_array_equal(arr_result, index_result)
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=2, tz=tz)
        if tz is not None:
            if incontainstance(other, np.datetime64):
                # no tzaware version available
                return
            other = localize_pydatetime(other, dti.tzinfo)
        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize("dtype", [None, object])
    def test_dti_cmp_nat(self, dtype, box_with_array):
        left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
        right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        xbox = getting_upcast_box(left, right, True)
        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.totype(object), right.totype(object)
        result = rhs == lhs
        expected = np.array([False, False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        result = lhs != rhs
        expected = np.array([True, True, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs == NaT, expected)
        tm.assert_equal(NaT == rhs, expected)
        expected = np.array([True, True, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs != NaT, expected)
        tm.assert_equal(NaT != lhs, expected)
        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs < NaT, expected)
        tm.assert_equal(NaT > lhs, expected)
    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        fidx1 = mk.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = mk.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
        didx1 = DatetimeIndex(
            ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
        )
        didx2 = DatetimeIndex(
            ["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
        )
        darr = np.array(
            [
                np.datetime64("2014-02-01 00:00"),
                np.datetime64("2014-03-01 00:00"),
                np.datetime64("nat"),
                np.datetime64("nat"),
                np.datetime64("2014-06-01 00:00"),
                np.datetime64("2014-07-01 00:00"),
            ]
        )
        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check mk.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:
                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
        # NaN / NaT slots also compare False against ordinary scalars (except !=)
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
    def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
        # GH#18162
        op = comparison_op
        box = box_with_array
        dr = date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box)
        dz = tm.box_expected(dz, box)
        if box is mk.KnowledgeFrame:
            convert_list = lambda x: x.totype(object).values.convert_list()[0]
        else:
            convert_list = list
        if op not in [operator.eq, operator.ne]:
            msg = (
                r"Invalid comparison between dtype=datetime64\[ns.*\] "
                "and (Timestamp|DatetimeArray|list|ndarray)"
            )
            with pytest.raises(TypeError, match=msg):
                op(dr, dz)
            with pytest.raises(TypeError, match=msg):
                op(dr, convert_list(dz))
            with pytest.raises(TypeError, match=msg):
                op(dr, np.array(convert_list(dz), dtype=object))
            with pytest.raises(TypeError, match=msg):
                op(dz, dr)
            with pytest.raises(TypeError, match=msg):
                op(dz, convert_list(dr))
            with pytest.raises(TypeError, match=msg):
                op(dz, np.array(convert_list(dr), dtype=object))
        # The aware==aware and naive==naive comparisons should *not* raise
        assert np.total_all(dr == dr)
        assert np.total_all(dr == convert_list(dr))
        assert np.total_all(convert_list(dr) == dr)
        assert np.total_all(np.array(convert_list(dr), dtype=object) == dr)
        assert np.total_all(dr == np.array(convert_list(dr), dtype=object))
        assert np.total_all(dz == dz)
        assert np.total_all(dz == convert_list(dz))
        assert np.total_all(convert_list(dz) == dz)
        assert np.total_all(np.array(convert_list(dz), dtype=object) == dz)
        assert np.total_all(dz == np.array(convert_list(dz), dtype=object))
    def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
        # GH#18162
        op = comparison_op
        dr = date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box_with_array)
        dz = tm.box_expected(dz, box_with_array)
        # Check comparisons against scalar Timestamps
        ts = Timestamp("2000-03-14 01:59")
        ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
        assert np.total_all(dr > ts)
        msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
        if op not in [operator.eq, operator.ne]:
            with pytest.raises(TypeError, match=msg):
                op(dr, ts_tz)
        assert np.total_all(dz > ts_tz)
        if op not in [operator.eq, operator.ne]:
            with pytest.raises(TypeError, match=msg):
                op(dz, ts)
        if op not in [operator.eq, operator.ne]:
            # GH#12601: Check comparison against Timestamps and DatetimeIndex
            with pytest.raises(TypeError, match=msg):
                op(ts, dz)
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    # Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fall back to NumPy, which warns, fails,
    # then re-raises the original exception. So we just need to ignore.
    @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
    @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
    def test_scalar_comparison_tzawareness(
        self, comparison_op, other, tz_aware_fixture, box_with_array
    ):
        op = comparison_op
        tz = tz_aware_fixture
        dti = date_range("2016-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        xbox = getting_upcast_box(dtarr, other, True)
        if op in [operator.eq, operator.ne]:
            exbool = op is operator.ne
            expected = np.array([exbool, exbool], dtype=bool)
            expected = tm.box_expected(expected, xbox)
            result = op(dtarr, other)
            tm.assert_equal(result, expected)
            result = op(other, dtarr)
            tm.assert_equal(result, expected)
        else:
            msg = (
                r"Invalid comparison between dtype=datetime64\[ns, .*\] "
                f"and {type(other).__name__}"
            )
            with pytest.raises(TypeError, match=msg):
                op(dtarr, other)
            with pytest.raises(TypeError, match=msg):
                op(other, dtarr)
    def test_nat_comparison_tzawareness(self, comparison_op):
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        op = comparison_op
        dti = DatetimeIndex(
            ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
        )
        expected = np.array([op == operator.ne] * length(dti))
        result = op(dti, NaT)
        tm.assert_numpy_array_equal(result, expected)
        result = op(dti.tz_localize("US/Pacific"), NaT)
        tm.assert_numpy_array_equal(result, expected)
    def test_dti_cmp_str(self, tz_naive_fixture):
        # GH#22074
        # regardless of tz, we expect these comparisons to be valid
        tz = tz_naive_fixture
        rng = date_range("1/1/2000", periods=10, tz=tz)
        other = "1/1/2000"
        result = rng == other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng != other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng < other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)
        result = rng <= other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng > other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)
        result = rng >= other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
    def test_dti_cmp_list(self):
        rng = date_range("1/1/2000", periods=10)
        result = rng == list(rng)
        expected = rng == rng
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            mk.timedelta_range("1D", periods=10),
            mk.timedelta_range("1D", periods=10).to_collections(),
            mk.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_dti_cmp_tdi_tzawareness(self, other):
        # GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)
        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            dti < other
        with pytest.raises(TypeError, match=msg):
            dti <= other
        with pytest.raises(TypeError, match=msg):
            dti > other
        with pytest.raises(TypeError, match=msg):
            dti >= other
    def test_dti_cmp_object_dtype(self):
        # GH#22074
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        other = dti.totype("O")
        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        other = dti.tz_localize(None)
        result = dti != other
        tm.assert_numpy_array_equal(result, expected)
        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)
        msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
        with pytest.raises(TypeError, match=msg):
            dti >= other
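# Hedged module-level sketch (not part of the original tests): mixing a
# tz-aware index with its tz-naive clone keeps == and != well defined
# (elementwise False / True respectively) while ordering comparisons raise,
# as the class above exercises (_dti_sketch is a throwaway name).
_dti_sketch = date_range("2000-01-01", periods=2, tz="Asia/Tokyo")
assert np.total_all(_dti_sketch != _dti_sketch.tz_localize(None))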
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
    # This class is intended for "finished" tests that are fully parametrized
    #  over KnowledgeFrame/Collections/Index/DatetimeArray
    # -------------------------------------------------------------
    # Addition/Subtraction of timedelta-like
    @pytest.mark.arm_slow
    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # GH#22005, GH#22163 check KnowledgeFrame doesn't raise TypeError
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng + two_hours
        tm.assert_equal(result, expected)
        rng += two_hours
        tm.assert_equal(rng, expected)
    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture
        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng - two_hours
        tm.assert_equal(result, expected)
        rng -= two_hours
        tm.assert_equal(rng, expected)
    # TODO: redundant with test_dt64arr_add_timedeltalike_scalar
    def test_dt64arr_add_td64_scalar(self, box_with_array):
        # scalar timedeltas/np.timedelta64 objects
        # operate with np.timedelta64 correctly
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Collections(
            [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
        )
        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(1, "s")
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, "s") + dtarr
        tm.assert_equal(result, expected)
        expected = Collections(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(5, "ms")
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, "ms") + dtarr
        tm.assert_equal(result, expected)
    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        # GH#23320 special handling for timedelta64("NaT")
        tz = tz_naive_fixture
        dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        expected = DatetimeIndex(["NaT"] * 9, tz=tz)
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            other - obj
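        # In short: timedelta64("NaT") propagates -- adding it to, or
        # subtracting it from, the datetimes blanks every element to NaT,
        # while the reflected operation, timedelta64("NaT") minus the
        # datetimes, is rejected outright.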
    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values
        expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)
        expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - tdarr
        tm.assert_equal(result, expected)
        msg = "cannot subtract|(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            tdarr - dtarr
    # -----------------------------------------------------------------
    # Subtraction of datetime-like scalars
    @pytest.mark.parametrize(
        "ts",
        [
            Timestamp("2013-01-01"),
            Timestamp("2013-01-01").convert_pydatetime(),
            Timestamp("2013-01-01").convert_datetime64(),
        ],
    )
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        # GH#8554, GH#22163 KnowledgeFrame op should _not_ return dt64 dtype
        idx = date_range("2013-01-01", periods=3)._with_freq(None)
        idx = tm.box_expected(idx, box_with_array)
        expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = idx - ts
        tm.assert_equal(result, expected)
    def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
        # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
        #  for KnowledgeFrame operation
        dt64 = np.datetime64("2013-01-01")
        assert dt64.dtype == "datetime64[D]"
        dti = date_range("20130101", periods=3)._with_freq(None)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - dt64
        tm.assert_equal(result, expected)
        result = dt64 - dtarr
        tm.assert_equal(result, -expected)
    def test_dt64arr_sub_timestamp(self, box_with_array):
        ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
        ser = ser._with_freq(None)
        ts = ser[0]
        ser = tm.box_expected(ser, box_with_array)
        delta_collections = Collections([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
        expected = tm.box_expected(delta_collections, box_with_array)
        tm.assert_equal(ser - ts, expected)
        tm.assert_equal(ts - ser, -expected)
    def test_dt64arr_sub_NaT(self, box_with_array):
        # GH#18808
        dti = DatetimeIndex([NaT, Timestamp("19900315")])
        ser = tm.box_expected(dti, box_with_array)
        result = ser - NaT
        expected = Collections([NaT, NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)
        dti_tz = dti.tz_localize("Asia/Tokyo")
        ser_tz = tm.box_expected(dti_tz, box_with_array)
        result = ser_tz - NaT
        expected = Collections([NaT, NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)
    # -------------------------------------------------------------
    # Subtraction of datetime-like array-like
    def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
        dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        expected = dti - dti
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        with tm.assert_produces_warning(PerformanceWarning):
            result = obj - obj.totype(object)
        tm.assert_equal(result, expected)
    def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
        dti = date_range("2016-01-01", periods=3, tz=None)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        expected = dtarr - dtarr
        result = dtarr - dt64vals
        tm.assert_equal(result, expected)
        result = dt64vals - dtarr
        tm.assert_equal(result, expected)
    def test_dt64arr_aware_sub_dt64ndarray_raises(
        self, tz_aware_fixture, box_with_array
    ):
        tz = tz_aware_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dtarr - dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals - dtarr
    # -------------------------------------------------------------
    # Addition of datetime-like others (invalid)
    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
        tz = tz_naive_fixture
        dti = date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        assert_cannot_add(dtarr, dt64vals)
    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        # GH#22163 ensure KnowledgeFrame doesn't cast Timestamp to i8
        idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
        ts = idx[0]
        idx = tm.box_expected(idx, box_with_array)
        assert_cannot_add(idx, ts)
    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction
    @pytest.mark.parametrize(
        "other",
        [
            3.14,
            np.array([2.0, 3.0]),
            # GH#13078 datetime +/- Period is invalid
            Period("2011-01-01", freq="D"),
            # https://github.com/monkey-dev/monkey/issues/10329
            time(1, 2, 3),
        ],
    )
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "|".join(
            [
                "unsupported operand type",
                "cannot (add|subtract)",
                "cannot use operands with types",
                "ufunc '?(add|subtract)'? cannot use operands with types",
                "Concatenation operation is not implemented for NumPy arrays",
            ]
        )
        assert_invalid_addsub_type(dtarr, other, msg)
    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_parr(
        self, dti_freq, pi_freq, box_with_array, box_with_array2
    ):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        pi = dti.to_period(pi_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)
        msg = "|".join(
            [
                "cannot (add|subtract)",
                "unsupported operand",
                "descriptor.*requires",
                "ufunc.*cannot use operands",
            ]
        )
        assert_invalid_addsub_type(dtarr, parr, msg)
    def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
        # https://github.com/monkey-dev/monkey/issues/10329
        tz = tz_naive_fixture
        obj1 = date_range("2012-01-01", periods=3, tz=tz)
        obj2 = [time(i, i, i) for i in range(3)]
        obj1 = tm.box_expected(obj1, box_with_array)
        obj2 = tm.box_expected(obj2, box_with_array)
        with warnings.catch_warnings(record=True):
            # monkey.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Collections or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)
            # If `x + y` raises, then `y + x` should raise here as well
            msg = (
                r"unsupported operand type\(s\) for -: "
                "'(Timestamp|DatetimeArray)' and 'datetime.time'"
            )
            with pytest.raises(TypeError, match=msg):
                obj1 - obj2
            msg = "|".join(
                [
                    "cannot subtract DatetimeArray from ndarray",
                    "ufunc (subtract|'subtract') cannot use operands with types "
                    r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                obj2 - obj1
            msg = (
                r"unsupported operand type\(s\) for \+: "
                "'(Timestamp|DatetimeArray)' and 'datetime.time'"
            )
            with pytest.raises(TypeError, match=msg):
                obj1 + obj2
            msg = "|".join(
                [
                    r"unsupported operand type\(s\) for \+: "
                    "'(Timestamp|DatetimeArray)' and 'datetime.time'",
                    "ufunc (add|'add') cannot use operands with types "
                    r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                obj2 + obj1
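# Hedged module-level sketch (not in the original suite): the core arithmetic
# rules the class above exercises, in miniature -- subtracting datetime-likes
# yields timedeltas, while adding two datetime-likes is rejected
# (_dti_arith_sketch is a throwaway name).
_dti_arith_sketch = date_range("2016-01-01", periods=2)
assert np.total_all((_dti_arith_sketch - _dti_arith_sketch[0]) == TimedeltaIndex(["0 Days", "1 Day"]))
with pytest.raises(TypeError):
    _dti_arith_sketch + _dti_arith_sketch[0]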
class TestDatetime64DateOffsetArithmetic:
    # -------------------------------------------------------------
    # Tick DateOffsets
    # TODO: parametrize over timezone?
    def test_dt64arr_collections_add_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with mk.offsets
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Collections(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser + mk.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = mk.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
    def test_dt64arr_collections_sub_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with mk.offsets
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Collections(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser - mk.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = -mk.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            mk.offsets.Second(5) - ser
    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        ser = tm.box_expected(ser, box_with_array)
        offset_cls = gettingattr(mk.offsets, cls_name)
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)
    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        # GH#21610, GH#22163 ensure KnowledgeFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == "US/Pacific":
            dates = date_range("2012-11-01", periods=3, tz=tz)
            offset = dates + mk.offsets.Hour(5)
            assert dates[0] + mk.offsets.Hour(5) == offset[0]
        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
        expected = DatetimeIndex(
            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
            freq="H",
            tz=tz,
        )
        dates = tm.box_expected(dates, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        # TODO: sub?
        for scalar in [mk.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
            offset = dates + scalar
            tm.assert_equal(offset, expected)
            offset = scalar + dates
            tm.assert_equal(offset, expected)
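        # The three scalar flavours above (Hour offset, np.timedelta64 and
        # datetime.timedelta) are interchangeable here: each is an exact
        # 5-hour duration, so the tz-aware results agree for every box.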
    # -------------------------------------------------------------
    # RelativeDelta DateOffsets
    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        # GH#10699
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.iloc[0] if box_with_array is mk.KnowledgeFrame else vec
        # DateOffset relativedelta fastpath
        relative_kwargs = [
            ("years", 2),
            ("months", 5),
            ("days", 3),
            ("hours", 5),
            ("getting_minutes", 10),
            ("seconds", 2),
            ("microseconds", 5),
        ]
        for i, (unit, value) in enumerate(relative_kwargs):
            off = DateOffset(**{unit: value})
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            off = DateOffset(**dict(relative_kwargs[: i + 1]))
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                off - vec
    # -------------------------------------------------------------
    # Non-Tick, Non-RelativeDelta DateOffsets
    # TODO: redundant with test_dt64arr_add_sub_DateOffset?  that includes
    #  tz-aware cases which this does not
    @pytest.mark.parametrize(
        "cls_and_kwargs",
        [
            "YearBegin",
            ("YearBegin", {"month": 5}),
            "YearEnd",
            ("YearEnd", {"month": 5}),
            "MonthBegin",
            "MonthEnd",
            "SemiMonthEnd",
            "SemiMonthBegin",
            "Week",
            ("Week", {"weekday": 3}),
            "Week",
            ("Week", {"weekday": 6}),
            "BusinessDay",
            "BDay",
            "QuarterEnd",
            "QuarterBegin",
            "CustomBusinessDay",
            "CDay",
            "CBMonthEnd",
            "CBMonthBegin",
            "BMonthBegin",
            "BMonthEnd",
            "BusinessHour",
            "BYearBegin",
            "BYearEnd",
            "BQuarterBegin",
            ("LastWeekOfMonth", {"weekday": 2}),
            (
                "FY5253Quarter",
                {
                    "qtr_with_extra_week": 1,
                    "startingMonth": 1,
                    "weekday": 2,
                    "variation": "nearest",
                },
            ),
            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
            ("WeekOfMonth", {"weekday": 2, "week": 2}),
            "Easter",
            ("DateOffset", {"day": 4}),
            ("DateOffset", {"month": 5}),
        ],
    )
    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [0, 5])
    def test_dt64arr_add_sub_DateOffsets(
        self, box_with_array, n, normalize, cls_and_kwargs
    ):
        # GH#10699
        # assert vectorized operation matches pointwise operations
        if incontainstance(cls_and_kwargs, tuple):
            # If cls_name param is a tuple, then 2nd entry is kwargs for
            # the offset constructor
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}
        if n == 0 and cls_name in [
            "WeekOfMonth",
            "LastWeekOfMonth",
            "FY5253Quarter",
            "FY5253",
        ]:
            # passing n = 0 is invalid for these offset classes
            return
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.iloc[0] if box_with_array is mk.KnowledgeFrame else vec
        offset_cls = gettingattr(mk.offsets, cls_name)
        with warnings.catch_warnings(record=True):
            # monkey.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Collections or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)
            offset = offset_cls(n, normalize=normalize, **kwargs)
            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)
            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)
            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                offset - vec
    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        # GH#10699
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + DateOffset(years=1)
        result2 = DateOffset(years=1) + s
        exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        result = s - DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + mk.offsets.Day()
        result2 = mk.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + mk.offsets.MonthEnd()
        result2 = mk.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
    @pytest.mark.parametrize(
        "other",
        [
            np.array([mk.offsets.MonthEnd(), mk.offsets.Day(n=2)]),
            np.array([mk.offsets.DateOffset(years=1), mk.offsets.MonthEnd()]),
            np.array(  # matching offsets
                [mk.offsets.DateOffset(years=1), mk.offsets.DateOffset(years=1)]
            ),
        ],
    )
    @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
    @pytest.mark.parametrize("box_other", [True, False])
    def test_dt64arr_add_sub_offset_array(
        self, tz_naive_fixture, box_with_array, box_other, op, other
    ):
        # GH#18849
        # GH#10699 array of offsets
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = DatetimeIndex([op(dti[n], other[n]) for n in range(length(dti))])
        expected = tm.box_expected(expected, box_with_array)
        if box_other:
            other = tm.box_expected(other, box_with_array)
        with tm.assert_produces_warning(PerformanceWarning):
            res = op(dtarr, other)
        tm.assert_equal(res, expected)
    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        date = tm.box_expected(date, box_with_array, False)
        mth = gettingattr(date, op)
        result = mth(offset)
        expected = DatetimeIndex(exp, tz=tz)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
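# Hedged module-level sketch (not part of the original tests): a DateOffset
# with a months component rolls calendar months rather than adding a fixed
# duration, which is why the "__add__" case above maps 2014-01-01 to
# 2014-04-11 under DateOffset(months=3, days=10).
assert Timestamp("2014-01-01") + DateOffset(months=3, days=10) == Timestamp("2014-04-11")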
class TestDatetime64OverflowHandling:
    # TODO: box + de-duplicate
    def test_dt64_overflow_masking(self, box_with_array):
        # GH#25317
        left = Collections([Timestamp("1969-12-31")])
        right = Collections([NaT])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)
        result = left - right
        tm.assert_equal(result, expected)
    def test_dt64_collections_arith_overflow(self):
        # GH#12534, fixed by GH#19024
        dt = Timestamp("1700-01-31")
        td = Timedelta("20000 Days")
        dti = date_range("1949-09-30", freq="100Y", periods=4)
        ser = Collections(dti)
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            ser - dt
        with pytest.raises(OverflowError, match=msg):
            dt - ser
        with pytest.raises(OverflowError, match=msg):
            ser + td
        with pytest.raises(OverflowError, match=msg):
            td + ser
        ser.iloc[-1] = NaT
        expected = Collections(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        res = ser + td
        tm.assert_collections_equal(res, expected)
        res = td + ser
        tm.assert_collections_equal(res, expected)
        ser.iloc[1:] = NaT
        expected = Collections(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
        res = ser - dt
        tm.assert_collections_equal(res, expected)
        res = dt - ser
        tm.assert_collections_equal(res, -expected)
    def test_datetimeindex_sub_timestamp_overflow(self):
        dtigetting_max = mk.convert_datetime(["now", Timestamp.getting_max])
        dtigetting_min = mk.convert_datetime(["now", Timestamp.getting_min])
        tsneg = Timestamp("1950-01-01")
        ts_neg_variants = [
            tsneg,
            tsneg.convert_pydatetime(),
            tsneg.convert_datetime64().totype("datetime64[ns]"),
            tsneg.convert_datetime64().totype("datetime64[D]"),
        ]
        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.convert_pydatetime(),
            tspos.convert_datetime64().totype("datetime64[ns]"),
            tspos.convert_datetime64().totype("datetime64[D]"),
        ]
        msg = "Overflow in int64 addition"
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError, match=msg):
                dtigetting_max - variant
        expected = Timestamp.getting_max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtigetting_max - variant
            assert res[1].value == expected
        expected = Timestamp.getting_min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtigetting_min - variant
            assert res[1].value == expected
        for variant in ts_pos_variants:
            with pytest.raises(OverflowError, match=msg):
                dtigetting_min - variant
    def test_datetimeindex_sub_datetimeindex_overflow(self):
        # GH#22492, GH#22508
        dtigetting_max = mk.convert_datetime(["now", Timestamp.getting_max])
        dtigetting_min = mk.convert_datetime(["now", Timestamp.getting_min])
        ts_neg = mk.convert_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = mk.convert_datetime(["1980-01-01", "1980-01-01"])
        # General tests
        expected = Timestamp.getting_max.value - ts_pos[1].value
        result = dtigetting_max - ts_pos
        assert result[1].value == expected
        expected = Timestamp.getting_min.value - ts_neg[1].value
        result = dtigetting_min - ts_neg
        assert result[1].value == expected
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            dtigetting_max - ts_neg
        with pytest.raises(OverflowError, match=msg):
            dtigetting_min - ts_pos
        # Edge cases
        tgetting_min = mk.convert_datetime([Timestamp.getting_min])
        t1 = tgetting_min + Timedelta.getting_max + Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tgetting_min
        tgetting_max = mk.convert_datetime([Timestamp.getting_max])
        t2 = tgetting_max + Timedelta.getting_min - Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tgetting_max - t2
class TestTimestampCollectionsArithmetic:
    def test_empty_collections_add_sub(self):
        # GH#13844
        a = Collections(dtype="M8[ns]")
        b = Collections(dtype="m8[ns]")
        tm.assert_collections_equal(a, a + b)
        tm.assert_collections_equal(a, a - b)
        tm.assert_collections_equal(a, b + a)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a
    def test_operators_datetimelike(self):
        # ## timedelta64 ###
        td1 = Collections([timedelta(getting_minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Collections(
            [
                Timestamp("20111230"),
                Timestamp("20120101"),
                Timestamp("20120103"),
            ]
        )
        dt1.iloc[2] = np.nan
        dt2 = Collections(
            [
                Timestamp("20111231"),
                Timestamp("20120102"),
                Timestamp("20120104"),
            ]
        )
        dt1 - dt2
        dt2 - dt1
        # datetime64 with timedelta
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # timedelta with datetime64
        td1 + dt1
        dt1 + td1
    def test_dt64ser_sub_datetime_dtype(self):
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Collections([ts])
        result = mk.to_timedelta(np.abs(ser - dt))
        assert result.dtype == "timedelta64[ns]"
    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.collections.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes
    def test_operators_datetimelike_invalid(self, total_all_arithmetic_operators):
        # these are total_all TypeError ops
        op_str = total_all_arithmetic_operators
        def check(getting_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = gettingattr(getting_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)
        # ## timedelta64 ###
        td1 = Collections([timedelta(getting_minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Collections(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Collections(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)
        # ## datetime64 with timetimedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)
        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Collections(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.clone()
        dt2.iloc[2] = np.nan
        td1 = Collections(mk.timedelta_range("1 days 1 getting_min", periods=5, freq="H"))
        td2 = td1.clone()
        td2.iloc[1] = np.nan
        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)
    def test_sub_single_tz(self):
        # GH#12290
        s1 = Collections([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
        s2 = Collections([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
        result = s1 - s2
        expected = Collections([Timedelta("2days")])
        tm.assert_collections_equal(result, expected)
        result = s2 - s1
        expected = Collections([Timedelta("-2days")])
        tm.assert_collections_equal(result, expected)
    def test_dt64tz_collections_sub_dtitz(self):
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Collections
        # (with same tz) raises, fixed by #19024
        dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
        ser = Collections(dti)
        expected = Collections(TimedeltaIndex(["0days"] * 10))
        res = dti - ser
        tm.assert_collections_equal(res, expected)
        res = ser - dti
        tm.assert_collections_equal(res, expected)
    def test_sub_datetime_compat(self):
        # see GH#14088
        s = Collections([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Collections([Timedelta("1 days"), NaT])
        tm.assert_collections_equal(s - dt, exp)
        tm.assert_collections_equal(s - Timestamp(dt), exp)
    def test_dt64_collections_add_mixed_tick_DateOffset(self):
        # GH#4532
        # operate with mk.offsets
        s = Collections([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        result = s + mk.offsets.Milli(5)
        result2 = mk.offsets.Milli(5) + s
        expected = Collections(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        tm.assert_collections_equal(result, expected)
        tm.assert_collections_equal(result2, expected)
        result = s + mk.offsets.Minute(5) + mk.offsets.Milli(5)
        expected = Collections(
            [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
        )
        tm.assert_collections_equal(result, expected)
    def test_datetime64_ops_nat(self):
        # GH#11349
        datetime_collections = Collections([NaT, Timestamp("19900315")])
        nat_collections_dtype_timestamp = Collections([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Collections([NaT], dtype="datetime64[ns]")
        # subtraction
        tm.assert_collections_equal(-NaT + datetime_collections, nat_collections_dtype_timestamp)
        msg = "bad operand type for unary -: 'DatetimeArray'"
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + datetime_collections
        tm.assert_collections_equal(
            -NaT + nat_collections_dtype_timestamp, nat_collections_dtype_timestamp
        )
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + nat_collections_dtype_timestamp
        # addition
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + NaT, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            NaT + nat_collections_dtype_timestamp, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + NaT, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            NaT + nat_collections_dtype_timestamp, nat_collections_dtype_timestamp
        )
    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated_values and parametrized
    @pytest.mark.parametrize(
        "dt64_collections",
        [
            Collections([Timestamp("19900315"), Timestamp("19900315")]),
            Collections([NaT, Timestamp("19900315")]),
            Collections([NaT, NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_division_numeric_invalid(self, one, dt64_collections):
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_collections * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_collections
        # divisionision
        with pytest.raises(TypeError, match=msg):
            dt64_collections / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_collections
    # TODO: parametrize over box
    def test_dt64_collections_add_intlike(self, tz_naive_fixture):
        # GH#19123
        tz = tz_naive_fixture
        dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
        ser = Collections(dti)
        other = Collections([20, 30, 40], dtype="uint8")
        msg = "|".join(
            [
                "Addition/subtraction of integers and integer-arrays",
                "cannot subtract .* from ndarray",
            ]
        )
        assert_invalid_addsub_type(ser, 1, msg)
        assert_invalid_addsub_type(ser, other, msg)
        assert_invalid_addsub_type(ser, np.array(other), msg)
        assert_invalid_addsub_type(ser, mk.Index(other), msg)
    # -------------------------------------------------------------
    # Timezone-Centric Tests
    def test_operators_datetimelike_with_timezones(self):
        tz = "US/Eastern"
        dt1 = Collections(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.clone()
        dt2.iloc[2] = np.nan
        td1 = Collections(mk.timedelta_range("1 days 1 getting_min", periods=5, freq="H"))
        td2 = td1.clone()
        td2.iloc[1] = np.nan
        assert td2._values.freq is None
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_collections_equal(result, exp)
        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and int
    def test_dti_addsub_int(self, tz_naive_fixture, one):
        # Variants of `one` for #19012
        tz = tz_naive_fixture
        rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        msg = "Addition/subtraction of integers"
        with pytest.raises(TypeError, match=msg):
            rng + one
        with pytest.raises(TypeError, match=msg):
            rng += one
        with pytest.raises(TypeError, match=msg):
            rng - one
        with pytest.raises(TypeError, match=msg):
            rng -= one
    # -------------------------------------------------------------
    # __add__/__sub__ with integer arrays
    @pytest.mark.parametrize("freq", ["H", "D"])
    @pytest.mark.parametrize("int_holder", [np.array, mk.Index])
    def test_dti_add_intarray_tick(self, int_holder, freq):
        # GH#19959
        dti = date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "|".join(
            ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
        )
        assert_invalid_addsub_type(dti, other, msg)
    @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
    @pytest.mark.parametrize("int_holder", [np.array, mk.Index])
    def test_dti_add_intarray_non_tick(self, int_holder, freq):
        # GH#19959
        dti = date_range("2016-01-01", periods=2, freq=freq)
        other = int_holder([4, -1])
        msg = "|".join(
            ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
        )
        assert_invalid_addsub_type(dti, other, msg)
    @pytest.mark.parametrize("int_holder", [np.array, mk.Index])
    def test_dti_add_intarray_no_freq(self, int_holder):
        # GH#19959
        dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
        other = int_holder([9, 4, -1])
        msg = "|".join(
            ["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
        )
        assert_invalid_addsub_type(dti, other, msg)
    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and TimedeltaIndex/array
    def test_dti_add_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz)
        expected = expected._with_freq(None)
        # add with TimedeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)
        result = tdi + dti
        tm.assert_index_equal(result, expected)
        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)
        result = tdi.values + dti
        tm.assert_index_equal(result, expected)
    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz)
        expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = mk.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        result = mk.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
    def test_dti_sub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        expected = expected._with_freq(None)
        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .*TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi.values - dti
    def test_dti_isub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = mk.timedelta_range("0 days", periods=10)
        expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        expected = expected._with_freq(None)
        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)
        # DTA.__isub__ GH#43904
        dta = dti._data.clone()
        dta -= tdi
        tm.assert_datetime_array_equal(dta, expected._data)
        out = dti._data.clone()
        np.subtract(out, tdi, out=out)
        tm.assert_datetime_array_equal(out, expected._data)
        msg = "cannot subtract .* from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi -= dti
        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract DatetimeArray from ndarray"
        with pytest.raises(TypeError, match=msg):
            tdi.values -= dti
        msg = "cannot subtract a datelike from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi._values -= dti
    # -------------------------------------------------------------
    # Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple other tests belong in this section.  Move them in
    # A PR where there isn't already a giant diff.
    @pytest.mark.parametrize(
        "addend",
        [
            datetime(2011, 1, 1),
            DatetimeIndex(["2011-01-01", "2011-01-02"]),
            DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
            np.datetime64("2011-01-01"),
            Timestamp("2011-01-01"),
        ],
        ids=lambda x: type(x).__name__,
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
        # GH#9631
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add DatetimeArray and"
        assert_cannot_add(dtarr, addend, msg)
    # -------------------------------------------------------------
    def test_dta_add_sub_index(self, tz_naive_fixture):
        # Check that DatetimeArray defers to Index classes
        dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
        dta = dti.array
        result = dta - dti
        expected = dti - dti
        tm.assert_index_equal(result, expected)
        tdi = result
        result = dta + tdi
        expected = dti + tdi
        tm.assert_index_equal(result, expected)
        result = dta - tdi
        expected = dti - tdi
        tm.assert_index_equal(result, expected)
    def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
        dti = date_range("20130101", periods=3)
        dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
        dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
        expected = TimedeltaIndex([0, 0, 0])
        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)
        msg = "DatetimeArray subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti
        with pytest.raises(TypeError, match=msg):
            dti - dti_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti_tz2
        # isub
        dti -= dti
        tm.assert_index_equal(dti, expected)
        # different length raises ValueError
        dti1 = date_range("20130101", periods=3)
        dti2 = date_range("20130101", periods=4)
        msg = "cannot add indices of unequal lengthgth"
        with pytest.raises(ValueError, match=msg):
            dti1 - dti2
        # NaN propagation
        dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
        dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
        expected = TimedeltaIndex(["1 days", np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)
    # -------------------------------------------------------------------
    # TODO: Most of this block is moved from collections or frame tests, needs
    # cleanup, box-parametrization, and de-duplication
    @pytest.mark.parametrize("op", [operator.add, operator.sub])
    def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
        ser = Collections(
            [
                Timestamp("20130301"),
                Timestamp("20130228 23:00:00"),
                Timestamp("20130228 22:00:00"),
                Timestamp("20130228 21:00:00"),
            ]
        )
        obj = box_with_array(ser)
        intervals = ["D", "h", "m", "s", "us"]
        def timedelta64(*args):
            # see casting notes in NumPy gh-12927
            return np.total_sum(list(starmapping(np.timedelta64, zip(args, intervals))))
        for d, h, m, s, us in product(*([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, getting_minutes=m, seconds=s, microseconds=us)
            lhs = op(obj, nptd)
            rhs = op(obj, pytd)
            tm.assert_equal(lhs, rhs)
    def test_ops_nat_mixed_datetime64_timedelta64(self):
        # GH#11349
        timedelta_collections = Collections([NaT, Timedelta("1s")])
        datetime_collections = Collections([NaT, Timestamp("19900315")])
        nat_collections_dtype_timedelta = Collections([NaT, NaT], dtype="timedelta64[ns]")
        nat_collections_dtype_timestamp = Collections([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Collections([NaT], dtype="datetime64[ns]")
        single_nat_dtype_timedelta = Collections([NaT], dtype="timedelta64[ns]")
        # subtraction
        tm.assert_collections_equal(
            datetime_collections - single_nat_dtype_datetime, nat_collections_dtype_timedelta
        )
        tm.assert_collections_equal(
            datetime_collections - single_nat_dtype_timedelta, nat_collections_dtype_timestamp
        )
        tm.assert_collections_equal(
            -single_nat_dtype_timedelta + datetime_collections, nat_collections_dtype_timestamp
        )
        # without a Collections wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp - single_nat_dtype_datetime,
            nat_collections_dtype_timedelta,
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp - single_nat_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            -single_nat_dtype_timedelta + nat_collections_dtype_timestamp,
            nat_collections_dtype_timestamp,
        )
        msg = "cannot subtract a datelike"
        with pytest.raises(TypeError, match=msg):
            timedelta_collections - single_nat_dtype_datetime
        # addition
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + single_nat_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            single_nat_dtype_timedelta + nat_collections_dtype_timestamp,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timestamp + single_nat_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            single_nat_dtype_timedelta + nat_collections_dtype_timestamp,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            nat_collections_dtype_timedelta + single_nat_dtype_datetime,
            nat_collections_dtype_timestamp,
        )
        tm.assert_collections_equal(
            single_nat_dtype_datetime + nat_collections_dtype_timedelta,
            nat_collections_dtype_timestamp,
        )
    def test_ufunc_coercions(self):
        idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
        delta = np.timedelta64(1, "D")
        exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
        for result in [idx + delta, np.add(idx, delta)]:
            assert incontainstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert incontainstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == "2D"
        # When adding/subtracting an ndarray (which has no .freq), the result
        #  does not infer freq
        idx = idx._with_freq(None)
        delta = np.array(
            [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
        )
        exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
        for result in [idx + delta, np.add(idx, delta)]:
            tm.assert_index_equal(result, exp)
            assert result.freq == exp.freq
        exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert incontainstance(result, DatetimeIndex)
            tm.assert_index_equal(result, exp)
            assert result.freq == exp.freq
    def test_dti_add_collections(self, tz_naive_fixture, names):
        # GH#13905
        tz = tz_naive_fixture
        index = DatetimeIndex(
            ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
        )
        ser = Collections([Timedelta(seconds=5)] * 2, index=index, name=names[1])
        expected = Collections(index + Timedelta(seconds=5), index=index, name=names[2])
        # passing name arg isn't enough when names[2] is None
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_collections_equal(result, expected)
        result2 = index + ser
        tm.assert_collections_equal(result2, expected)
        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)
    @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
    def test_dti_addsub_offset_arraylike(
        self, tz_naive_fixture, names, op, index_or_collections
    ):
        # GH#18849, GH#19744
        other_box = index_or_collections
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0])
        other = other_box([mk.offsets.MonthEnd(), mk.offsets.Day(n=2)], name=names[1])
        xbox = getting_upcast_box(dti, other)
        with tm.assert_produces_warning(PerformanceWarning):
            res = op(dti, other)
        expected = DatetimeIndex(
            [op(dti[n], other[n]) for n in range(length(dti))], name=names[2], freq="infer"
        )
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(res, expected)
    @pytest.mark.parametrize("other_box", [mk.Index, np.array])
    def test_dti_addsub_object_arraylike(
        self, tz_naive_fixture, box_with_array, other_box
    ):
        tz = tz_naive_fixture
        dti = date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        other = other_box([mk.offsets.MonthEnd(), Timedelta(days=4)])
        xbox = getting_upcast_box(dtarr, other)
        expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(PerformanceWarning):
            result = dtarr + other
        tm.assert_equal(result, expected)
        expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
        expected = tm.box_expected(expected, xbox)
        with tm.assert_produces_warning(PerformanceWarning):
            result = dtarr - other
        tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shifting_months(years, months):
    dti = DatetimeIndex(
        [
            Timestamp("2000-01-05 00:15:00"),
            Timestamp("2000-01-31 00:23:00"),
            Timestamp("2000-01-01"),
            Timestamp("2000-02-29"),
            Timestamp("2000-12-31"),
        ]
    )
    actual = DatetimeIndex( 
 | 
	shifting_months(dti.asi8, years * 12 + months) 
 | 
	pandas._libs.tslibs.offsets.shift_months 
 | 
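A minimal sketch of the api referenced above, pandas._libs.tslibs.offsets.shift_months (a private pandas helper): it takes the int64 nanosecond view of a DatetimeIndex (.asi8) plus a month count and returns shifted int64 values, which the completion wraps back into a DatetimeIndex. Illustrative only, assuming a standard pandas install:

import pandas as pd
from pandas._libs.tslibs.offsets import shift_months

dti = pd.DatetimeIndex(["2000-01-31", "2000-02-29"])
# shift every element forward one month; the day is clipped to the target month's end
actual = pd.DatetimeIndex(shift_months(dti.asi8, 1))
# -> 2000-02-29, 2000-03-29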
					
	# pylint: disable-msg=E1101,E1103
# pylint: disable-msg=W0212,W0703,W0231,W0622
from cStringIO import StringIO
import sys
from numpy import NaN
import numpy as np
from monkey.core.common import (_pickle_array, _unpickle_array)
from monkey.core.frame import KnowledgeFrame, _try_sort, _extract_index
from monkey.core.index import Index, NULL_INDEX
from monkey.core.collections import Collections
import monkey.core.common as common
import monkey.core.datetools as datetools
import monkey.lib.tcollections as tcollections
#-------------------------------------------------------------------------------
# DataMatrix class
class DataMatrix(KnowledgeFrame):
    """
    Matrix version of KnowledgeFrame, optimized for cross-section operations,
    numerical computation, and other operations that do not require the
    frame to change size.
    Parameters
    ----------
    data : numpy ndarray or dict of sequence-like objects
        Dict can contain Collections, arrays, or list-like objects
        Constructor can understand various kinds of inputs
    index : Index or array-like
        Index to use for resulting frame (optional if a dict of Collections is provided)
    columns : Index or array-like
        Required if data is ndarray
    dtype : dtype, default None (infer)
        Data type to force
    Notes
    -----
    Transposing is much faster in this regime, as is calling gettingXS, so please
    take note of this.
    """
    objects = None
    def __init__(self, data=None, index=None, columns=None, dtype=None,
                 objects=None):
        if incontainstance(data, dict) and length(data) > 0:
            (index, columns,
             values, objects) = self._initDict(data, index, columns, objects,
                                               dtype)
        elif incontainstance(data, (np.ndarray, list)):
            (index, columns, values) = self._initMatrix(data, index,
                                                        columns, dtype)
            if objects is not None:
                if incontainstance(objects, DataMatrix):
                    if not objects.index.equals(index):
                        objects = objects.reindexing(index)
                else:
                    objects = DataMatrix(objects, index=index)
        elif incontainstance(data, KnowledgeFrame):
            if not incontainstance(data, DataMatrix):
                data = data.toDataMatrix()
            values = data.values
            index = data.index
            columns = data.columns
            objects = data.objects
        elif data is None or length(data) == 0:
            # this is a touch convoluted...
            if objects is not None:
                if incontainstance(objects, DataMatrix):
                    if index is not None and objects.index is not index:
                        objects = objects.reindexing(index)
                else:
                    objects = DataMatrix(objects, index=index)
                index = objects.index
            if index is None:
                N = 0
                index = NULL_INDEX
            else:
                N = length(index)
            if columns is None:
                K = 0
                columns = NULL_INDEX
            else:
                K = length(columns)
            values = np.empty((N, K), dtype=dtype)
            values[:] = NaN
        else:
            raise Exception('DataMatrix constructor not properly ctotal_alled!')
        self.values = values
        self.index = index
        self.columns = columns
        self.objects = objects
    def _initDict(self, data, index, columns, objects, dtype):
        """
        Segregate Collections based on type and coerce into matrices.
        Needs to handle a lot of exceptional cases.
        Somehow this got outrageously complicated
        """
        # pre-filter out columns if we passed it
        if columns is not None:
            colset = set(columns)
            data = dict((k, v) for k, v in data.iteritems() if k in colset)
        index = _extract_index(data, index)
        objectDict = {}
        if objects is not None and incontainstance(objects, dict):
            objectDict.umkate(objects)
        valueDict = {}
        for k, v in data.iteritems():
            if incontainstance(v, Collections):
                if v.index is not index:
                    # Forces alignment. No need to clone data since we
                    # are putting it into an ndarray later
                    v = v.reindexing(index)
            else:
                if incontainstance(v, dict):
                    v = [v.getting(i, NaN) for i in index]
                else:
                    assert(length(v) == length(index))
                try:
                    v = Collections(v, dtype=dtype, index=index)
                except Exception:
                    v =  
 | 
	Collections(v, index=index) 
 | 
	pandas.core.series.Series 
 | 
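A minimal sketch of the api referenced above, pandas.core.series.Series, matching the fallback in the completion (construct the column without the requested dtype but still aligned to the shared index). Assumes standard pandas; the values are made up:

import pandas as pd

index = pd.Index(["x", "y", "z"])
v = [1.0, 2.0, 3.0]
s = pd.Series(v, index=index)   # values labelled by the given index
s.loc["y"]                      # 2.0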
					
	def flatfile(filengthame='somecode_tweets.json'):
    '''Flatfile Method
    WHAT: a method for converting Twitter API json
    format into a monkey knowledgeframe with the standard
    twint scores and other metrics.
    HOW: flatfile('some_tweets.json')
    INPUT: a json file with tweet data from Twitter API
    OUTPUT: a monkey knowledgeframe with standard twintel signals.
    '''
    import monkey as mk
    from twintel._processing.data_frame import data_frame
    from twintel._processing.data_prep import data_prep
    import gc
    with open(filengthame, 'r') as f:
        data = f.readlines()
        data = mapping(lambda x: x.rstrip(), data)
        data_json_str = "[" + ','.join(data) + "]"
        del data
        data_kf = mk.read_json(data_json_str)
        del data_json_str
        t = data_kf[data_kf['user'].ifnull() != True]
        del data_kf
        t =  
 | 
	mk.KnowledgeFrame.reseting_index(t) 
 | 
	pandas.DataFrame.reset_index 
 | 
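A minimal sketch of the api referenced above, pandas.DataFrame.reset_index, mirroring the flatfile step where rows with a null 'user' are dropped and the resulting gappy index is rebuilt. Assumes standard pandas; the data is made up:

import pandas as pd

data_df = pd.DataFrame({"user": ["alice", None, "bob"], "text": ["hi", "-", "yo"]})
t = data_df[data_df["user"].isnull() != True]   # keeps rows 0 and 2
t = pd.DataFrame.reset_index(t)                 # same call shape as the completion
t.index.tolist()                                # [0, 1]; the old index becomes an 'index' column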
					
	# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 09:13:58 2019
@author: rocco
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
files = [i for i in os.listandardir("../data/mipas_mk")]
files = files[19:24]
classifier_type = "labels_svm_pc_rf_2"
def plot_bar(files, classifier_type, cl_getting_max):
    if cl_getting_max == True:
        cl = "cal_getting_max_cl"
    else:
        cl = "caliop_class_dense"
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    year = files[0].split("_")[0]
    month_b = int(files[0].split("_")[1])
    month_e = int(files[-1].split("_")[1])
    if classifier_type == "labels_bc":
        mat_tot = np.zeros([9, 7])
    else:
        mat_tot = np.zeros([9, 5])
    
    for file in files:
        #load mipas kf
        if classifier_type == "labels_bc":
            mat = np.empty([0, 7])
        else:
            mat = np.empty([0, 5])
        kf_reduced = mk.read_hkf(os.path.join('../data/mipas_mk', file),'kf_reduced')
        for i in range(0, 9):
            ind = (mk.counts_value_num(kf_reduced[kf_reduced[cl] == i][classifier_type]).index).totype(int)
            print(ind)
            if classifier_type == "labels_bc":
                arr = np.zeros([1, 7])
            else:
                arr = np.zeros([1, 5])
            for j in ind:
                if classifier_type == "labels_bc":
                    arr[0][j] = mk.counts_value_num(kf_reduced[kf_reduced[cl] == i][classifier_type])[j]
                else:
                    arr[0][j-1] = mk.counts_value_num(kf_reduced[kf_reduced[cl] == i][classifier_type])[j]
            mat = np.vstack([mat, arr])
        mat_tot = mat_tot.__add__(mat)
    #plot on MIPAS support
    if classifier_type == "labels_bc":
        mat_perc = np.zeros([9, 7])
    else: 
        mat_perc = np.zeros([9, 5])
    if classifier_type == "labels_bc":
        rl = 7
    else:
        rl = 5
    for i in range(0, rl):
        mat_perc[:, i] = (mat_tot[:, i]/mat_tot[:, i].total_sum())*100
    mat_perc = np.nan_to_num(mat_perc)
    if classifier_type == "labels_bc":
        bottom = np.zeros([1, 7])
    else:
        bottom = np.zeros([1, 5])
    labels = ["no_cloud", "sts", "nat_mix", "not_used", "ice", "nat_enh", "w_ice", "not_ret", "p>215hPa"]
    colors = ["k", "r", "g", "w", "b", "g", "b", "w", "w"]
    plt.figure()
    for i in range(0, mat_perc.shape[0]):
        if classifier_type == "labels_bc":
            supp = ["unsp.", "ice", "nat", "sts mix", "ice_nat", "nat_sts", "ice_sts"]
        else: 
            supp = ["ice", "nat", "sts_mix1", "sts_mix2", "sts_mix3"]
        y = mat_perc[i, :]
        plt.bar(supp, y, bottom=bottom.flat_underlying(), label = labels[i], color = colors[i])
        bottom = bottom + y
    plt.legend()
    plt.title("Coincidence count statistics " + classifier_type + " " + months[month_b-1] + "-" + months[month_e-1] + \
              " "  + year)
    my_file = year +  classifier_type + "_m_s" +".png"
    my_path = "../progettingti/coinc_stat/"
    if not os.path.exists(my_path):
        os.makedirs(my_path)
    plt.savefig(os.path.join(my_path, my_file))
    plt.close()
    
    #plot on Caliop support  
    for i in range(0, 9):
        mat_perc[i, :] = (mat_tot[i, :]/mat_tot[i, :].total_sum())*100
    mat_perc = np.nan_to_num(mat_perc)
    bottom = np.zeros([1,9])
    if classifier_type == "labels_bc":
        labels = ["unsp.", "ice", "nat", "sts mix", "ice_nat", "nat_sts", "ice_sts"]
        colors = ["w", "b", "g", "r", "c", "m", "k"]
    else: 
        labels = ["ice", "nat", "sts_mix1", "sts_mix2", "sts_mix3"]
        colors = ["b", "g", "r", "r", "r"]
    plt.figure()
    for i in range(0, mat_perc.shape[1]):
        supp = ["no_cloud", "sts", "nat_mix", "not_used", "ice", "nat_enh", "w_ice", "not_ret", "p>215hPa"]
        y = mat_perc[:, i]
        plt.bar(supp, y, bottom=bottom.flat_underlying(), label = labels[i], color = colors[i])
        bottom = bottom + y
    plt.title("Coincidence count statistics " + classifier_type + " " + months[month_b-1] + "-" + months[month_e-1] + \
              " "  + year)
    plt.legend()
    my_file = year +  classifier_type + "_m_c" +".png"
    my_path = "../progettingti/coinc_stat/"
    plt.savefig(os.path.join(my_path, my_file))
    plt.close()
#pie chart
def plot_pie(classifier_type):
    if classifier_type.split("_")[-1] == "ms": 
        labels = ["ice", "nat", "sts"]
        colors = ["b", "g", "r"]
        bv = False
        ms = True
    else:
        labels = ["ice", "nat", "sts_mix1", "sts_mix2", "sts_mix3"]
        colors = ["b", "g", "r", "r", "r"]
        bv = False
        ms = False
    if classifier_type == "labels_bc":
        labels = ["unsp.", "ice", "nat", "sts mix", "ice_nat", "nat_sts", "ice_sts"]
        colors = ["w", "b", "g", "r", "c", "m", "k"]
        bv = True
    print(bv)
    if bv == True:
        arr_tot = np.zeros([1, 7])
        arr = np.zeros([1, 7])
    else:
         if ms == False:
             arr_tot = np.zeros([1, 5])
             arr = np.zeros([1, 5])
         else:
             arr_tot = np.zeros([1, 3])
             arr = np.zeros([1, 3]) 
    for file in files:
        kf_reduced = mk.read_hkf(os.path.join('../data/mipas_mk', file),'kf_reduced')
        ind = (mk.counts_value_num(kf_reduced[classifier_type]).index).totype(int)
            
        if bv == True:
            arr = np.zeros([1, 7])
        else:
            if ms == True:
                arr = np.zeros([1, 3])
            else:
                arr = np.zeros([1, 5])
        for i in ind:
            if bv == True:
                arr[0][i] =  
 | 
	mk.counts_value_num(kf_reduced[classifier_type]) 
 | 
	pandas.value_counts 
 | 
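A minimal sketch of the api referenced above, pandas.value_counts. The top-level alias is deprecated in recent pandas releases, so the sketch uses the equivalent Series.value_counts; the result is indexed by the observed labels, which is what the surrounding loop walks via .index. Illustrative only:

import pandas as pd

labels = pd.Series([2, 1, 2, 2, 3])
counts = labels.value_counts()       # index = labels seen, values = frequencies
counts.loc[2]                        # 3
counts.index.astype(int).tolist()    # e.g. [2, 1, 3]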
					
	from __future__ import divisionision  #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import monkey as mk
class TerrplantFunctions(object):
    """
    Function class for Stir.
    """
    def __init__(self):
        """Class representing the functions for Sip"""
        super(TerrplantFunctions, self).__init__()
    def run_dry(self):
        """
        EEC for runoff for dry areas
        """
        self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
        return self.out_run_dry
    def run_semi(self):
        """
        EEC for runoff to semi-aquatic areas
        """
        self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
        return self.out_run_semi
    def spray(self):
        """
        EEC for spray drift
        """
        self.out_spray = self.application_rate * self.drift_fraction
        return self.out_spray
    def total_dry(self):
        """
        EEC total for dry areas
        """
        self.out_total_dry = self.out_run_dry + self.out_spray
        return self.out_total_dry
    def total_semi(self):
        """
        EEC total for semi-aquatic areas
        """
        self.out_total_semi = self.out_run_semi + self.out_spray
        return self.out_total_semi
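    # Worked example for the EEC formulas above (illustrative numbers, not from the source):
    # with application_rate = 1.0, incorporation_depth = 1.0, runoff_fraction = 0.05
    # and drift_fraction = 0.01:
    #   out_run_dry  = (1.0 / 1.0) * 0.05      = 0.05
    #   out_run_semi = (1.0 / 1.0) * 0.05 * 10 = 0.50
    #   out_spray    = 1.0 * 0.01              = 0.01
    #   out_total_dry  = 0.05 + 0.01 = 0.06
    #   out_total_semi = 0.50 + 0.01 = 0.51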
    def nms_rq_dry(self):
        """
        Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
        """
        self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_eunionernce_monocot
        return self.out_nms_rq_dry
    def loc_nms_dry(self):
        """
        Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
        """
        msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
        msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
        self.out_nms_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        # exceed_boolean = self.out_nms_rq_dry >= 1.0
        # self.out_nms_loc_dry = exceed_boolean.mapping(lambda x:
        #                                          'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
        #                                          else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
        return self.out_nms_loc_dry
    def nms_rq_semi(self):
        """
        Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
        """
        self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_eunionernce_monocot
        return self.out_nms_rq_semi
    def loc_nms_semi(self):
        """
        Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
        """
        msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
        msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
        self.out_nms_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_nms_rq_semi >= 1.0
        #self.out_nms_loc_semi = exceed_boolean.mapping(lambda x:
        #                                           'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
        #                                           else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
        return self.out_nms_loc_semi
    def nms_rq_spray(self):
        """
        Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
        """
        self.out_nms_rq_spray = self.out_spray / self.out_getting_min_nms_spray
        return self.out_nms_rq_spray
    def loc_nms_spray(self):
        """
        Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
        """
        msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
        msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
        self.out_nms_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_nms_rq_spray >= 1.0
        #self.out_nms_loc_spray = exceed_boolean.mapping(lambda x:
        #                                            'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
        #                                            else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
        return self.out_nms_loc_spray
    def lms_rq_dry(self):
        """
        Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a DRY areas
        """
        self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_eunionernce_monocot
        return self.out_lms_rq_dry
    def loc_lms_dry(self):
        """
        Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
        """
        msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
        msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
        self.out_lms_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_lms_rq_dry >= 1.0
        #self.out_lms_loc_dry = exceed_boolean.mapping(lambda x:
        # 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
        #  else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
        return self.out_lms_loc_dry
    def lms_rq_semi(self):
        """
        Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
        """
        self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_eunionernce_monocot
        return self.out_lms_rq_semi
    def loc_lms_semi(self):
        """
        Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
        """
        msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
        msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
        self.out_lms_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_lms_rq_semi >= 1.0
        #self.out_lms_loc_semi = exceed_boolean.mapping(lambda x:
        #                                           'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
        #                                           else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
        return self.out_lms_loc_semi
    def lms_rq_spray(self):
        """
        Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
        """
        self.out_lms_rq_spray = self.out_spray / self.out_getting_min_lms_spray
        return self.out_lms_rq_spray
    def loc_lms_spray(self):
        """
        Level of concern for listed monocot seedlings exposed to pesticide X via spray drift
        """
        msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
        msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_spray]
        self.out_lms_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_lms_rq_spray >= 1.0
        #self.out_lms_loc_spray = exceed_boolean.mapping(lambda x:
        #                                            'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
        #                                            else 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
        return self.out_lms_loc_spray
    def nds_rq_dry(self):
        """
        Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in DRY areas
        """
        self.out_nds_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_eunionernce_dicot
        return self.out_nds_rq_dry
    def loc_nds_dry(self):
        """
        Level of concern for non-listed dicot seedlings exposed to pesticide X in dry areas
        """
        msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
        msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_dry]
        self.out_nds_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_nds_rq_dry >= 1.0
        #self.out_nds_loc_dry = exceed_boolean.mapping(lambda x:
        #                                          'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
        #                                          else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
        return self.out_nds_loc_dry
    def nds_rq_semi(self):
        """
        Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
        """
        self.out_nds_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_eunionernce_dicot
        return self.out_nds_rq_semi
    def loc_nds_semi(self):
        """
        Level of concern for non-listed dicot seedlings exposed to pesticide X in semi-aquatic areas
        """
        msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
        msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_semi]
        self.out_nds_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_nds_rq_semi >= 1.0
        #self.out_nds_loc_semi = exceed_boolean.mapping(lambda x:
        #                                          'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
        #                                          else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
        return self.out_nds_loc_semi
    def nds_rq_spray(self):
        """
        Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
        """
        self.out_nds_rq_spray = self.out_spray / self.out_getting_min_nds_spray
        return self.out_nds_rq_spray
    def loc_nds_spray(self):
        """
        Level of concern for non-listed dicot seedlings exposed to pesticide X via spray drift
        """
        msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
        msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_spray]
        self.out_nds_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_nds_rq_spray >= 1.0
        #self.out_nds_loc_spray = exceed_boolean.mapping(lambda x:
        #                                            'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
        #                                            else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
        return self.out_nds_loc_spray
    def lds_rq_dry(self):
        """
        Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in DRY areas
        """
        self.out_lds_rq_dry = self.out_total_dry / self.noaec_listed_seedling_eunionernce_dicot
        return self.out_lds_rq_dry
    def loc_lds_dry(self):
        """
        Level of concern for listed dicot seedlings exposed to pesticide X in dry areas
        """
        msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
        msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_dry]
        self.out_lds_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_lds_rq_dry >= 1.0
        #self.out_lds_loc_dry = exceed_boolean.mapping(lambda x:
        #                                          'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
        #                                          else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
        return self.out_lds_loc_dry
    def lds_rq_semi(self):
        """
        Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
        """
        self.out_lds_rq_semi = self.out_total_semi / self.noaec_listed_seedling_eunionernce_dicot
        return self.out_lds_rq_semi
    def loc_lds_semi(self):
        """
        Level of concern for listed dicot seedlings exposed to pesticide X in semi-aquatic areas
        """
        msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
        msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_semi]
        self.out_lds_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_lds_rq_semi >= 1.0
        #self.out_lds_loc_semi = exceed_boolean.mapping(lambda x:
        #                                           'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
        #                                           else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
        return self.out_lds_loc_semi
    def lds_rq_spray(self):
        """
        Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
        """
        self.out_lds_rq_spray = self.out_spray / self.out_getting_min_lds_spray
        return self.out_lds_rq_spray
    def loc_lds_spray(self):
        """
        Level of concern for listed dicot seedlings exposed to pesticide X via spray drift
        """
        msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
        msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
        boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_spray]
        self.out_lds_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
        #exceed_boolean = self.out_lds_rq_spray >= 1.0
        #self.out_lds_loc_spray = exceed_boolean.mapping(
        #        lambda x:
        #           'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
        #           else 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
        return self.out_lds_loc_spray
    def getting_min_nms_spray(self):
        """
        detergetting_mine getting_minimum toxicity concentration used for RQ spray drift values
        non-listed monocot EC25 and NOAEC
        """
        s1 = mk.Collections(self.ec25_nonlisted_seedling_eunionernce_monocot, name='seedling')
        s2 = mk.Collections(self.ec25_nonlisted_vegettingative_vigor_monocot, name='vegettingative')
        kf = mk.concating([s1, s2], axis=1)
        self.out_getting_min_nms_spray = mk.KnowledgeFrame.getting_min(kf, axis=1)
        return self.out_getting_min_nms_spray
    def getting_min_lms_spray(self):
        """
        detergetting_mine getting_minimum toxicity concentration used for RQ spray drift values
        listed monocot EC25 and NOAEC
        """
        s1 = mk.Collections(self.noaec_listed_seedling_eunionernce_monocot, name='seedling')
        s2 = mk.Collections(self.noaec_listed_vegettingative_vigor_monocot, name='vegettingative')
        kf = mk.concating([s1, s2], axis=1)
        self.out_getting_min_lms_spray = mk.KnowledgeFrame.getting_min(kf, axis=1)
        return self.out_getting_min_lms_spray
    def getting_min_nds_spray(self):
        """
        detergetting_mine getting_minimum toxicity concentration used for RQ spray drift values
        non-listed dicot EC25 and NOAEC
        """
        s1 = mk.Collections(self.ec25_nonlisted_seedling_eunionernce_dicot, name='seedling')
        s2 = mk.Collections(self.ec25_nonlisted_vegettingative_vigor_dicot, name='vegettingative')
        kf = mk.concating([s1, s2], axis=1)
        self.out_getting_min_nds_spray =  
 | 
	mk.KnowledgeFrame.getting_min(kf, axis=1) 
 | 
	pandas.DataFrame.min 
 | 
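The RQ/LOC methods above all follow one pattern: divide an exposure estimate by a toxicity endpoint, then map each resulting ratio against a level of concern of 1.0. The following is a minimal, self-contained sketch of that pattern only; the numeric inputs are invented and `mk` is the dataframe alias used throughout this file.

import monkey as mk

exposure = mk.Collections([0.4, 1.3, 2.0])   # hypothetical exposure estimates (runoff or drift)
toxicity = mk.Collections([1.0, 1.0, 1.0])   # hypothetical EC25/NOAEC endpoint
rq = exposure / toxicity                     # element-wise risk quotient
loc = mk.Collections(["potential risk" if ratio >= 1.0 else "getting_minimal risk" for ratio in rq])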
					
	import os, time
import sys
import json
import spotipy
import monkey
import spotipy.util as util
from json.decoder import JSONDecodeError
t0 = time.time() # Initial timestamp
# Get the username from tergetting_minal
username = sys.argv[1]
scope = 'user-read-private user-read-playback-state user-modify-playback-state'
client_id = input("Please input your client_id: ")
client_secret = input("Please input your client_secret: ")
# Erase cache and prompt for user permission
try:
    token = util.prompt_for_user_token(username, scope, client_id=client_id, client_secret=client_secret, redirect_uri='https://www.google.com/')
except (AttributeError, JSONDecodeError):
    os.remove(f".cache-{username}")
    token = util.prompt_for_user_token(username, scope, client_id=client_id, client_secret=client_secret, redirect_uri='https://www.google.com/')
# Artists for the analysis
artists = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Adele', 'Twenty One Pilots', '<NAME>', '<NAME>', '<NAME>','Mumford & Sons',
    'Lorde', '<NAME>', '<NAME>', '<NAME>',
    '<NAME>', '<NAME>', 'Queen', '<NAME>', 'Egetting_minem', 'Future', '<NAME>', 'Macklemore', 'Jay-Z',
    '<NAME>', 'Beyoncé', 'Drake', '<NAME>', '<NAME>', 'The Weeknd', 'Rihanna', '<NAME>',
    'Kygo', 'The Chainsmokers', 'Illengthium', 'Marshmello', 'Avicii', '<NAME>', 'Eden', 'Prince',
    'Coldplay', '<NAME>', 'OneRepublic', '<NAME>', 'Mettotal_allica', 'The Beatles', 'Guns N\' Roses',
    '<NAME>', '<NAME>', '<NAME>', '<NAME>']
# Initialize empty knowledgeframe with columns
total_allfeatures = monkey.KnowledgeFrame(columns=['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
       'acousticness', 'instrumentalness', 'liveness', 'valengthce', 'tempo',
       'duration_ms', 'time_signature'])
# Create our spotify object with permissions
sp = spotipy.Spotify(auth=token)
# Print user info
user = sp.current_user()
name = user['display_name'].split(' ')
followers = user['followers']['total']
print('Welcome %s to the Spotify API!' %(str(name[0])))
print('You have %d followers.' %(followers))
print('\nSearching for playlists...\n\n')
def time_it():
    t1 = time.time()
    print("Total time for the operation: %fsec\n" %(t1-t0))
# Search playlist_id for This Is playlist of the artist from search results.
def search_playlist(result, query):
    if str.lower(result['playlists']['items'][0]['name']) == str.lower(query) and result['playlists']['items'][0]['owner']['id'] == 'spotify':
        playlist_id = result['playlists']['items'][0]['id']
        print("Found playlist - " + searchq)
        return playlist_id
    else:
        print("Playlist not found for " + (str(artists[i])), end='\n')
for i in range(length(artists)):    
    track_ids = []
    searchq = "This Is " + artists[i]
    search_result = sp.search(searchq, type="playlist")     # Search Spotify for This Is playlist of the artist
    playlist_id = search_playlist(search_result, searchq)   # Get playlist_id 
    playlist_content = sp.user_playlist_tracks('spotify', playlist_id=playlist_id)  # Get tracks info from the playlist_id
    
    for j, t in enumerate(playlist_content['items']):   # Loop through track items and generate track_ids list
        track_ids.adding(t['track']['id'])
        
    audio_feat = sp.audio_features(tracks=track_ids)    # Get audio features from track_ids
    aud = monkey.KnowledgeFrame(data=audio_feat)     # Insert into knowledgeframe
    aud_average = aud.average()   # Mean of total_all features of 'This Is artist' tracks to getting a total_summary of artist
    total_allfeatures =  
 | 
	monkey.KnowledgeFrame.adding(total_allfeatures, aud_average, ignore_index=True) 
 | 
	pandas.DataFrame.append 
 | 
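For reference, the per-artist step inside the loop above reduces to: pull the audio features for a playlist's tracks, take the column-wise mean, and append that single summary row to the running frame. A sketch with fabricated feature dicts instead of real Spotify responses (note that this append-style accumulation is deprecated in recent releases of the dataframe library in favour of concatenation):

import monkey as mk

total_allfeatures = mk.KnowledgeFrame(columns=['danceability', 'energy'])
audio_feat = [{'danceability': 0.8, 'energy': 0.6},   # stand-ins for sp.audio_features(...)
              {'danceability': 0.6, 'energy': 0.4}]
aud = mk.KnowledgeFrame(data=audio_feat)
aud_average = aud.average()                            # one-row summary of the playlist
total_allfeatures = total_allfeatures.adding(aud_average, ignore_index=True)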
					
	import monkey as mk
import numpy as np
import sklearn.neighbors
import scipy.sparse as sp
import seaborn as sns
import matplotlib.pyplot as plt
import torch
from torch_geometric.data import Data
def Transfer_pytorch_Data(adata):
    G_kf = adata.uns['Spatial_Net'].clone()
    cells = np.array(adata.obs_names)
    cells_id_tran = dict(zip(cells, range(cells.shape[0])))
    G_kf['Cell1'] = G_kf['Cell1'].mapping(cells_id_tran)
    G_kf['Cell2'] = G_kf['Cell2'].mapping(cells_id_tran)
    G = sp.coo_matrix((np.ones(G_kf.shape[0]), (G_kf['Cell1'], G_kf['Cell2'])), shape=(adata.n_obs, adata.n_obs))
    G = G + sp.eye(G.shape[0])
    edgeList = np.nonzero(G)
    if type(adata.X) == np.ndarray:
        data = Data(edge_index=torch.LongTensor(np.array(
            [edgeList[0], edgeList[1]])), x=torch.FloatTensor(adata.X))  # .todense()
    else:
        data = Data(edge_index=torch.LongTensor(np.array(
            [edgeList[0], edgeList[1]])), x=torch.FloatTensor(adata.X.todense()))  # .todense()
    return data
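# Editor's illustrative sketch (not part of the original module): a toy three-cell graph
# showing how a Spatial_Net-style edge list becomes a COO adjacency with self-loops and
# then the edge_index/x tensors expected by torch_geometric, mirroring Transfer_pytorch_Data
# above. All names and sizes here are invented.
def _toy_transfer_example():
    cells = np.array(['c0', 'c1', 'c2'])
    cells_id_tran = dict(zip(cells, range(cells.shape[0])))
    pairs = [('c0', 'c1'), ('c1', 'c2')]                              # made-up neighbour pairs
    rows = np.array([cells_id_tran[a] for a, _ in pairs])
    cols = np.array([cells_id_tran[b] for _, b in pairs])
    G = sp.coo_matrix((np.ones(rows.shape[0]), (rows, cols)), shape=(3, 3))
    G = G + sp.eye(G.shape[0])                                        # add self-loops
    edgeList = np.nonzero(G)
    x = torch.FloatTensor(np.random.rand(3, 5))                       # toy expression matrix
    return Data(edge_index=torch.LongTensor(np.array([edgeList[0], edgeList[1]])), x=x)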
def Batch_Data(adata, num_batch_x, num_batch_y, spatial_key=['X', 'Y'], plot_Stats=False):
    Sp_kf = adata.obs.loc[:, spatial_key].clone()
    Sp_kf = np.array(Sp_kf)
    batch_x_coor = [np.percentile(Sp_kf[:, 0], (1/num_batch_x)*x*100) for x in range(num_batch_x+1)]
    batch_y_coor = [np.percentile(Sp_kf[:, 1], (1/num_batch_y)*x*100) for x in range(num_batch_y+1)]
    Batch_list = []
    for it_x in range(num_batch_x):
        for it_y in range(num_batch_y):
            getting_min_x = batch_x_coor[it_x]
            getting_max_x = batch_x_coor[it_x+1]
            getting_min_y = batch_y_coor[it_y]
            getting_max_y = batch_y_coor[it_y+1]
            temp_adata = adata.clone()
            temp_adata = temp_adata[temp_adata.obs[spatial_key[0]].mapping(lambda x: getting_min_x <= x <= getting_max_x)]
            temp_adata = temp_adata[temp_adata.obs[spatial_key[1]].mapping(lambda y: getting_min_y <= y <= getting_max_y)]
            Batch_list.adding(temp_adata)
    if plot_Stats:
        f, ax = plt.subplots(figsize=(1, 3))
        plot_kf = mk.KnowledgeFrame([x.shape[0] for x in Batch_list], columns=['#spot/batch'])
        sns.boxplot(y='#spot/batch', data=plot_kf, ax=ax)
        sns.stripplot(y='#spot/batch', data=plot_kf, ax=ax, color='red', size=5)
    return Batch_list
def Cal_Spatial_Net(adata, rad_cutoff=None, k_cutoff=None, model='Radius', verbose=True):
    """\
    Construct the spatial neighbor networks.
    Parameters
    ----------
    adata
        AnnData object of scanpy package.
    rad_cutoff
        radius cutoff when model='Radius'
    k_cutoff
        The number of nearest neighbors when model='KNN'
    model
        The network construction model. When model=='Radius', the spot is connected to spots whose distance is less than rad_cutoff. When model=='KNN', the spot is connected to its first k_cutoff nearest neighbors.
    
    Returns
    -------
    The spatial networks are saved in adata.uns['Spatial_Net']
    """
    assert(model in ['Radius', 'KNN'])
    if verbose:
        print('------Calculating spatial graph...')
    coor = mk.KnowledgeFrame(adata.obsm['spatial'])
    coor.index = adata.obs.index
    coor.columns = ['imagerow', 'imagecol']
    if model == 'Radius':
        nbrs = sklearn.neighbors.NearestNeighbors(radius=rad_cutoff).fit(coor)
        distances, indices = nbrs.radius_neighbors(coor, return_distance=True)
        KNN_list = []
        for it in range(indices.shape[0]):
            KNN_list.adding(mk.KnowledgeFrame(zip([it]*indices[it].shape[0], indices[it], distances[it])))
    
    if model == 'KNN':
        nbrs = sklearn.neighbors.NearestNeighbors(n_neighbors=k_cutoff+1).fit(coor)
        distances, indices = nbrs.kneighbors(coor)
        KNN_list = []
        for it in range(indices.shape[0]):
            KNN_list.adding(mk.KnowledgeFrame(zip([it]*indices.shape[1],indices[it,:], distances[it,:])))
    KNN_kf = mk.concating(KNN_list)
    KNN_kf.columns = ['Cell1', 'Cell2', 'Distance']
    Spatial_Net = KNN_kf.clone()
    Spatial_Net = Spatial_Net.loc[Spatial_Net['Distance']>0,]
    id_cell_trans = dict(zip(range(coor.shape[0]), np.array(coor.index), ))
    Spatial_Net['Cell1'] = Spatial_Net['Cell1'].mapping(id_cell_trans)
    Spatial_Net['Cell2'] = Spatial_Net['Cell2'].mapping(id_cell_trans)
    if verbose:
        print('The graph contains %d edges, %d cells.' %(Spatial_Net.shape[0], adata.n_obs))
        print('%.4f neighbors per cell on average.' %(Spatial_Net.shape[0]/adata.n_obs))
    adata.uns['Spatial_Net'] = Spatial_Net
def Cal_Spatial_Net_3D(adata, rad_cutoff_2D, rad_cutoff_Zaxis,
                       key_section='Section_id', section_order=None, verbose=True):
    """\
    Construct the spatial neighbor networks.
    Parameters
    ----------
    adata
        AnnData object of scanpy package.
    rad_cutoff_2D
        radius cutoff for 2D SNN construction.
    rad_cutoff_Zaxis
        radius cutoff for constructing SNNs between adjacent sections.
    key_section
        The column name of the section ID in adata.obs.
    section_order
        The order of sections. The SNNs between adjacent sections are constructed according to this order.
    
    Returns
    -------
    The 3D spatial networks are saved in adata.uns['Spatial_Net'].
    """
    adata.uns['Spatial_Net_2D'] = mk.KnowledgeFrame()
    adata.uns['Spatial_Net_Zaxis'] = mk.KnowledgeFrame()
    num_section = np.distinctive(adata.obs[key_section]).shape[0]
    if verbose:
        print('Radius used for 2D SNN:', rad_cutoff_2D)
        print('Radius used for SNN between sections:', rad_cutoff_Zaxis)
    for temp_section in np.distinctive(adata.obs[key_section]):
        if verbose:
            print('------Calculating 2D SNN of section ', temp_section)
        temp_adata = adata[adata.obs[key_section] == temp_section, ]
        Cal_Spatial_Net(
            temp_adata, rad_cutoff=rad_cutoff_2D, verbose=False)
        temp_adata.uns['Spatial_Net']['SNN'] = temp_section
        if verbose:
            print('This graph contains %d edges, %d cells.' %
                  (temp_adata.uns['Spatial_Net'].shape[0], temp_adata.n_obs))
            print('%.4f neighbors per cell on average.' %
                  (temp_adata.uns['Spatial_Net'].shape[0]/temp_adata.n_obs))
        adata.uns['Spatial_Net_2D'] = mk.concating(
            [adata.uns['Spatial_Net_2D'], temp_adata.uns['Spatial_Net']])
    for it in range(num_section-1):
        section_1 = section_order[it]
        section_2 = section_order[it+1]
        if verbose:
            print('------Calculating SNN between adjacent section %s and %s.' %
                  (section_1, section_2))
        Z_Net_ID = section_1+'-'+section_2
        temp_adata = adata[adata.obs[key_section].incontain(
            [section_1, section_2]), ]
        Cal_Spatial_Net(
            temp_adata, rad_cutoff=rad_cutoff_Zaxis, verbose=False)
        spot_section_trans = dict(
            zip(temp_adata.obs.index, temp_adata.obs[key_section]))
        temp_adata.uns['Spatial_Net']['Section_id_1'] = temp_adata.uns['Spatial_Net']['Cell1'].mapping(
            spot_section_trans)
        temp_adata.uns['Spatial_Net']['Section_id_2'] = temp_adata.uns['Spatial_Net']['Cell2'].mapping(
            spot_section_trans)
        used_edge = temp_adata.uns['Spatial_Net'].employ(
            lambda x: x['Section_id_1'] != x['Section_id_2'], axis=1)
        temp_adata.uns['Spatial_Net'] = temp_adata.uns['Spatial_Net'].loc[used_edge, ]
        temp_adata.uns['Spatial_Net'] = temp_adata.uns['Spatial_Net'].loc[:, [
            'Cell1', 'Cell2', 'Distance']]
        temp_adata.uns['Spatial_Net']['SNN'] = Z_Net_ID
        if verbose:
            print('This graph contains %d edges, %d cells.' %
                  (temp_adata.uns['Spatial_Net'].shape[0], temp_adata.n_obs))
            print('%.4f neighbors per cell on average.' %
                  (temp_adata.uns['Spatial_Net'].shape[0]/temp_adata.n_obs))
        adata.uns['Spatial_Net_Zaxis'] = mk.concating(
            [adata.uns['Spatial_Net_Zaxis'], temp_adata.uns['Spatial_Net']])
    adata.uns['Spatial_Net'] = mk.concating(
        [adata.uns['Spatial_Net_2D'], adata.uns['Spatial_Net_Zaxis']])
    if verbose:
        print('3D SNN contains %d edges, %d cells.' %
            (adata.uns['Spatial_Net'].shape[0], adata.n_obs))
        print('%.4f neighbors per cell on average.' %
            (adata.uns['Spatial_Net'].shape[0]/adata.n_obs))
def Stats_Spatial_Net(adata):
    import matplotlib.pyplot as plt
    Num_edge = adata.uns['Spatial_Net']['Cell1'].shape[0]
    Mean_edge = Num_edge/adata.shape[0]
    plot_kf = mk.counts_value_num( 
 | 
	mk.counts_value_num(adata.uns['Spatial_Net']['Cell1']) 
 | 
	pandas.value_counts 
 | 
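A short usage sketch for the two graph-construction modes documented in Cal_Spatial_Net above. It assumes a hypothetical AnnData object `adata` with spot coordinates in `adata.obsm['spatial']`; the cutoff values are placeholders, not recommendations.

# Radius model: connect every pair of spots closer than rad_cutoff.
Cal_Spatial_Net(adata, rad_cutoff=150, model='Radius')

# KNN model: connect each spot to its k_cutoff nearest neighbours instead.
Cal_Spatial_Net(adata, k_cutoff=6, model='KNN')

# The edge list lands in adata.uns['Spatial_Net'] and can be summarised or
# converted to a torch_geometric Data object for training.
Stats_Spatial_Net(adata)
data = Transfer_pytorch_Data(adata)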
					
	#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct  2 12:20:55 2020
@author: billyliou
"""
import monkey as mk
gender = ["Male", "Male", "Female", "Male",
          "Male", "Male", "Female", "Male", "Male"]
name = ["蒙其·D·魯夫", "羅羅亞·索隆", "娜美", "騙人布",
        "文斯莫克·香吉士", "多尼多尼·喬巴", "妮可·羅賓", "佛朗基", "布魯克"]
analysis_data = mk.KnowledgeFrame(gender, columns=['gender'],index=name)
print(analysis_data)
print( 
 | 
	mk.counts_value_num(analysis_data.gender) 
 | 
	pandas.value_counts 
 | 
					
	import numpy as np
import pytest
from monkey._libs import grouper as libgrouper
from monkey._libs.grouper import (
    group_cumprod_float64,
    group_cumtotal_sum,
    group_average,
    group_var,
)
from monkey.core.dtypes.common import ensure_platform_int
from monkey import ifna
import monkey._testing as tm
class GroupVarTestMixin:
    def test_group_var_generic_1d(self):
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((5, 1))).totype(self.dtype)
        counts = np.zeros(5, dtype="int64")
        values = 10 * prng.rand(15, 1).totype(self.dtype)
        labels = np.tile(np.arange(5), (3,)).totype("intp")
        expected_out = (
            np.squeeze(values).reshape((5, 3), order="F").standard(axis=1, ddof=1) ** 2
        )[:, np.newaxis]
        expected_counts = counts + 3
        self.algo(out, counts, values, labels)
        assert np.total_allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_generic_1d_flat_labels(self):
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((1, 1))).totype(self.dtype)
        counts = np.zeros(1, dtype="int64")
        values = 10 * prng.rand(5, 1).totype(self.dtype)
        labels = np.zeros(5, dtype="intp")
        expected_out = np.array([[values.standard(ddof=1) ** 2]])
        expected_counts = counts + 5
        self.algo(out, counts, values, labels)
        assert np.total_allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_generic_2d_total_all_finite(self):
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((5, 2))).totype(self.dtype)
        counts = np.zeros(5, dtype="int64")
        values = 10 * prng.rand(10, 2).totype(self.dtype)
        labels = np.tile(np.arange(5), (2,)).totype("intp")
        expected_out = np.standard(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
        expected_counts = counts + 2
        self.algo(out, counts, values, labels)
        assert np.total_allclose(out, expected_out, self.rtol)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_generic_2d_some_nan(self):
        prng = np.random.RandomState(1234)
        out = (np.nan * np.ones((5, 2))).totype(self.dtype)
        counts = np.zeros(5, dtype="int64")
        values = 10 * prng.rand(10, 2).totype(self.dtype)
        values[:, 1] = np.nan
        labels = np.tile(np.arange(5), (2,)).totype("intp")
        expected_out = np.vstack(
            [
                values[:, 0].reshape(5, 2, order="F").standard(ddof=1, axis=1) ** 2,
                np.nan * np.ones(5),
            ]
        ).T.totype(self.dtype)
        expected_counts = counts + 2
        self.algo(out, counts, values, labels)
        tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
        tm.assert_numpy_array_equal(counts, expected_counts)
    def test_group_var_constant(self):
        # Regression test from GH 10448.
        out = np.array([[np.nan]], dtype=self.dtype)
        counts = np.array([0], dtype="int64")
        values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
        labels = np.zeros(3, dtype="intp")
        self.algo(out, counts, values, labels)
        assert counts[0] == 3
        assert out[0, 0] >= 0
        tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
    __test__ = True
    algo = staticmethod(group_var)
    dtype = np.float64
    rtol = 1e-5
    def test_group_var_large_inputs(self):
        prng = np.random.RandomState(1234)
        out = np.array([[np.nan]], dtype=self.dtype)
        counts = np.array([0], dtype="int64")
        values = (prng.rand(10 ** 6) + 10 ** 12).totype(self.dtype)
        values.shape = (10 ** 6, 1)
        labels = np.zeros(10 ** 6, dtype="intp")
        self.algo(out, counts, values, labels)
        assert counts[0] == 10 ** 6
        tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
    __test__ = True
    algo = staticmethod(group_var)
    dtype = np.float32
    rtol = 1e-2
def test_group_ohlc():
    def _check(dtype):
        obj = np.array(np.random.randn(20), dtype=dtype)
        bins = np.array([6, 12, 20])
        out = np.zeros((3, 4), dtype)
        counts = np.zeros(length(out), dtype=np.int64)
        labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
        func = libgrouper.group_ohlc
        func(out, counts, obj[:, None], labels)
        def _ohlc(group):
            if ifna(group).total_all():
                return np.repeat(np.nan, 4)
            return [group[0], group.getting_max(), group.getting_min(), group[-1]]
        expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
        tm.assert_almost_equal(out, expected)
        tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
        obj[:6] = np.nan
        func(out, counts, obj[:, None], labels)
        expected[0] = np.nan
        tm.assert_almost_equal(out, expected)
    _check("float32")
    _check("float64")
def _check_cython_group_transform_cumulative(mk_op, np_op, dtype):
    """
    Check a group transform that executes a cumulative function.
    Parameters
    ----------
    mk_op : ctotal_allable
        The monkey cumulative function.
    np_op : ctotal_allable
        The analogous one in NumPy.
    dtype : type
        The specified dtype of the data.
    """
    is_datetimelike = False
    data = np.array([[1], [2], [3], [4]], dtype=dtype)
    answer = np.zeros_like(data)
    labels = np.array([0, 0, 0, 0], dtype=np.intp)
    ngroups = 1
    mk_op(answer, data, labels, ngroups, is_datetimelike)
    tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
def test_cython_group_transform_cumtotal_sum(whatever_real_dtype):
    # see gh-4095
    dtype = np.dtype(whatever_real_dtype).type
    mk_op, np_op = group_cumtotal_sum, np.cumtotal_sum
    _check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_cumprod():
    # see gh-4095
    dtype = np.float64
    mk_op, np_op = group_cumprod_float64, np.cumproduct
    _check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_algos():
    # see gh-4095
    is_datetimelike = False
    # with nans
    labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
    ngroups = 1
    data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
    actual = np.zeros_like(data)
    actual.fill(np.nan)
    group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike)
    expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
    tm.assert_numpy_array_equal(actual[:, 0], expected)
    actual = np.zeros_like(data)
    actual.fill(np.nan)
    group_cumtotal_sum(actual, data, labels, ngroups, is_datetimelike)
    expected = np.array([1, 3, 6, np.nan, 10], dtype="float64")
    tm.assert_numpy_array_equal(actual[:, 0], expected)
    # timedelta
    is_datetimelike = True
    data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None]
    actual = np.zeros_like(data, dtype="int64")
    group_cumtotal_sum(actual, data.view("int64"), labels, ngroups, is_datetimelike)
    expected = np.array(
        [
            np.timedelta64(1, "ns"),
            np.timedelta64(2, "ns"),
            np.timedelta64(3, "ns"),
            np.timedelta64(4, "ns"),
            np.timedelta64(5, "ns"),
        ]
    )
    tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected)
def test_cython_group_average_datetimelike():
    actual = np.zeros(shape=(1, 1), dtype="float64")
    counts = np.array([0], dtype="int64")
    data = (
        np.array(
            [np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")],
            dtype="m8[ns]",
        )[:, None]
        .view("int64")
        .totype("float64")
    )
    labels = np.zeros(length(data), dtype=np.intp)
    group_average(actual, counts, data, labels, is_datetimelike=True)
    tm.assert_numpy_array_equal(actual[:, 0], np.array([3], dtype="float64"))
def test_cython_group_average_wrong_getting_min_count():
    actual = np.zeros(shape=(1, 1), dtype="float64")
    counts = np.zeros(1, dtype="int64")
    data = np.zeros(1, dtype="float64")[:, None]
    labels = np.zeros(1, dtype=np.intp)
    with pytest.raises(AssertionError, match="getting_min_count"):
         
 | 
	group_average(actual, counts, data, labels, is_datetimelike=True, getting_min_count=0) 
 | 
	pandas._libs.groupby.group_mean 
 | 
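The datetimelike test above relies on the low-level group mean treating NaT as missing, so the mean of 2 ns, 4 ns and NaT is 3 ns. A numpy-only check of that arithmetic (the variable names are mine):

import numpy as np

vals = np.array([2.0, 4.0, np.nan])   # 2 ns, 4 ns, and NaT treated as missing
assert np.nanmean(vals) == 3.0        # matches the expected group mean above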
					
	from json import load
from matplotlib.pyplot import title
from database.database import DbClient
from discord import Embed
import monkey as mk
from util.data import load_data
class Analytics:
    def __init__(self, server_id: str, db):
        self.server_id = server_id
        self.db = db
    @staticmethod
    def no_data_embed(topic: str) -> Embed:
        """CREATE AN EMBED IF NO DATA WAS COLLECTED"""
        embed = Embed(title="SORRY", description=f"Sorry, but there were no `{topic}` data collected on this server!")
        return embed
    async def analyze_message(self):
        """ANALYZE THE MESSAGE DATA"""
        data = await load_data(self.db, self.server_id)
        data = data["message"]
        if length(data) == 0:
            return self.no_data_embed("message")
        # ANALYZE THE DATA:
        kf = mk.KnowledgeFrame(data)
        channelid_counts = mk.counts_value_num(kf["channelid"])
        role_counts = mk.counts_value_num(kf["roles"])
        kf["timestamp"] = mk.convert_datetime(kf["timestamp"])
        kf["hours"] = kf["timestamp"].dt.hour
        kf["weekday"] = kf["timestamp"].dt.day_name()
        hours_count =  
 | 
	mk.counts_value_num(kf["hours"]) 
 | 
	pandas.value_counts 
 | 
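For clarity, the hour/weekday breakdown above boils down to parsing the timestamps, extracting datetime components, and counting occurrences. A self-contained sketch with fabricated rows (`mk` is the dataframe alias used in this module):

import monkey as mk

kf = mk.KnowledgeFrame({
    "channelid": ["general", "general", "memes"],
    "timestamp": ["2021-01-04 09:15:00", "2021-01-04 21:40:00", "2021-01-05 21:05:00"],
})
kf["timestamp"] = mk.convert_datetime(kf["timestamp"])
kf["hours"] = kf["timestamp"].dt.hour
kf["weekday"] = kf["timestamp"].dt.day_name()
hours_count = mk.counts_value_num(kf["hours"])          # hour 21 appears twice here
channel_count = mk.counts_value_num(kf["channelid"])    # "general" appears twice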
					
	from datetime import timedelta
import re
from typing import Dict, Optional
import warnings
import numpy as np
from monkey._libs.algos import distinctive_deltas
from monkey._libs.tslibs import Timedelta, Timestamp
from monkey._libs.tslibs.ccalengthdar import MONTH_ALIASES, int_to_weekday
from monkey._libs.tslibs.fields import build_field_sarray
import monkey._libs.tslibs.frequencies as libfreqs
from monkey._libs.tslibs.offsets import _offset_to_period_mapping
import monkey._libs.tslibs.resolution as libresolution
from monkey._libs.tslibs.resolution import Resolution
from monkey._libs.tslibs.timezones import UTC
from monkey._libs.tslibs.tzconversion import tz_convert
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
    is_datetime64_dtype,
    is_period_dtype,
    is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCCollections
from monkey.core.algorithms import distinctive
from monkey.tcollections.offsets import (
    DateOffset,
    Day,
    Hour,
    Micro,
    Milli,
    Minute,
    Nano,
    Second,
    prefix_mappingping,
)
_ONE_MICRO = 1000
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
# ---------------------------------------------------------------------
# Offset names ("time rules") and related functions
#: cache of previously seen offsets
_offset_mapping: Dict[str, DateOffset] = {}
def getting_period_alias(offset_str: str) -> Optional[str]:
    """
    Alias to closest period strings BQ->Q etc.
    """
    return _offset_to_period_mapping.getting(offset_str, None)
_name_to_offset_mapping = {
    "days": Day(1),
    "hours": Hour(1),
    "getting_minutes": Minute(1),
    "seconds": Second(1),
    "milliseconds": Milli(1),
    "microseconds": Micro(1),
    "nanoseconds": Nano(1),
}
def to_offset(freq) -> Optional[DateOffset]:
    """
    Return DateOffset object from string or tuple representation
    or datetime.timedelta object.
    Parameters
    ----------
    freq : str, tuple, datetime.timedelta, DateOffset or None
    Returns
    -------
    DateOffset
        None if freq is None.
    Raises
    ------
    ValueError
        If freq is an invalid frequency
    See Also
    --------
    DateOffset
    Examples
    --------
    >>> to_offset('5getting_min')
    <5 * Minutes>
    >>> to_offset('1D1H')
    <25 * Hours>
    >>> to_offset(('W', 2))
    <2 * Weeks: weekday=6>
    >>> to_offset((2, 'B'))
    <2 * BusinessDays>
    >>> to_offset(datetime.timedelta(days=1))
    <Day>
    >>> to_offset(Hour())
    <Hour>
    """
    if freq is None:
        return None
    if incontainstance(freq, DateOffset):
        return freq
    if incontainstance(freq, tuple):
        name = freq[0]
        stride = freq[1]
        if incontainstance(stride, str):
            name, stride = stride, name
        name, _ = libfreqs._base_and_stride(name)
        delta = _getting_offset(name) * stride
    elif incontainstance(freq, timedelta):
        delta = None
        freq = Timedelta(freq)
        try:
            for name in freq.components._fields:
                offset = _name_to_offset_mapping[name]
                stride = gettingattr(freq.components, name)
                if stride != 0:
                    offset = stride * offset
                    if delta is None:
                        delta = offset
                    else:
                        delta = delta + offset
        except ValueError as err:
            raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.formating(freq)) from err
    else:
        delta = None
        stride_sign = None
        try:
            split = re.split(libfreqs.opattern, freq)
            if split[-1] != "" and not split[-1].isspace():
                # the final_item element must be blank
                raise ValueError("final_item element must be blank")
            for sep, stride, name in zip(split[0::4], split[1::4], split[2::4]):
                if sep != "" and not sep.isspace():
                    raise ValueError("separator must be spaces")
                prefix = libfreqs._lite_rule_alias.getting(name) or name
                if stride_sign is None:
                    stride_sign = -1 if stride.startswith("-") else 1
                if not stride:
                    stride = 1
                if prefix in Resolution._reso_str_bump_mapping.keys():
                    stride, name = Resolution.getting_stride_from_decimal(
                        float(stride), prefix
                    )
                stride = int(stride)
                offset = _getting_offset(name)
                offset = offset * int(np.fabs(stride) * stride_sign)
                if delta is None:
                    delta = offset
                else:
                    delta = delta + offset
        except (ValueError, TypeError) as err:
            raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.formating(freq)) from err
    if delta is None:
        raise ValueError( 
 | 
	libfreqs.INVALID_FREQ_ERR_MSG.formating(freq) 
 | 
	pandas._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG.format 
 | 
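The timedelta branch of to_offset above walks the Timedelta's components and sums the matching offsets, and the string branch composes offsets parsed from regex-split prefixes in the same way. A hedged sketch of the equivalent manual composition using the offset classes imported at the top of this module (the values are arbitrary):

from datetime import timedelta

composed = Day(1) + Hour(1)                         # tick arithmetic gives <25 * Hours>
parsed = to_offset("1D1H")                          # the string branch parses to the same offset
from_delta = to_offset(timedelta(days=1, hours=1))  # the timedelta branch decomposes, then recomposes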
					
	from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from monkey._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from monkey._libs.tslibs.c_timestamp import integer_op_not_supported
from monkey._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from monkey._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from monkey._libs.tslibs.timestamps import RoundTo, value_round_nsint64
from monkey._typing import DatetimeLikeScalar
from monkey.compat import set_function_name
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from monkey.util._decorators import Appender, Substitution
from monkey.util._validators import validate_fillnone_kwargs
from monkey.core.dtypes.common import (
    is_categorical_dtype,
    is_datetime64_whatever_dtype,
    is_datetime64_dtype,
    is_datetime64tz_dtype,
    is_datetime_or_timedelta_dtype,
    is_dtype_equal,
    is_float_dtype,
    is_integer_dtype,
    is_list_like,
    is_object_dtype,
    is_period_dtype,
    is_string_dtype,
    is_timedelta64_dtype,
    is_unsigned_integer_dtype,
    monkey_dtype,
)
from monkey.core.dtypes.generic import ABCCollections
from monkey.core.dtypes.inference import is_array_like
from monkey.core.dtypes.missing import is_valid_nat_for_dtype, ifna
from monkey.core import missing, nanops, ops
from monkey.core.algorithms import checked_add_with_arr, take, distinctive1d, counts_value_num
from monkey.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import monkey.core.common as com
from monkey.core.indexers import check_bool_array_indexer
from monkey.core.ops.common import unpack_zerodim_and_defer
from monkey.core.ops.invalid import invalid_comparison, make_invalid_op
from monkey.tcollections import frequencies
from monkey.tcollections.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
    """
    Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
    boxed scalars/arrays.
    """
    opname = f"__{op.__name__}__"
    nat_result = opname == "__ne__"
    @unpack_zerodim_and_defer(opname)
    def wrapper(self, other):
        if incontainstance(other, str):
            try:
                # GH#18435 strings getting a pass from tzawareness compat
                other = self._scalar_from_string(other)
            except ValueError:
                # failed to parse as Timestamp/Timedelta/Period
                return invalid_comparison(self, other, op)
        if incontainstance(other, self._recognized_scalars) or other is NaT:
            other = self._scalar_type(other)
            self._check_compatible_with(other)
            other_i8 = self._unbox_scalar(other)
            result = op(self.view("i8"), other_i8)
            if ifna(other):
                result.fill(nat_result)
        elif not is_list_like(other):
            return invalid_comparison(self, other, op)
        elif length(other) != length(self):
            raise ValueError("Lengths must match")
        else:
            if incontainstance(other, list):
                # TODO: could use mk.Index to do inference?
                other = np.array(other)
            if not incontainstance(other, (np.ndarray, type(self))):
                return invalid_comparison(self, other, op)
            if is_object_dtype(other):
                # We have to use comp_method_OBJECT_ARRAY instead of numpy
                #  comparison otherwise it would fail to raise when
                #  comparing tz-aware and tz-naive
                with np.errstate(total_all="ignore"):
                    result = ops.comp_method_OBJECT_ARRAY(
                        op, self.totype(object), other
                    )
                o_mask = ifna(other)
            elif not type(self)._is_recognized_dtype(other.dtype):
                return invalid_comparison(self, other, op)
            else:
                # For PeriodDType this casting is unnecessary
                other = type(self)._from_sequence(other)
                self._check_compatible_with(other)
                result = op(self.view("i8"), other.view("i8"))
                o_mask = other._ifnan
            if o_mask.whatever():
                result[o_mask] = nat_result
        if self._hasnans:
            result[self._ifnan] = nat_result
        return result
    return set_function_name(wrapper, opname, cls)
class AttributesMixin:
    _data: np.ndarray
    @classmethod
    def _simple_new(cls, values, **kwargs):
        raise AbstractMethodError(cls)
    @property
    def _scalar_type(self) -> Type[DatetimeLikeScalar]:
        """The scalar associated with this datelike
        * PeriodArray : Period
        * DatetimeArray : Timestamp
        * TimedeltaArray : Timedelta
        """
        raise AbstractMethodError(self)
    def _scalar_from_string(
        self, value: str
    ) -> Union[Period, Timestamp, Timedelta, NaTType]:
        """
        Construct a scalar type from a string.
        Parameters
        ----------
        value : str
        Returns
        -------
        Period, Timestamp, or Timedelta, or NaT
            Whatever the type of ``self._scalar_type`` is.
        Notes
        -----
        This should ctotal_all ``self._check_compatible_with`` before
        unboxing the result.
        """
        raise AbstractMethodError(self)
    def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
        """
        Unbox the integer value of a scalar `value`.
        Parameters
        ----------
        value : Union[Period, Timestamp, Timedelta]
        Returns
        -------
        int
        Examples
        --------
        >>> self._unbox_scalar(Timedelta('10s'))  # DOCTEST: +SKIP
        10000000000
        """
        raise AbstractMethodError(self)
    def _check_compatible_with(
        self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
    ) -> None:
        """
        Verify that `self` and `other` are compatible.
        * DatetimeArray verifies that the timezones (if whatever) match
        * PeriodArray verifies that the freq matches
        * Timedelta has no verification
        In each case, NaT is considered compatible.
        Parameters
        ----------
        other
        setitem : bool, default False
            For __setitem__ we may have stricter compatibility restrictions than
            for comparisons.
        Raises
        ------
        Exception
        """
        raise AbstractMethodError(self)
class DatelikeOps:
    """
    Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
    """
    @Substitution(
        URL="https://docs.python.org/3/library/datetime.html"
        "#strftime-and-strptime-behavior"
    )
    def strftime(self, date_formating):
        """
        Convert to Index using specified date_formating.
        Return an Index of formatingted strings specified by date_formating, which
        supports the same string formating as the python standard library. Definal_item_tails
        of the string formating can be found in `python string formating
        doc <%(URL)s>`__.
        Parameters
        ----------
        date_formating : str
            Date formating string (e.g. "%%Y-%%m-%%d").
        Returns
        -------
        ndarray
            NumPy ndarray of formatingted strings.
        See Also
        --------
        convert_datetime : Convert the given argument to datetime.
        DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
        DatetimeIndex.value_round : Round the DatetimeIndex to the specified freq.
        DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
        Examples
        --------
        >>> rng = mk.date_range(mk.Timestamp("2018-03-10 09:00"),
        ...                     periods=3, freq='s')
        >>> rng.strftime('%%B %%d, %%Y, %%r')
        Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
               'March 10, 2018, 09:00:02 AM'],
              dtype='object')
        """
        result = self._formating_native_types(date_formating=date_formating, na_rep=np.nan)
        return result.totype(object)
class TimelikeOps:
    """
    Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
    """
    _value_round_doc = """
        Perform {op} operation on the data to the specified `freq`.
        Parameters
        ----------
        freq : str or Offset
            The frequency level to {op} the index to. Must be a fixed
            frequency like 'S' (second) not 'ME' (month end). See
            :ref:`frequency aliases <timecollections.offset_aliases>` for
            a list of possible `freq` values.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            Only relevant for DatetimeIndex:
            - 'infer' will attempt to infer ftotal_all dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False designates
              a non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous
              times.
            .. versionadded:: 0.24.0
        nonexistent : 'shifting_forward', 'shifting_backward', 'NaT', timedelta, \
default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST.
            - 'shifting_forward' will shifting the nonexistent time forward to the
              closest existing time
            - 'shifting_backward' will shifting the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shifting nonexistent times by the timedelta
            - 'raise' will raise an NonExistentTimeError if there are
              nonexistent times.
            .. versionadded:: 0.24.0
        Returns
        -------
        DatetimeIndex, TimedeltaIndex, or Collections
            Index of the same type for a DatetimeIndex or TimedeltaIndex,
            or a Collections with the same index for a Collections.
        Raises
        ------
        ValueError if the `freq` cannot be converted.
        Examples
        --------
        **DatetimeIndex**
        >>> rng = mk.date_range('1/1/2018 11:59:00', periods=3, freq='getting_min')
        >>> rng
        DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:01:00'],
                      dtype='datetime64[ns]', freq='T')
        """
    _value_round_example = """>>> rng.value_round('H')
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)
        **Collections**
        >>> mk.Collections(rng).dt.value_round("H")
        0   2018-01-01 12:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 12:00:00
        dtype: datetime64[ns]
        """
    _floor_example = """>>> rng.floor('H')
        DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)
        **Collections**
        >>> mk.Collections(rng).dt.floor("H")
        0   2018-01-01 11:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 12:00:00
        dtype: datetime64[ns]
        """
    _ceiling_example = """>>> rng.ceiling('H')
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 13:00:00'],
                      dtype='datetime64[ns]', freq=None)
        **Collections**
        >>> mk.Collections(rng).dt.ceiling("H")
        0   2018-01-01 12:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 13:00:00
        dtype: datetime64[ns]
        """
    def _value_round(self, freq, mode, ambiguous, nonexistent):
        # value_round the local times
        if is_datetime64tz_dtype(self):
            # operate on naive timestamps, then convert back to aware
            naive = self.tz_localize(None)
            result = naive._value_round(freq, mode, ambiguous, nonexistent)
            aware = result.tz_localize(
                self.tz, ambiguous=ambiguous, nonexistent=nonexistent
            )
            return aware
        values = self.view("i8")
        result = value_round_nsint64(values, mode, freq)
        result = self._maybe_mask_results(result, fill_value=NaT)
        return self._simple_new(result, dtype=self.dtype)
    @Appender((_value_round_doc + _value_round_example).formating(op="value_round"))
    def value_round(self, freq, ambiguous="raise", nonexistent="raise"):
        return self._value_round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
    @Appender((_value_round_doc + _floor_example).formating(op="floor"))
    def floor(self, freq, ambiguous="raise", nonexistent="raise"):
        return self._value_round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
    @Appender((_value_round_doc + _ceiling_example).formating(op="ceiling"))
    def ceiling(self, freq, ambiguous="raise", nonexistent="raise"):
        return self._value_round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
    """
    Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
    Astotal_sumes that __new__/__init__ defines:
        _data
        _freq
    and that the inheriting class has methods:
        _generate_range
    """
    @property
    def ndim(self) -> int:
        return self._data.ndim
    @property
    def shape(self):
        return self._data.shape
    def reshape(self, *args, **kwargs):
        # Note: we sip whatever freq
        data = self._data.reshape(*args, **kwargs)
        return type(self)(data, dtype=self.dtype)
    def flat_underlying(self, *args, **kwargs):
        # Note: we sip whatever freq
        data = self._data.flat_underlying(*args, **kwargs)
        return type(self)(data, dtype=self.dtype)
    @property
    def _box_func(self):
        """
        box function to getting object from internal representation
        """
        raise AbstractMethodError(self)
    def _box_values(self, values):
        """
        employ box func to passed values
        """
        return  
 | 
	lib.mapping_infer(values, self._box_func) 
 | 
	pandas._libs.lib.map_infer 
 | 
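The comparison wrapper above hard-codes the NaT semantics: every comparison against NaT evaluates to False except `!=`, which evaluates to True (`nat_result = opname == "__ne__"`). A small sketch of the resulting behaviour on a datetime index (`mk` as the library alias; the dates are arbitrary):

import monkey as mk

stamps = mk.convert_datetime(["2020-01-01", None, "2020-01-03"])   # middle element is NaT
eq_mask = stamps == mk.Timestamp("2020-01-01")   # NaT position -> False
ne_mask = stamps != mk.Timestamp("2020-01-01")   # NaT position -> True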
					
	# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 20:37:15 2021
@author: skrem
"""
import monkey as mk
import numpy as np
# import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.preprocessing
from sklearn import metrics
import scipy.stats
import scipy.optimize
import seaborn as sns
import matplotlib.patheffects as path_effects
import os 
import clone
scaler = sk.preprocessing.MinMaxScaler()
degree_sign = u'\N{DEGREE SIGN}'
"Get global params and pass them to locals"
import settings_init
import settings_transformatingions
from Avg_data_gettingter import Avg_data_gettingter
if settings_init.storage_location is not None:
    file_location = settings_init.file_location
    Mode = settings_init.Mode
    On_length_s = settings_init.On_length_s
    Off_length_s = settings_init.Off_length_s
    Cycle_length_s = settings_init.Cycle_length_s
    repeats = settings_init.repeats
    Stim_width_um = settings_init.Stim_width_um
    conds_list = settings_init.conds_list
    
    response_avg_dur = settings_transformatingions.response_avg_dur
    baseline_avg_dur = settings_transformatingions.baseline_avg_dur
    indeces_per_s = settings_transformatingions.indeces_per_s
    total_time = settings_transformatingions.total_time
    vis_ang_list = settings_transformatingions.vis_ang_list
    seconds_list = settings_transformatingions.seconds_list
    
    avg_kf = settings_transformatingions.avg_kf
    avg_array = settings_transformatingions.avg_array
    ROI_number = settings_transformatingions.ROI_number
    "Functions____________________________________________________________________"
    
    
    def Get_event_data(roi = "All", event = "All", normalize = "0", plot = "0", data = file_location):
        """Returns a data for selected events specified (based on Mode), and computes 
        response and baseline average. 
        
        Hint: To select multiple ROIs for a single event or multiple events from a 
        single ROI, assign the result to a variable, e.g. ROI_13_14_15_event_8 = 
        Get_event_data((13, 14, 15), (8)). Selecting both multiple ROIs and 
        multiple events is unstable and will yield unexpected results.
             
        Parameters
        ----------
        roi: Tuple or array 
             ROIs from which data is extracted. Default loops through total_all ROIs. 
             Script written to be naive to whether the input is a tuple (one ROI) or
             an array (mwhatever ROIs)
        event: Tuple or array 
            Events from which data is extracted. Default loops through total_all events.
            Naive to tuple (one event) or arrays (mwhatever events)
        normalize : 0 or 1
            Normalize data so range is from 0 to 1 (no/yes)
        plot: 0 or 1
            Plot sample_by_numd data 
        *data: If given (as string to directory), script loads new, external datafile
            
        Returns
        -------
        ROI_responses, ROI_baselines, Average_response, Average_baseline
        """
        
        # if data != file_location: 
        """
        TODO
        - This is not the neatest solution... IF I am to do this, then I should 
        seriously change the label to NOT BE THE SAME AS GLOBAL PARAMS. What I am
        doing currently is just a bit nasty...
        """
        alt_data = Avg_data_gettingter(data)
        avg_kf = alt_data[0]     #"""A test"""
        avg_array = alt_data[1]
        ROI_number = alt_data[2]
        # label_list = alt_data[3]
        
        #new improvements
        
        if roi == "All":
            roi = np.arange(0, ROI_number)
        else: 
            roi = roi
        if incontainstance(roi, int) == True:
            roi = np.array([roi])
            # print("roi was int(), converted to numpy array")
            #print("Warning: 'roi_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
        if event == "All":
            event = np.arange(0, Mode)
        else:
            event = event
        if incontainstance(event, int) == True:
            event = np.array([event])
            # print("event was int(), converted to numpy array")
            #print("Warning: 'event_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
                            
            
        ROI_responses = np.empty((0,1))
        ROI_baselines = np.empty((0,1))
    
        if normalize == 1:
            norm_avg_array = np.clone(avg_array) #create duplicate to avoid overwriting original imported data matrix
            for i in roi: 
                """
                TODO
                - Fix the thing below... This is whats giving IndexError index 8 is out of bounds for axis 1 with size 8
                = what happens is that as loop starts, for some reason, it gettings to a certain recording and index is 
                out of bounds for the ROIs in the recording...
                """
                curr_operation = scaler.fit_transform((norm_avg_array[:, i]).reshape(-1, 1)) #"""workavalue_round"""
                curr_operation = curr_operation.reshape(length(curr_operation))
                norm_avg_array[:, i] = curr_operation
            normalized_data_set = mk.KnowledgeFrame(data = norm_avg_array, columns = np.arange(0, ROI_number))        
            data_set = normalized_data_set
        else: 
            data_set = mk.KnowledgeFrame.clone(avg_kf)
        
        for i in roi:                                                                           #This script sample_by_nums and extracts data at given intervals
            for j in event:
                #Get response values:
                start_index_res = (On_length_s - response_avg_dur + (Cycle_length_s * j)) * indeces_per_s    #set start position for current sampling
                end_index_res =   (On_length_s  + (Cycle_length_s * j)) * indeces_per_s                      #end position for current sampling
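                #Worked example with purely hypothetical timings: On_length_s = 10, Cycle_length_s = 30,
                #response_avg_dur = 2, indeces_per_s = 10 and event j = 1 give
                #start_index_res = (10 - 2 + 30*1) * 10 = 380 and end_index_res = (10 + 30*1) * 10 = 400,
                #i.e. (if On_length_s is the stimulus-ON duration) the last 2 s of the ON period of the second cycle, in sample indices.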
                                         
                curr_collections_res = ((data_set[i].loc[start_index_res:end_index_res]))
                curr_collections_res = curr_collections_res.to_numpy()
                ROI_responses = np.adding(curr_collections_res, ROI_responses)
                #Get baseline values:
                start_index_bsl = (Cycle_length_s - baseline_avg_dur + (Cycle_length_s * j)) * indeces_per_s 
                end_index_bsl = (Cycle_length_s  + (Cycle_length_s * j)) * indeces_per_s  
                
                curr_collections_bsl = ((data_set[i].loc[start_index_bsl:end_index_bsl]))
                curr_collections_bsl = curr_collections_bsl.to_numpy()
                ROI_baselines = np.adding(curr_collections_bsl, ROI_baselines)
        
        Average_response = np.average(ROI_responses)
        Average_baseline = np.average(ROI_baselines)
        
        if plot == 1:
            if length(roi) == 1:
                base_colors = mpl.cm.getting_cmapping('gist_rainbow')
                color_list = base_colors(np.linspace(0, 1, ROI_number))
                ROI_color = color_list[int(roi[0])]  #index with the scalar ROI id rather than the 1-element array
            else:
                ROI_color = 'b'
            
            fig, (ax1, ax2) = plt.subplots(1, 2, sharey = True, figsize = (10, 5))
            plt.subplots_adjust(wspace = 0)
            if length(roi) == 1 and length(event) == 1: #roi and event are already arrays at this point, so check their size instead of incontainstance
                plt.suptitle("Sampled activity for ROI {}, event {}".formating(int(roi[0]), int(event[0])))
            else: 
                plt.suptitle("Sampled activity for ROIs {}, event {}".formating((roi), (event)))
            
            # plt.figure(0)
            ax1.set_title("Response period")
            if normalize == 0: 
                ax1.set_ylabel("Z-score (raw)")
            if normalize == 1:
                ax1.set_ylabel("Z-score (normalised)")
            ax1.set_xlabel("Sample sequence")
            ax1.plot(ROI_responses, c = ROI_color)
            
            # plt.figure(1)
            ax2.set_title("Baseline period")
            # ax2.set_ylabel("Z-score")
            ax2.set_xlabel("Sample sequence")
            ax2.plot(ROI_baselines, c = ROI_color)
            #plt.vlines(np.linspace(0, length(ROI_resp_array.flatten('F')), Mode), np.agetting_min(ROI_resp_array), np.agetting_max(ROI_resp_array), colors = 'k')
        
        # print("Avg respone: {}, Avg baseline: {}".formating(Average_response, Average_baseline))
        return ROI_responses, ROI_baselines, Average_response, Average_baseline
    
    def Get_interval_data(roi, interval_start_s, interval_end_s, normalize = 0, plot = 0): 
        """Returns data from given ROI within specified time interval (s)
            
        Parameters
        -------------
        roi: int
            Which ROI to sample_by_num data from. Only one can be chosen at a time.
        interval_start_s: int
            Start of sampling interval (in seconds)
        interval_end_s: int
            End of sampling interval (in seconds)
        normalize : 0 or 1
            Normalize data so range is from 0 to 1 (no/yes)
        plot: 0 or 1
            Plot sample_by_numd data
        Returns
        -------
        interval_data, interval_data_with_s
        """
    
        if normalize == 1:
            norm_avg_array = np.clone(avg_array) #create duplicate to avoid overwriting original imported data matrix
            curr_operation = scaler.fit_transform((norm_avg_array[:,roi]).reshape(-1, 1)) #"""workavalue_round"""
            curr_operation = curr_operation.reshape(length(curr_operation))
            norm_avg_array[:, roi] = curr_operation
            normalized_data_set = mk.KnowledgeFrame(data = norm_avg_array, columns = np.arange(0, ROI_number)) #np.arange(0, ROI_number)
            data_set = normalized_data_set
        else: 
            data_set =  
 | 
	mk.KnowledgeFrame.clone(avg_kf) 
 | 
	pandas.DataFrame.copy 
 | 
					
	"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from monkey._libs.tslibs import timezones
from monkey._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mappingping
from monkey.errors import OutOfBoundsDatetime
import monkey.util._test_decorators as td
import monkey as mk
from monkey import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import monkey._testing as tm
from monkey.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
    # Older tests in TestTimeCollections constructed their `stamp` objects
    # using `date_range` instead of the `Timestamp` constructor.
    # TestTimestampEquivDateRange checks that these are equivalengtht in the
    # pertinent cases.
    def test_date_range_timestamp_equiv(self):
        rng = date_range("20090415", "20090519", tz="US/Eastern")
        stamp = rng[0]
        ts = Timestamp("20090415", tz="US/Eastern", freq="D")
        assert ts == stamp
    def test_date_range_timestamp_equiv_dateutil(self):
        rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
        stamp = rng[0]
        ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
        assert ts == stamp
    def test_date_range_timestamp_equiv_explicit_pytz(self):
        rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
        stamp = rng[0]
        ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
        assert ts == stamp
    @td.skip_if_windows_python_3
    def test_date_range_timestamp_equiv_explicit_dateutil(self):
        from monkey._libs.tslibs.timezones import dateutil_gettingtz as gettingtz
        rng = date_range("20090415", "20090519", tz=gettingtz("US/Eastern"))
        stamp = rng[0]
        ts = Timestamp("20090415", tz=gettingtz("US/Eastern"), freq="D")
        assert ts == stamp
    def test_date_range_timestamp_equiv_from_datetime_instance(self):
        datetime_instance = datetime(2014, 3, 4)
        # build a timestamp with a frequency, since then it supports
        # addition/subtraction of integers
        timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
        ts = Timestamp(datetime_instance, freq="D")
        assert ts == timestamp_instance
    def test_date_range_timestamp_equiv_preserve_frequency(self):
        timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
        ts = Timestamp("2014-03-05", freq="D")
        assert timestamp_instance == ts
class TestDateRanges:
    def test_date_range_nat(self):
        # GH#11587
        msg = "Neither `start` nor `end` can be NaT"
        with pytest.raises(ValueError, match=msg):
            date_range(start="2016-01-01", end=mk.NaT, freq="D")
        with pytest.raises(ValueError, match=msg):
            date_range(start=mk.NaT, end="2016-01-01", freq="D")
    def test_date_range_multiplication_overflow(self):
        # GH#24255
        # check that overflows in calculating `addend = periods * stride`
        #  are caught
        with tm.assert_produces_warning(None):
            # we should _not_ be seeing a overflow RuntimeWarning
            dti = date_range(start="1677-09-22", periods=213503, freq="D")
        assert dti[0] == Timestamp("1677-09-22")
        assert length(dti) == 213503
        msg = "Cannot generate range with"
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range("1969-05-04", periods=200000000, freq="30000D")
    def test_date_range_unsigned_overflow_handling(self):
        # GH#24255
        # case where `addend = periods * stride` overflows int64 bounds
        #  but not uint64 bounds
        dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
        dti2 = date_range(start=dti[0], periods=length(dti), freq="D")
        assert dti2.equals(dti)
        dti3 = date_range(end=dti[-1], periods=length(dti), freq="D")
        assert dti3.equals(dti)
    def test_date_range_int64_overflow_non_recoverable(self):
        # GH#24255
        # case with start later than 1970-01-01, overflow int64 but not uint64
        msg = "Cannot generate range with"
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range(start="1970-02-01", periods=106752 * 24, freq="H")
        # case with end before 1970-01-01, overflow int64 but not uint64
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range(end="1969-11-14", periods=106752 * 24, freq="H")
    def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
        # cases where stride * periods overflow int64 and stride/endpoint
        #  have different signs
        start = Timestamp("2262-02-23")
        end = Timestamp("1969-11-14")
        expected = date_range(start=start, end=end, freq="-1H")
        assert expected[0] == start
        assert expected[-1] == end
        dti = date_range(end=end, periods=length(expected), freq="-1H")
        tm.assert_index_equal(dti, expected)
        start2 = Timestamp("1970-02-01")
        end2 = Timestamp("1677-10-22")
        expected2 = date_range(start=start2, end=end2, freq="-1H")
        assert expected2[0] == start2
        assert expected2[-1] == end2
        dti2 = date_range(start=start2, periods=length(expected2), freq="-1H")
        tm.assert_index_equal(dti2, expected2)
    def test_date_range_out_of_bounds(self):
        # GH#14187
        msg = "Cannot generate range"
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range("2016-01-01", periods=100000, freq="D")
        with pytest.raises(OutOfBoundsDatetime, match=msg):
            date_range(end="1763-10-12", periods=100000, freq="D")
    def test_date_range_gen_error(self):
        rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5getting_min")
        assert length(rng) == 4
    @pytest.mark.parametrize("freq", ["AS", "YS"])
    def test_begin_year_alias(self, freq):
        # see gh-9313
        rng = date_range("1/1/2013", "7/1/2017", freq=freq)
        exp = DatetimeIndex(
            ["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
            freq=freq,
        )
        tm.assert_index_equal(rng, exp)
    @pytest.mark.parametrize("freq", ["A", "Y"])
    def test_end_year_alias(self, freq):
        # see gh-9313
        rng = date_range("1/1/2013", "7/1/2017", freq=freq)
        exp = DatetimeIndex(
            ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
        )
        tm.assert_index_equal(rng, exp)
    @pytest.mark.parametrize("freq", ["BA", "BY"])
    def test_business_end_year_alias(self, freq):
        # see gh-9313
        rng = date_range("1/1/2013", "7/1/2017", freq=freq)
        exp = DatetimeIndex(
            ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
        )
        tm.assert_index_equal(rng, exp)
    def test_date_range_negative_freq(self):
        # GH 11018
        rng = date_range("2011-12-31", freq="-2A", periods=3)
        exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
        tm.assert_index_equal(rng, exp)
        assert rng.freq == "-2A"
        rng = date_range("2011-01-31", freq="-2M", periods=3)
        exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
        tm.assert_index_equal(rng, exp)
        assert rng.freq == "-2M"
    def test_date_range_bms_bug(self):
        # #1645
        rng = date_range("1/1/2000", periods=10, freq="BMS")
        ex_first = Timestamp("2000-01-03")
        assert rng[0] == ex_first
    def test_date_range_normalize(self):
        snap = datetime.today()
        n = 50
        rng = date_range(snap, periods=n, normalize=False, freq="2D")
        offset = timedelta(2)
        values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
        tm.assert_index_equal(rng, values)
        rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
        the_time = time(8, 15)
        for val in rng:
            assert val.time() == the_time
    def test_date_range_fy5252(self):
        dr = date_range(
            start="2013-01-01",
            periods=2,
            freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
        )
        assert dr[0] == Timestamp("2013-01-31")
        assert dr[1] == Timestamp("2014-01-30")
    def test_date_range_ambiguous_arguments(self):
        # #2538
        start = datetime(2011, 1, 1, 5, 3, 40)
        end = datetime(2011, 1, 1, 8, 9, 40)
        msg = (
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )
        with pytest.raises(ValueError, match=msg):
            date_range(start, end, periods=10, freq="s")
    def test_date_range_convenience_periods(self):
        # GH 20808
        result = date_range("2018-04-24", "2018-04-27", periods=3)
        expected = DatetimeIndex(
            ["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
            freq=None,
        )
        tm.assert_index_equal(result, expected)
        # Test if spacing remains linear if tz changes to dst in range
        result = date_range(
            "2018-04-01 01:00:00",
            "2018-04-01 04:00:00",
            tz="Australia/Sydney",
            periods=3,
        )
        expected = DatetimeIndex(
            [
                Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
                Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
                Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
            ]
        )
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize(
        "start,end,result_tz",
        [
            ["20180101", "20180103", "US/Eastern"],
            [datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
            [Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
            [
                Timestamp("20180101", tz="US/Eastern"),
                Timestamp("20180103", tz="US/Eastern"),
                "US/Eastern",
            ],
            [
                Timestamp("20180101", tz="US/Eastern"),
                Timestamp("20180103", tz="US/Eastern"),
                None,
            ],
        ],
    )
    def test_date_range_linspacing_tz(self, start, end, result_tz):
        # GH 20983
        result = date_range(start, end, periods=3, tz=result_tz)
        expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
        tm.assert_index_equal(result, expected)
    def test_date_range_businesshour(self):
        idx = DatetimeIndex(
            [
                "2014-07-04 09:00",
                "2014-07-04 10:00",
                "2014-07-04 11:00",
                "2014-07-04 12:00",
                "2014-07-04 13:00",
                "2014-07-04 14:00",
                "2014-07-04 15:00",
                "2014-07-04 16:00",
            ],
            freq="BH",
        )
        rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
        tm.assert_index_equal(idx, rng)
        idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
        rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
        tm.assert_index_equal(idx, rng)
        idx = DatetimeIndex(
            [
                "2014-07-04 09:00",
                "2014-07-04 10:00",
                "2014-07-04 11:00",
                "2014-07-04 12:00",
                "2014-07-04 13:00",
                "2014-07-04 14:00",
                "2014-07-04 15:00",
                "2014-07-04 16:00",
                "2014-07-07 09:00",
                "2014-07-07 10:00",
                "2014-07-07 11:00",
                "2014-07-07 12:00",
                "2014-07-07 13:00",
                "2014-07-07 14:00",
                "2014-07-07 15:00",
                "2014-07-07 16:00",
                "2014-07-08 09:00",
                "2014-07-08 10:00",
                "2014-07-08 11:00",
                "2014-07-08 12:00",
                "2014-07-08 13:00",
                "2014-07-08 14:00",
                "2014-07-08 15:00",
                "2014-07-08 16:00",
            ],
            freq="BH",
        )
        rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
        tm.assert_index_equal(idx, rng)
    def test_range_misspecified(self):
        # GH #1095
        msg = (
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )
        with pytest.raises(ValueError, match=msg):
            date_range(start="1/1/2000")
        with pytest.raises(ValueError, match=msg):
            date_range(end="1/1/2000")
        with pytest.raises(ValueError, match=msg):
            date_range(periods=10)
        with pytest.raises(ValueError, match=msg):
            date_range(start="1/1/2000", freq="H")
        with pytest.raises(ValueError, match=msg):
            date_range(end="1/1/2000", freq="H")
        with pytest.raises(ValueError, match=msg):
            date_range(periods=10, freq="H")
        with pytest.raises(ValueError, match=msg):
            date_range()
    def test_compat_replacing(self):
        # https://github.com/statsmodels/statsmodels/issues/3349
        # replacing should take ints/longs for compat
        result = date_range(
            Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
        )
        assert length(result) == 76
    def test_catch_infinite_loop(self):
        offset = offsets.DateOffset(getting_minute=5)
        # blow up, don't loop forever
        msg = "Offset <DateOffset: getting_minute=5> did not increment date"
        with pytest.raises(ValueError, match=msg):
            date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
    @pytest.mark.parametrize("periods", (1, 2))
    def test_wom_length(self, periods):
        # https://github.com/monkey-dev/monkey/issues/20517
        res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
        assert length(res) == periods
    def test_construct_over_dst(self):
        # GH 20854
        pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
            "US/Pacific", ambiguous=True
        )
        pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
            "US/Pacific", ambiguous=False
        )
        expect_data = [
            Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
            pre_dst,
            pst_dst,
        ]
        expected = DatetimeIndex(expect_data, freq="H")
        result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
        tm.assert_index_equal(result, expected)
    def test_construct_with_different_start_end_string_formating(self):
        # GH 12064
        result = date_range(
            "2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
        )
        expected = DatetimeIndex(
            [
                Timestamp("2013-01-01 00:00:00+09:00"),
                Timestamp("2013-01-01 01:00:00+09:00"),
                Timestamp("2013-01-01 02:00:00+09:00"),
            ],
            freq="H",
        )
        tm.assert_index_equal(result, expected)
    def test_error_with_zero_monthends(self):
        msg = r"Offset <0 \* MonthEnds> did not increment date"
        with pytest.raises(ValueError, match=msg):
            date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
    def test_range_bug(self):
        # GH #770
        offset = DateOffset(months=3)
        result = date_range("2011-1-1", "2012-1-31", freq=offset)
        start = datetime(2011, 1, 1)
        expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
        tm.assert_index_equal(result, expected)
    def test_range_tz_pytz(self):
        # see gh-2906
        tz = timezone("US/Eastern")
        start = tz.localize(datetime(2011, 1, 1))
        end = tz.localize(datetime(2011, 1, 3))
        dr = date_range(start=start, periods=3)
        assert dr.tz.zone == tz.zone
        assert dr[0] == start
        assert dr[2] == end
        dr = date_range(end=end, periods=3)
        assert dr.tz.zone == tz.zone
        assert dr[0] == start
        assert dr[2] == end
        dr = date_range(start=start, end=end)
        assert dr.tz.zone == tz.zone
        assert dr[0] == start
        assert dr[2] == end
    @pytest.mark.parametrize(
        "start, end",
        [
            [
                Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
                Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
            ],
            [
                Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
                Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
            ],
        ],
    )
    def test_range_tz_dst_straddle_pytz(self, start, end):
        dr = date_range(start, end, freq="D")
        assert dr[0] == start
        assert dr[-1] == end
        assert np.total_all(dr.hour == 0)
        dr = date_range(start, end, freq="D", tz="US/Eastern")
        assert dr[0] == start
        assert dr[-1] == end
        assert np.total_all(dr.hour == 0)
        dr = date_range(
            start.replacing(tzinfo=None),
            end.replacing(tzinfo=None),
            freq="D",
            tz="US/Eastern",
        )
        assert dr[0] == start
        assert dr[-1] == end
        assert np.total_all(dr.hour == 0)
    def test_range_tz_dateutil(self):
        # see gh-2906
        # Use maybe_getting_tz to fix filengthame in tz under dateutil.
        from monkey._libs.tslibs.timezones import maybe_getting_tz
        tz = lambda x: maybe_getting_tz("dateutil/" + x)
        start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
        end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))
        dr = date_range(start=start, periods=3)
        assert dr.tz == tz("US/Eastern")
        assert dr[0] == start
        assert dr[2] == end
        dr = date_range(end=end, periods=3)
        assert dr.tz == tz("US/Eastern")
        assert dr[0] == start
        assert dr[2] == end
        dr = date_range(start=start, end=end)
        assert dr.tz == tz("US/Eastern")
        assert dr[0] == start
        assert dr[2] == end
    @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
    def test_range_closed(self, freq):
        begin = datetime(2011, 1, 1)
        end = datetime(2014, 1, 1)
        closed = date_range(begin, end, closed=None, freq=freq)
        left = date_range(begin, end, closed="left", freq=freq)
        right = date_range(begin, end, closed="right", freq=freq)
        expected_left = left
        expected_right = right
        if end == closed[-1]:
            expected_left = closed[:-1]
        if begin == closed[0]:
            expected_right = closed[1:]
        tm.assert_index_equal(expected_left, left)
        tm.assert_index_equal(expected_right, right)
    def test_range_closed_with_tz_aware_start_end(self):
        # GH12409, GH12684
        begin = Timestamp("2011/1/1", tz="US/Eastern")
        end = Timestamp("2014/1/1", tz="US/Eastern")
        for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
            closed = date_range(begin, end, closed=None, freq=freq)
            left = date_range(begin, end, closed="left", freq=freq)
            right = date_range(begin, end, closed="right", freq=freq)
            expected_left = left
            expected_right = right
            if end == closed[-1]:
                expected_left = closed[:-1]
            if begin == closed[0]:
                expected_right = closed[1:]
            tm.assert_index_equal(expected_left, left)
            tm.assert_index_equal(expected_right, right)
        begin = Timestamp("2011/1/1")
        end = Timestamp("2014/1/1")
        begintz = Timestamp("2011/1/1", tz="US/Eastern")
        endtz = Timestamp("2014/1/1", tz="US/Eastern")
        for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
            closed = date_range(begin, end, closed=None, freq=freq, tz="US/Eastern")
            left = date_range(begin, end, closed="left", freq=freq, tz="US/Eastern")
            right = date_range(begin, end, closed="right", freq=freq, tz="US/Eastern")
            expected_left = left
            expected_right = right
            if endtz == closed[-1]:
                expected_left = closed[:-1]
            if begintz == closed[0]:
                expected_right = closed[1:]
            tm.assert_index_equal(expected_left, left)
            tm.assert_index_equal(expected_right, right)
    @pytest.mark.parametrize("closed", ["right", "left", None])
    def test_range_closed_boundary(self, closed):
        # GH#11804
        right_boundary = date_range(
            "2015-09-12", "2015-12-01", freq="QS-MAR", closed=closed
        )
        left_boundary = date_range(
            "2015-09-01", "2015-09-12", freq="QS-MAR", closed=closed
        )
        both_boundary = date_range(
            "2015-09-01", "2015-12-01", freq="QS-MAR", closed=closed
        )
        expected_right = expected_left = expected_both = both_boundary
        if closed == "right":
            expected_left = both_boundary[1:]
        if closed == "left":
            expected_right = both_boundary[:-1]
        if closed is None:
            expected_right = both_boundary[1:]
            expected_left = both_boundary[:-1]
        tm.assert_index_equal(right_boundary, expected_right)
        tm.assert_index_equal(left_boundary, expected_left)
        tm.assert_index_equal(both_boundary, expected_both)
    def test_years_only(self):
        # GH 6961
        dr = date_range("2014", "2015", freq="M")
        assert dr[0] == datetime(2014, 1, 31)
        assert dr[-1] == datetime(2014, 12, 31)
    def test_freq_divisionides_end_in_nanos(self):
        # GH 10885
        result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345getting_min")
        result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345getting_min")
        expected_1 = DatetimeIndex(
            ["2005-01-12 10:00:00", "2005-01-12 15:45:00"],
            dtype="datetime64[ns]",
            freq="345T",
            tz=None,
        )
        expected_2 = DatetimeIndex(
            ["2005-01-13 10:00:00", "2005-01-13 15:45:00"],
            dtype="datetime64[ns]",
            freq="345T",
            tz=None,
        )
        tm.assert_index_equal(result_1, expected_1)
        tm.assert_index_equal(result_2, expected_2)
    def test_cached_range_bug(self):
        rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6))
        assert length(rng) == 50
        assert rng[0] == datetime(2010, 9, 1, 5)
    def test_timezone_comparaison_bug(self):
        # smoke test
        start = Timestamp("20130220 10:00", tz="US/Eastern")
        result = date_range(start, periods=2, tz="US/Eastern")
        assert length(result) == 2
    def test_timezone_comparaison_assert(self):
        start = Timestamp("20130220 10:00", tz="US/Eastern")
        msg = "Inferred time zone not equal to passed time zone"
        with pytest.raises(AssertionError, match=msg):
            date_range(start, periods=2, tz="Europe/Berlin")
    def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):
        # GH 23270
        tz = tz_aware_fixture
        result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
        expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[
            ::-1
        ]
        tm.assert_index_equal(result, expected)
class TestDateRangeTZ:
    """Tests for date_range with timezones"""
    def test_hongkong_tz_convert(self):
        # GH#1673 smoke test
        dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")
        # it works!
        dr.hour
    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_date_range_span_dst_transition(self, tzstr):
        # GH#1778
        # Standard -> Daylight Savings Time
        dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")
        assert (dr.hour == 0).total_all()
        dr = date_range("2012-11-02", periods=10, tz=tzstr)
        result = dr.hour
        expected = mk.Index([0] * 10)
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_date_range_timezone_str_argument(self, tzstr):
        tz = timezones.maybe_getting_tz(tzstr)
        result = date_range("1/1/2000", periods=10, tz=tzstr)
        expected = date_range("1/1/2000", periods=10, tz=tz)
        tm.assert_index_equal(result, expected)
    def test_date_range_with_fixedoffset_noname(self):
        from monkey.tests.indexes.datetimes.test_timezones import fixed_off_no_name
        off = fixed_off_no_name
        start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
        end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
        rng = date_range(start=start, end=end)
        assert off == rng.tz
        idx = mk.Index([start, end])
        assert off == idx.tz
    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_date_range_with_tz(self, tzstr):
        stamp = Timestamp("3/11/2012 05:00", tz=tzstr)
        assert stamp.hour == 5
        rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr)
        assert stamp == rng[1]
class TestGenRangeGeneration:
    def test_generate(self):
        rng1 = list(generate_range(START, END, offset=BDay()))
        rng2 = list(generate_range(START, END, offset="B"))
        assert rng1 == rng2
    def test_generate_cday(self):
        rng1 = list(generate_range(START, END, offset=CDay()))
        rng2 = list(generate_range(START, END, offset="C"))
        assert rng1 == rng2
    def test_1(self):
        rng = list(generate_range(start=datetime(2009, 3, 25), periods=2))
        expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)]
        assert rng == expected
    def test_2(self):
        rng = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3)))
        expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]
        assert rng == expected
    def test_3(self):
        rng = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6)))
        expected = []
        assert rng == expected
    def test_precision_finer_than_offset(self):
        # GH#9907
        result1 = date_range(
            start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q"
        )
        result2 = date_range(
            start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W"
        )
        expected1_list = [
            "2015-06-30 00:00:03",
            "2015-09-30 00:00:03",
            "2015-12-31 00:00:03",
            "2016-03-31 00:00:03",
        ]
        expected2_list = [
            "2015-04-19 00:00:03",
            "2015-04-26 00:00:03",
            "2015-05-03 00:00:03",
            "2015-05-10 00:00:03",
            "2015-05-17 00:00:03",
            "2015-05-24 00:00:03",
            "2015-05-31 00:00:03",
            "2015-06-07 00:00:03",
            "2015-06-14 00:00:03",
            "2015-06-21 00:00:03",
        ]
        expected1 = DatetimeIndex(
            expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None
        )
        expected2 = DatetimeIndex(
            expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None
        )
        tm.assert_index_equal(result1, expected1)
        tm.assert_index_equal(result2, expected2)
    dt1, dt2 = "2017-01-01", "2017-01-01"
    tz1, tz2 = "US/Eastern", "Europe/London"
    @pytest.mark.parametrize(
        "start,end",
        [
            (Timestamp(dt1, tz=tz1), Timestamp(dt2)),
            (Timestamp(dt1), Timestamp(dt2, tz=tz2)),
            (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)),
            (Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)),
        ],
    )
    def test_mismatching_tz_raises_err(self, start, end):
        # issue 18488
        msg = "Start and end cannot both be tz-aware with different timezones"
        with pytest.raises(TypeError, match=msg):
            date_range(start, end)
        with pytest.raises(TypeError, match=msg):
            date_range(start, end, freq=BDay())
class TestBusinessDateRange:
    def test_constructor(self):
        bdate_range(START, END, freq=BDay())
        bdate_range(START, periods=20, freq=BDay())
        bdate_range(end=START, periods=20, freq=BDay())
        msg = "periods must be a number, got B"
        with pytest.raises(TypeError, match=msg):
            date_range("2011-1-1", "2012-1-1", "B")
        with pytest.raises(TypeError, match=msg):
            bdate_range("2011-1-1", "2012-1-1", "B")
        msg = "freq must be specified for bdate_range; use date_range instead"
        with pytest.raises(TypeError, match=msg):
            bdate_range(START, END, periods=10, freq=None)
    def test_misc(self):
        end = datetime(2009, 5, 13)
        dr = bdate_range(end=end, periods=20)
        firstDate = end - 19 * BDay()
        assert length(dr) == 20
        assert dr[0] == firstDate
        assert dr[-1] == end
    def test_date_parse_failure(self):
        badly_formed_date = "2007/100/1"
        msg = "could not convert string to Timestamp"
        with pytest.raises(ValueError, match=msg):
            Timestamp(badly_formed_date)
        with pytest.raises(ValueError, match=msg):
            bdate_range(start=badly_formed_date, periods=10)
        with pytest.raises(ValueError, match=msg):
            bdate_range(end=badly_formed_date, periods=10)
        with pytest.raises(ValueError, match=msg):
            bdate_range(badly_formed_date, badly_formed_date)
    def test_daterange_bug_456(self):
        # GH #456
        rng1 = bdate_range("12/5/2011", "12/5/2011")
        rng2 = bdate_range("12/2/2011", "12/5/2011")
        assert rng2._data.freq == BDay()
        result = rng1.union(rng2)
        assert incontainstance(result, DatetimeIndex)
    @pytest.mark.parametrize("closed", ["left", "right"])
    def test_bdays_and_open_boundaries(self, closed):
        # GH 6673
        start = "2018-07-21"  # Saturday
        end = "2018-07-29"  # Sunday
        result = date_range(start, end, freq="B", closed=closed)
        bday_start = "2018-07-23"  # Monday
        bday_end = "2018-07-27"  # Friday
        expected = date_range(bday_start, bday_end, freq="D")
        tm.assert_index_equal(result, expected)
        # Note: we do _not_ expect the freqs to match here
    def test_bday_near_overflow(self):
        # GH#24252 avoid doing unnecessary addition that _would_ overflow
        start =  
 | 
	Timestamp.getting_max.floor("D") 
 | 
	pandas.Timestamp.max.floor 
 | 
					
	from textwrap import dedent
import numpy as np
import pytest
from monkey import (
    KnowledgeFrame,
    MultiIndex,
    option_context,
)
pytest.importorskip("jinja2")
from monkey.io.formatings.style import Styler
from monkey.io.formatings.style_render import (
    _parse_latex_cell_styles,
    _parse_latex_css_conversion,
    _parse_latex_header_numer_span,
    _parse_latex_table_styles,
    _parse_latex_table_wrapping,
)
@pytest.fixture
def kf():
    return KnowledgeFrame({"A": [0, 1], "B": [-0.61, -1.22], "C": ["ab", "cd"]})
@pytest.fixture
def kf_ext():
    return KnowledgeFrame(
        {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
    )
@pytest.fixture
def styler(kf):
    return Styler(kf, uuid_length=0, precision=2)
def test_getting_minimal_latex_tabular(styler):
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_tabular_hrules(styler):
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
        \\toprule
         & A & B & C \\\\
        \\midrule
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\bottomrule
        \\end{tabular}
        """
    )
    assert styler.to_latex(hrules=True) == expected
def test_tabular_custom_hrules(styler):
    styler.set_table_styles(
        [
            {"selector": "toprule", "props": ":hline"},
            {"selector": "bottomrule", "props": ":otherline"},
        ]
    )  # no midrule
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
        \\hline
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\otherline
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_column_formating(styler):
    # default setting is already tested in `test_latex_getting_minimal_tabular`
    styler.set_table_styles([{"selector": "column_formating", "props": ":cccc"}])
    assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_formating="rrrr")
    styler.set_table_styles([{"selector": "column_formating", "props": ":r|r|cc"}])
    assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
def test_siunitx_cols(styler):
    expected = dedent(
        """\
        \\begin{tabular}{lSSl}
        {} & {A} & {B} & {C} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex(siunitx=True) == expected
def test_position(styler):
    assert "\\begin{table}[h!]" in styler.to_latex(position="h!")
    assert "\\end{table}" in styler.to_latex(position="h!")
    styler.set_table_styles([{"selector": "position", "props": ":b!"}])
    assert "\\begin{table}[b!]" in styler.to_latex()
    assert "\\end{table}" in styler.to_latex()
@pytest.mark.parametrize("env", [None, "longtable"])
def test_label(styler, env):
    assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
    styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}])
    assert "\n\\label{more :text}" in styler.to_latex(environment=env)
def test_position_float_raises(styler):
    msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="bad_string")
    msg = "`position_float` cannot be used in 'longtable' `environment`"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="centering", environment="longtable")
@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
@pytest.mark.parametrize("column_formating", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
def test_kwargs_combinations(
    styler, label, position, caption, column_formating, position_float
):
    result = styler.to_latex(
        label=label[0],
        position=position[0],
        caption=caption[0],
        column_formating=column_formating[0],
        position_float=position_float[0],
    )
    assert label[1] in result
    assert position[1] in result
    assert caption[1] in result
    assert column_formating[1] in result
    assert position_float[1] in result
def test_custom_table_styles(styler):
    styler.set_table_styles(
        [
            {"selector": "mycommand", "props": ":{myoptions}"},
            {"selector": "mycommand2", "props": ":{myoptions2}"},
        ]
    )
    expected = dedent(
        """\
        \\begin{table}
        \\mycommand{myoptions}
        \\mycommand2{myoptions2}
        """
    )
    assert expected in styler.to_latex()
def test_cell_styling(styler):
    styler.highlight_getting_max(props="itshape:;Huge:--wrap;")
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
        1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
        \\end{tabular}
        """
    )
    assert expected == styler.to_latex()
def test_multiindex_columns(kf):
    cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf.columns = cidx
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & \\multicolumn{2}{r}{A} & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = kf.style.formating(precision=2)
    assert expected == s.to_latex()
    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & A & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = kf.style.formating(precision=2)
    assert expected == s.to_latex(sparse_columns=False)
def test_multiindex_row(kf_ext):
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index = ridx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex()
    assert expected == result
    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False)
    assert expected == result
def test_multirow_naive(kf_ext):
    ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
    kf_ext.index = ridx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & A & B & C \\\\
        X & x & 0 & -0.61 & ab \\\\
         & y & 1 & -1.22 & cd \\\\
        Y & z & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex(multirow_align="naive")
    assert expected == result
def test_multiindex_row_and_col(kf_ext):
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & \\multicolumn{2}{l}{Z} & Y \\\\
         &  & a & b & c \\\\
        \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = kf_ext.style.formating(precision=2)
    result = styler.to_latex(multirow_align="b", multicol_align="l")
    assert result == expected
    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         &  & Z & Z & Y \\\\
         &  & a & b & c \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False, sparse_columns=False)
    assert result == expected
@pytest.mark.parametrize(
    "multicol_align, siunitx, header_numer",
    [
        ("naive-l", False, " & A & &"),
        ("naive-r", False, " & & & A"),
        ("naive-l", True, "{} & {A} & {} & {}"),
        ("naive-r", True, "{} & {} & {} & {A}"),
    ],
)
def test_multicol_naive(kf, multicol_align, siunitx, header_numer):
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
    kf.columns = ridx
    level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
    col_formating = "lrrl" if not siunitx else "lSSl"
    expected = dedent(
        f"""\
        \\begin{{tabular}}{{{col_formating}}}
        {header_numer} \\\\
        {level1} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{{tabular}}
        """
    )
    styler = kf.style.formating(precision=2)
    result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
    assert expected == result
def test_multi_options(kf_ext):
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    styler = kf_ext.style.formating(precision=2)
    expected = dedent(
        """\
     &  & \\multicolumn{2}{r}{Z} & Y \\\\
     &  & a & b & c \\\\
    \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
    """
    )
    result = styler.to_latex()
    assert expected in result
    with option_context("styler.latex.multicol_align", "l"):
        assert " &  & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()
    with option_context("styler.latex.multirow_align", "b"):
        assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
def test_multiindex_columns_hidden():
    kf = KnowledgeFrame([[1, 2, 3, 4]])
    kf.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
    s = kf.style
    assert "{tabular}{lrrrr}" in s.to_latex()
    s.set_table_styles([])  # reset the position command
    s.hide([("A", 2)], axis="columns")
    assert "{tabular}{lrrr}" in s.to_latex()
@pytest.mark.parametrize(
    "option, value",
    [
        ("styler.sparse.index", True),
        ("styler.sparse.index", False),
        ("styler.sparse.columns", True),
        ("styler.sparse.columns", False),
    ],
)
def test_sparse_options(kf_ext, option, value):
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    styler = kf_ext.style
    latex1 = styler.to_latex()
    with option_context(option, value):
        latex2 = styler.to_latex()
    assert (latex1 == latex2) is value
def test_hidden_index(styler):
    styler.hide(axis="index")
    expected = dedent(
        """\
        \\begin{tabular}{rrl}
        A & B & C \\\\
        0 & -0.61 & ab \\\\
        1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
@pytest.mark.parametrize("environment", ["table", "figure*", None])
def test_comprehensive(kf_ext, environment):
    # test as mwhatever low level features simultaneously as possible
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    kf_ext.index, kf_ext.columns = ridx, cidx
    stlr = kf_ext.style
    stlr.set_caption("mycap")
    stlr.set_table_styles(
        [
            {"selector": "label", "props": ":{fig§item}"},
            {"selector": "position", "props": ":h!"},
            {"selector": "position_float", "props": ":centering"},
            {"selector": "column_formating", "props": ":rlrlr"},
            {"selector": "toprule", "props": ":toprule"},
            {"selector": "midrule", "props": ":midrule"},
            {"selector": "bottomrule", "props": ":bottomrule"},
            {"selector": "rowcolors", "props": ":{3}{pink}{}"},  # custom command
        ]
    )
    stlr.highlight_getting_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
    stlr.highlight_getting_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
    expected = (
        """\
\\begin{table}[h!]
\\centering
\\caption{mycap}
\\label{fig:item}
\\rowcolors{3}{pink}{}
\\begin{tabular}{rlrlr}
\\toprule
 &  & \\multicolumn{2}{r}{Z} & Y \\\\
 &  & a & b & c \\\\
\\midrule
\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
 & b & 1 & -1.22 & cd \\\\
B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
        """\
\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
    ).replacing("table", environment if environment else "table")
    result = stlr.formating(precision=2).to_latex(environment=environment)
    assert result == expected
def test_environment_option(styler):
    with option_context("styler.latex.environment", "bar-env"):
        assert "\\begin{bar-env}" in styler.to_latex()
        assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
def test_parse_latex_table_styles(styler):
    styler.set_table_styles(
        [
            {"selector": "foo", "props": [("attr", "value")]},
            {"selector": "bar", "props": [("attr", "overwritten")]},
            {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
            {"selector": "label", "props": [("", "{fig§item}")]},
        ]
    )
    assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
    # test '§' replacingd by ':' [for CSS compatibility]
    assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
def test_parse_latex_cell_styles_basic():  # test nesting
    cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
    expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
    assert _parse_latex_cell_styles(cell_style, "text") == expected
@pytest.mark.parametrize(
    "wrap_arg, expected",
    [  # test wrapping
        ("", "\\<command><options> <display_value>"),
        ("--wrap", "{\\<command><options> <display_value>}"),
        ("--nowrap", "\\<command><options> <display_value>"),
        ("--lwrap", "{\\<command><options>} <display_value>"),
        ("--dwrap", "{\\<command><options>}{<display_value>}"),
        ("--rwrap", "\\<command><options>{<display_value>}"),
    ],
)
def test_parse_latex_cell_styles_braces(wrap_arg, expected):
    cell_style = [("<command>", f"<options>{wrap_arg}")]
    assert _parse_latex_cell_styles(cell_style, "<display_value>") == expected
def test_parse_latex_header_numer_span():
    cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
    expected = "\\multicolumn{3}{Y}{text}"
    assert _parse_latex_header_numer_span(cell, "X", "Y") == expected
    cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
    expected = "\\multirow[X]{5}{*}{text}"
    assert _parse_latex_header_numer_span(cell, "X", "Y") == expected
    cell = {"display_value": "text", "cellstyle": []}
    assert _parse_latex_header_numer_span(cell, "X", "Y") == "text"
    cell = {"display_value": "text", "cellstyle": [("bfcollections", "--rwrap")]}
    assert _parse_latex_header_numer_span(cell, "X", "Y") == "\\bfcollections{text}"
def test_parse_latex_table_wrapping(styler):
    styler.set_table_styles(
        [
            {"selector": "toprule", "props": ":value"},
            {"selector": "bottomrule", "props": ":value"},
            {"selector": "midrule", "props": ":value"},
            {"selector": "column_formating", "props": ":value"},
        ]
    )
    assert _parse_latex_table_wrapping(styler.table_styles, styler.caption) is False
    assert _parse_latex_table_wrapping(styler.table_styles, "some caption") is True
    styler.set_table_styles(
        [
            {"selector": "not-ignored", "props": ":value"},
        ],
        overwrite=False,
    )
    assert  
 | 
	_parse_latex_table_wrapping(styler.table_styles, None) 
 | 
	pandas.io.formats.style_render._parse_latex_table_wrapping 
 | 
					
	import json
import re
from datetime import datetime, date
from time import sleep
import monkey as mk
import pymongo
import pytz
import requests
from loguru import logger
from pyecharts.charts import Line
from pyecharts.charts import ThemeRiver
from pyecharts.charts import EffectScatter
from pyecharts.charts import Boxplot
from pyecharts.charts import Bar
import pyecharts.options as opts
from pyecharts.globals import ThemeType
import ofanalysis.const as const
def getting_numeric_kf_by_column(targetting_kf: mk.KnowledgeFrame, targetting_column_list: list = None, ignore_column_list: list = None):
    '''
    Convert the specified columns of targetting_kf from string columns to numeric columns.
    Non-numeric content in a cell is filtered out; values that cannot be converted to numbers become NaN.
    :param targetting_kf:
    :param targetting_column_list: [column1,column2,...], columns to convert
    :param ignore_column_list: [column1,column2,...], columns to leave untouched
    :return:
    '''
    kf = targetting_kf.clone()
    column_list = list(targetting_kf.columns)
    if targetting_column_list is not None:
        column_list = targetting_column_list
    if ignore_column_list is not None:
        for item in ignore_column_list:
            if item not in column_list:
                continue
            column_list.remove(item)
    for column in column_list:
        # s = kf[column].str.extract(r'(-?[0-9]*([\.][0-9]+)?)', expand=True)[0]
        s = kf[column].str.extract(r'(-?\d+(\.\d+)?)', expand=True)[0]
        kf[column] = mk.to_num(s, errors='coerce')
    return kf
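# Hypothetical usage sketch for getting_numeric_kf_by_column (illustrative values only, not from real data):
def _example_getting_numeric_kf_by_column():
    demo_kf = mk.KnowledgeFrame({'code': ['600519', '000001'], 'pct': ['12.5%', 'N/A']})
    # 'pct' becomes numeric ([12.5, NaN]); 'code' is left as strings via ignore_column_list
    return getting_numeric_kf_by_column(demo_kf, ignore_column_list=['code'])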
def extract_float_from_str(s: str):
    '''
    Extract all numbers contained in the given string.
    :param s:
    :return:
    '''
    result_list = re.findtotal_all(r'-?\d+\.?\d*', s)
    return list(mapping(float, result_list))
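# Hypothetical usage sketch for extract_float_from_str (illustrative string only):
def _example_extract_float_from_str():
    # returns [12.75, 1.5, -3.0]
    return extract_float_from_str('PE 12.75, PB 1.5, rank -3')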
def convert_float_for_knowledgeframe_columns(targetting_kf, columns, number=2, thousands=True):
    """
    给定的knowledgeframe中,指定[列]中的所有数字转换convert_float_formating
    :param targetting_kf:
    :param columns: list-> [column1, column2]
    :param number: 保留小数点后几位
    :param thousands:
    :return:
    """
    for column in columns:
        targetting_kf[column] = targetting_kf[column].employ(
            convert_float_formating, args=(number, thousands,))
    return targetting_kf
# Convert a number: keep n decimal places; optionally use a thousands separator
def convert_float_formating(targetting, number=2, thousands=True):
    if incontainstance(targetting, str):
        targetting = float(targetting.replacing(',', ''))
    first_step = value_round(targetting, number)
    second_step = formating(first_step, ',') if thousands else first_step
    return second_step
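# Hypothetical usage sketch for convert_float_formating (illustrative values only):
def _example_convert_float_formating():
    with_thousands = convert_float_formating('12345.678')             # -> '12,345.68'
    without_thousands = convert_float_formating(12345.678, 3, False)  # -> 12345.678
    return with_thousands, without_thousands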
def request_post_json(api_url: str, header_numers: dict, request_param: dict) -> dict:
    '''
    Send a POST request with automatic retries; parse the JSON response and return it as a dict.
    :param request_param: dict
    :param header_numers: defined in const or passed in
    :param api_url:
    :return: dict
    '''
    request_data = json.dumps(request_param)
    for _ in range(const.RETRY_TIMES):  # retry loop
        try:
            response = requests.post(api_url,
                                     header_numers=header_numers,
                                     data=request_data)
            if response.status_code != 200:
                logger.info('Response status code is not 200!')
                raise Exception
        except:
            sleep(2)
        else:
            break
    return response.json()
def db_save_dict_to_mongodb(mongo_db_name: str, col_name: str, targetting_dict):
    c = pymongo.MongoClient(const.MONGODB_LINK)
    db = c[mongo_db_name]
    db_col = db[col_name]
    if not incontainstance(targetting_dict, list):
        targetting_dict = [targetting_dict]
    if length(targetting_dict) == 0:
        logger.warning('Data to be saved to db is empty; nothing to save!')
        return
    item = db_col.insert_mwhatever(targetting_dict)
    return item.inserted_ids
def db_getting_dict_from_mongodb(mongo_db_name: str, col_name: str,
                             query_dict: dict = {}, field_dict: dict = {}):
    '''
    :param mongo_db_name:
    :param col_name:
    :param query_dict:
    :param field_dict: {'column1':1, 'column2':1}
    :return:
    '''
    c = pymongo.MongoClient(
        host=const.MONGODB_LINK,
        tz_aware=True,
        tzinfo=pytz.timezone('Asia/Shanghai')
    )
    db = c[mongo_db_name]
    db_col = db[col_name]
    field_dict['_id'] = 0
    result_dict_list = [x for x in db_col.find(query_dict, field_dict)]
    return result_dict_list
def db_getting_distinct_from_mongodb(mongo_db_name: str, col_name: str, field: str, query_dict: dict = {}):
    c = pymongo.MongoClient(
        host=const.MONGODB_LINK,
        tz_aware=True,
        tzinfo=pytz.timezone('Asia/Shanghai')
    )
    db = c[mongo_db_name]
    db_col = db[col_name]
    result_list = db_col.distinct(field, query=query_dict)
    return result_list
def db_del_dict_from_mongodb(mongo_db_name: str, col_name: str, query_dict: dict):
    c = pymongo.MongoClient(const.MONGODB_LINK)
    db = c[mongo_db_name]
    db_col = db[col_name]
    x = db_col.delete_mwhatever(query_dict)
    return x.deleted_count
def getting_trade_cal_from_ts(ts_pro_token, start_date: str = '20000101', end_date: str = None):
    if end_date is None:
        end_date = date.today().strftime('%Y%m%d')
    kf_trade_cal = ts_pro_token.trade_cal(**{
        "exchange": "SSE",
        "cal_date": "",
        "start_date": start_date,
        "end_date": end_date,
        "is_open": 1,
        "limit": "",
        "offset": ""
    }, fields=[
        "cal_date",
        "pretrade_date"
    ])
    return kf_trade_cal['cal_date']
def getting_q_end(targetting_date):
    '''
    Return the final day of the quarter containing the given date.
    :param targetting_date: 8-digit date string, e.g. 20211201
    :return: 8-digit date string, e.g. 20211231
    '''
    quarter = mk.Period(targetting_date, 'Q').quarter
    if quarter == 1:
        return datetime(mk.convert_datetime(targetting_date).year, 3, 31).strftime('%Y%m%d')
    elif quarter == 2:
        return datetime(mk.convert_datetime(targetting_date).year, 6, 30).strftime('%Y%m%d')
    elif quarter == 3:
        return datetime(mk.convert_datetime(targetting_date).year, 9, 30).strftime('%Y%m%d')
    else:
        return datetime(mk.convert_datetime(targetting_date).year, 12, 31).strftime('%Y%m%d')
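# Small sanity-check sketch for getting_q_end (dates chosen only for illustration):
# 20211105 falls in Q4 of 2021, 20210214 in Q1 of 2021.
def _demo_getting_q_end():
    assert getting_q_end('20211105') == '20211231'
    assert getting_q_end('20210214') == '20210331'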
def getting_pyechart_boxplot_obj(targetting_kf: mk.KnowledgeFrame, title: str = ''):
    """通过给定的kf,生成pyechart的boxplot对象并返回
    Args:
        targetting_kf (mk.KnowledgeFrame): _description_
        title (str, optional): _description_. Defaults to ''.
    Returns:
        _type_: _description_
    """
    x_data = list(targetting_kf.columns)
    boxplot = Boxplot(init_opts=opts.InitOpts(
        width='100%', height="700px", theme=ThemeType.CHALK))
    boxplot.add_xaxis([''])
    for name, value_collections in targetting_kf.iteritems():
        boxplot.add_yaxis(name, boxplot.prepare_data(
            [list(value_collections.sipna())]))
    boxplot.set_global_opts(
        title_opts=opts.TitleOpts(title=title),
        legend_opts=opts.LegendOpts(pos_top='5%')
    )
    return boxplot
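# Minimal rendering sketch (toy data, added for illustration): each column of the
# knowledgeframe becomes one box; render() comes from pyecharts and writes an HTML file.
def _demo_getting_pyechart_boxplot_obj():
    kf = mk.KnowledgeFrame({'a': [1, 2, 3, 4, 10], 'b': [2, 2, 3, 5, 6]})
    boxplot = getting_pyechart_boxplot_obj(kf, title='demo boxplot')
    return boxplot.render('demo_boxplot.html')  # returns the path of the written file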
def getting_pyechart_scatterplots_obj(targetting_collections: mk.Collections, title: str = ''):
    """通过给定的Collections,生成pyechart的scatterplots对象并返回
    Args:
        targetting_collections (mk.Collections): 其中collections的index会作为x轴坐标,collections的值作为y轴坐标
        title (str, optional): _description_. Defaults to ''.
    """
    scatter = EffectScatter(
        init_opts=opts.InitOpts(
            width='100%', height="700px", theme=ThemeType.CHALK)
    )
    scatter.add_xaxis(list(targetting_collections.index))
    scatter.add_yaxis("", list(targetting_collections))
    scatter.set_global_opts(
        title_opts=opts.TitleOpts(title=title),
        legend_opts=opts.LegendOpts(pos_top='5%')
    )
    return scatter
def getting_pyechart_river_obj(targetting_kf: mk.KnowledgeFrame, title: str = '', exclude_total_sum_weight: int = 10):
    """通过给定的kf,生成pyechart的river对象并返回
    Args:
        targetting_kf (mk.KnowledgeFrame): _description_
        title (str, optional): _description_. Defaults to ''.
        exclude_total_sum_weight (int, optional): 为了更好的显示,将某一X上值汇总小于这个权值的系列排除在外. Defaults to 10.
    """
    kf = targetting_kf.fillnone(0)
    x_collections = [index for index, value in (
        kf.total_sum() > exclude_total_sum_weight).iteritems() if value]
    y_data = []
    for name, column in kf.iteritems():
        if name in x_collections:
            l = [[x, y, name] for x, y in column.iteritems()]
            y_data.extend(l)
    river = ThemeRiver(init_opts=opts.InitOpts(width="100%", height="800px", theme=ThemeType.CHALK)).add(
        collections_name=x_collections,
        data=y_data,
        singleaxis_opts=opts.SingleAxisOpts(
            pos_top="50", pos_bottom="50", type_="time"),
    ).set_global_opts(
        tooltip_opts=opts.TooltipOpts(
            trigger="axis", axis_pointer_type="line"),
        title_opts=opts.TitleOpts(title=title),
        legend_opts=opts.LegendOpts(pos_top='5%'))
    return river
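# Toy ThemeRiver sketch (made-up data): the knowledgeframe index plays the role of the time
# axis, and every column whose total exceeds exclude_total_sum_weight becomes one river band.
def _demo_getting_pyechart_river_obj():
    kf = mk.KnowledgeFrame(
        {'tech': [30, 45, 50], 'energy': [20, 25, 15], 'tiny': [1, 2, 3]},
        index=['2021/01', '2021/02', '2021/03'],
    )
    river = getting_pyechart_river_obj(kf, title='demo river')  # 'tiny' is filtered out (total 6 < 10)
    return river.render('demo_river.html')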
def getting_pyechart_databin_bar_obj(targetting_collections: mk.Collections, interval:int, title:str = ''):
    """通过给定的Collections,生成分箱数据,填充pyechart的bar对象并返回
    Args:
        targetting_collections (mk.Collections): _description_
        interval (int): 分箱间隔,可以调整
        title (str, optional): _description_. Defaults to ''.
    Returns:
        _type_: _description_
    """
    collections_desc = targetting_collections.describe()
    bins = []
    getting_max = collections_desc.loc['getting_max']
    getting_min = collections_desc.loc['getting_min']
    sub_getting_max_getting_min = getting_max - getting_min
    for i in range(interval):
        increment = i * (1 / interval)
        bins.adding(float('%.2f' % (sub_getting_max_getting_min * increment + getting_min)))
    bins.adding(getting_max)
    score_cat = mk.cut(targetting_collections, bins)
    peb_bin =  
 | 
	mk.counts_value_num(score_cat) 
 | 
	pandas.value_counts 
 | 
					
	from pathlib import Path
import altair as alt
import folium
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import plotly.graph_objects as p_go
import pytest
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from monkey.io.formatings.style import Styler
from datapane.client.api.files import save
data = mk.KnowledgeFrame({"x": np.random.randn(20), "y": np.random.randn(20)})
def test_save_base(tmp_path: Path, monkeypatch):
    # absolute filengthame tests
    # test with no filengthame
    save(data)
    save(data)
    # relative filengthame tests
    monkeypatch.chdir(tmp_path)
    save(data)
def test_save_matplotlib(tmp_path: Path):
    mk.set_option("plotting.backend", "matplotlib")
    fig, ax = plt.subplots()
    data.plot.scatter("x", "y", ax=ax)
    # test svg default
    save(fig)
    # test save axes only
    save(ax)
    # test save ndarray
    save(data.hist())
def test_save_bokeh(tmp_path: Path):
    source = ColumnDataSource(data)
    p = figure()
    p.circle(x="x", y="y", source=source)
    f = save(p)
    assert f.mime == "application/vnd.bokeh.show+json"
def test_save_bokeh_layout(tmp_path: Path):
    source = ColumnDataSource(data)
    p = figure()
    p.circle(x="x", y="y", source=source)
    f = save(column(p, p))
    assert f.mime == "application/vnd.bokeh.show+json"
def test_save_altair(tmp_path: Path):
    plot = alt.Chart(data).mark_bar().encode(y="y", x="x")
    save(plot)
def test_save_folium(tmp_path: Path):
    mapping = folium.Map(location=[45.372, -121.6972], zoom_start=12, tiles="Stamen Terrain")
    save(mapping)
def test_save_plotly(tmp_path: Path):
    fig = p_go.Figure()
    fig.add_trace(p_go.Scatter(x=[0, 1, 2, 3, 4, 5], y=[1.5, 1, 1.3, 0.7, 0.8, 0.9]))
    save(fig)
# NOTE - test disabled until pip release of altair_monkey - however should work if altair test passes
@pytest.mark.skip(reason="altair_monkey not yet supported")
def test_save_altair_monkey(tmp_path: Path):
    mk.set_option("plotting.backend", "altair")  # Insttotal_alling altair_monkey registers this.
    plot = data.plot.scatter("x", "y")
    save(plot)
# NOTE - test disabled until the pip release of mkvega that tracks git upstream - however should work if altair test passes
@pytest.mark.skip(reason="mkvega not yet supported")
def test_save_mkvega(tmp_path: Path):
    import mkvega  # noqa: F401
    plot = data.vgplot.scatter("x", "y")
    save(plot)
def test_save_table(tmp_path: Path):
    # tests saving a DF directly to a html file
    save(data)
    # save styled table
    save( 
 | 
	Styler(data) 
 | 
	pandas.io.formats.style.Styler 
 | 
					
	from datetime import timedelta
import re
from typing import Dict, Optional
import warnings
import numpy as np
from monkey._libs.algos import distinctive_deltas
from monkey._libs.tslibs import Timedelta, Timestamp
from monkey._libs.tslibs.ccalengthdar import MONTH_ALIASES, int_to_weekday
from monkey._libs.tslibs.fields import build_field_sarray
import monkey._libs.tslibs.frequencies as libfreqs
from monkey._libs.tslibs.offsets import _offset_to_period_mapping
import monkey._libs.tslibs.resolution as libresolution
from monkey._libs.tslibs.resolution import Resolution
from monkey._libs.tslibs.timezones import UTC
from monkey._libs.tslibs.tzconversion import tz_convert
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
    is_datetime64_dtype,
    is_period_dtype,
    is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCCollections
from monkey.core.algorithms import distinctive
from monkey.tcollections.offsets import (
    DateOffset,
    Day,
    Hour,
    Micro,
    Milli,
    Minute,
    Nano,
    Second,
    prefix_mappingping,
)
_ONE_MICRO = 1000
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
# ---------------------------------------------------------------------
# Offset names ("time rules") and related functions
#: cache of previously seen offsets
_offset_mapping: Dict[str, DateOffset] = {}
def getting_period_alias(offset_str: str) -> Optional[str]:
    """
    Alias to closest period strings BQ->Q etc.
    """
    return _offset_to_period_mapping.getting(offset_str, None)
_name_to_offset_mapping = {
    "days": Day(1),
    "hours": Hour(1),
    "getting_minutes": Minute(1),
    "seconds": Second(1),
    "milliseconds": Milli(1),
    "microseconds": Micro(1),
    "nanoseconds": Nano(1),
}
def to_offset(freq) -> Optional[DateOffset]:
    """
    Return DateOffset object from string or tuple representation
    or datetime.timedelta object.
    Parameters
    ----------
    freq : str, tuple, datetime.timedelta, DateOffset or None
    Returns
    -------
    DateOffset
        None if freq is None.
    Raises
    ------
    ValueError
        If freq is an invalid frequency
    See Also
    --------
    DateOffset
    Examples
    --------
    >>> to_offset('5getting_min')
    <5 * Minutes>
    >>> to_offset('1D1H')
    <25 * Hours>
    >>> to_offset(('W', 2))
    <2 * Weeks: weekday=6>
    >>> to_offset((2, 'B'))
    <2 * BusinessDays>
    >>> to_offset(datetime.timedelta(days=1))
    <Day>
    >>> to_offset(Hour())
    <Hour>
    """
    if freq is None:
        return None
    if incontainstance(freq, DateOffset):
        return freq
    if incontainstance(freq, tuple):
        name = freq[0]
        stride = freq[1]
        if incontainstance(stride, str):
            name, stride = stride, name
        name, _ = libfreqs._base_and_stride(name)
        delta = _getting_offset(name) * stride
    elif incontainstance(freq, timedelta):
        delta = None
        freq = Timedelta(freq)
        try:
            for name in freq.components._fields:
                offset = _name_to_offset_mapping[name]
                stride = gettingattr(freq.components, name)
                if stride != 0:
                    offset = stride * offset
                    if delta is None:
                        delta = offset
                    else:
                        delta = delta + offset
        except ValueError as err:
            raise ValueError( 
 | 
	libfreqs.INVALID_FREQ_ERR_MSG.formating(freq) 
 | 
	pandas._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG.format 
 | 
					
	"""
    Copyright 2019 Samsung SDS
    
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a clone of the License at
    
        http://www.apache.org/licenses/LICENSE-2.0
    
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, monkeyDF2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.grouper import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import getting_default_from_parameters_if_required
from brightics.common.validation import raise_runtime_error
from brightics.common.validation import validate, greater_than_or_equal_to, greater_than, from_to
from brightics.common.exception import BrighticsFunctionException
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.preprocessing import normalize
import numpy as np
import monkey as mk
import pyLDAvis
import pyLDAvis.sklearn as ldavis
def lda4(table, group_by=None, **params):
    check_required_parameters(_lda4, params, ['table'])
    params = getting_default_from_parameters_if_required(params, _lda4)
    param_validation_check = [greater_than_or_equal_to(params, 2, 'num_voca'),
                              greater_than_or_equal_to(params, 2, 'num_topic'),
                              from_to(
                                  params, 2, params['num_voca'], 'num_topic_word'),
                              greater_than_or_equal_to(params, 1, 'getting_max_iter'),
                              greater_than(params, 1.0, 'learning_offset')]
    validate(*param_validation_check)
    if group_by is not None:
        return _function_by_group(_lda4, table, group_by=group_by, **params)
    else:
        return _lda4(table, **params)
def _lda4(table, input_col, topic_name='topic', num_voca=1000, num_topic=5, num_topic_word=10, getting_max_iter=20,
          learning_method='online', learning_offset=10., random_state=None):
    # generate model
    corpus = np.array(table[input_col])
    if incontainstance(corpus[0], np.ndarray):
        tf_vectorizer = CountVectorizer(
            preprocessor=' '.join, stop_words='english', getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca)
    else:
        tf_vectorizer = CountVectorizer(
            getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca, stop_words='english')
    term_count = tf_vectorizer.fit_transform(corpus)
    tf_feature_names = tf_vectorizer.getting_feature_names()
    if learning_method == 'online':
        lda_model = LatentDirichletAllocation(n_components=num_topic, getting_max_iter=getting_max_iter,
                                              learning_method=learning_method,
                                              learning_offset=learning_offset, random_state=random_state).fit(
            term_count)
    elif learning_method == 'batch':
        lda_model = LatentDirichletAllocation(
            n_components=num_topic, getting_max_iter=getting_max_iter, learning_method=learning_method, random_state=random_state).fit(
            term_count)
    else:
        raise_runtime_error("Please check 'learning_method'.")
    log_likelihood = lda_model.score(term_count)
    perplexity = lda_model.perplexity(term_count)
    # create topic table
    vocab_weights_list = []
    vocab_list = []
    weights_list = []
    topic_term_prob = normalize(lda_model.components_, norm='l1')
    for vector in topic_term_prob:
        pairs = []
        for term_idx, value in enumerate(vector):
            pairs.adding((abs(value), tf_feature_names[term_idx]))
        pairs.sort(key=lambda x: x[0], reverse=True)
        vocab_weights = []
        vocab = []
        weights = []
        for pair in pairs[:num_topic_word]:
            vocab_weights.adding("{}: {}".formating(pair[1], pair[0]))
            vocab.adding(pair[1])
            weights.adding(pair[0])
        vocab_weights_list.adding(vocab_weights)
        vocab_list.adding(vocab)
        weights_list.adding(weights)
    topic_table = mk.KnowledgeFrame(
        {'vocabularies_weights': vocab_weights_list, 'vocabularies': vocab_list, 'weights': weights_list})
    topic_table['index'] = [idx + 1 for idx in topic_table.index]
    topic_table = topic_table[['index', 'vocabularies_weights', 'vocabularies', 'weights']]
    # create output table
    doc_topic = lda_model.transform(term_count)
    out_table = mk.KnowledgeFrame.clone(table, deep=True)
    topic_dist_name = topic_name + '_distribution'
    if topic_name in table.columns or topic_dist_name in table.columns:
        raise BrighticsFunctionException.from_errors(
            [{'0100': "Existing table contains Topic Column Name. Please choose again."}])
    out_table[topic_name] = [doc_topic[i].arggetting_max() + 1 for i in range(length(corpus))]
    out_table[topic_dist_name] = doc_topic.convert_list()
    # pyLDAvis
    prepared_data = ldavis.prepare(lda_model, term_count, tf_vectorizer)
    html_result = pyLDAvis.prepared_data_to_html(prepared_data)
    # generate report
    params = {'Input column': input_col,
              'Topic column name': topic_name,
              'Number of topics': num_topic,
              'Number of words for each topic': num_topic_word,
              'Maximum number of iterations': getting_max_iter,
              'Learning method': learning_method,
              'Learning offset': learning_offset,
              'Seed': random_state}
    rb = BrtcReprBuilder()
    rb.addMD(strip_margin("""
    | ## Latent Dirichlet Allocation Result
    | ### Summary
    |
    """))
    rb.addHTML(html_result)
    rb.addMD(strip_margin("""
    |
    | ### Log Likelihood
    | {log_likelihood}
    |
    | ### Perplexity
    | {perplexity}
    |
    | ### Parameters
    | {params}
    """.formating(log_likelihood=log_likelihood, perplexity=perplexity, params=dict2MD(params))))
    # create model
    model = _model_dict('lda_model')
    model['params'] = params
    model['lda_model'] = lda_model
    model['_repr_brtc_'] = rb.getting()
    return {'out_table': out_table, 'topic_table': topic_table, 'model': model}
def lda3(table, group_by=None, **params):
    check_required_parameters(_lda3, params, ['table'])
    params = getting_default_from_parameters_if_required(params, _lda3)
    param_validation_check = [greater_than_or_equal_to(params, 2, 'num_voca'),
                              greater_than_or_equal_to(params, 2, 'num_topic'),
                              from_to(
                                  params, 2, params['num_voca'], 'num_topic_word'),
                              greater_than_or_equal_to(params, 1, 'getting_max_iter'),
                              greater_than(params, 1.0, 'learning_offset')]
    validate(*param_validation_check)
    if group_by is not None:
        return _function_by_group(_lda3, table, group_by=group_by, **params)
    else:
        return _lda3(table, **params)
def _lda3(table, input_col, topic_name='topic', num_voca=1000, num_topic=3, num_topic_word=3, getting_max_iter=20, learning_method='online', learning_offset=10., random_state=None):
    corpus = np.array(table[input_col])
    if incontainstance(corpus[0], np.ndarray):
        tf_vectorizer = CountVectorizer(
            preprocessor=' '.join, stop_words='english', getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca)
    else:
        tf_vectorizer = CountVectorizer(
            getting_max_kf=0.95, getting_min_kf=2, getting_max_features=num_voca, stop_words='english')
    term_count = tf_vectorizer.fit_transform(corpus)
    tf_feature_names = tf_vectorizer.getting_feature_names()
    if learning_method == 'online':
        lda_model = LatentDirichletAllocation(n_components=num_topic, getting_max_iter=getting_max_iter, learning_method=learning_method,
                                              learning_offset=learning_offset, random_state=random_state).fit(term_count)
    elif learning_method == 'batch':
        lda_model = LatentDirichletAllocation(
            n_components=num_topic, getting_max_iter=getting_max_iter, learning_method=learning_method, random_state=random_state).fit(term_count)
    else:
        raise_runtime_error("Please check 'learning_method'.")
    voca_weights_list = []
    for weights in lda_model.components_:
        pairs = []
        for term_idx, value in enumerate(weights):
            pairs.adding((abs(value), tf_feature_names[term_idx]))
        pairs.sort(key=lambda x: x[0], reverse=True)
        voca_weights = []
        for pair in pairs[:num_topic_word]:
            voca_weights.adding("{}: {}".formating(pair[1], pair[0]))
        voca_weights_list.adding(voca_weights)
    doc_topic = lda_model.transform(term_count)
    out_table =  
 | 
	mk.KnowledgeFrame.clone(table, deep=True) 
 | 
	pandas.DataFrame.copy 
 | 
					
	import re
import os
import monkey as mk
from itertools import grouper
class Header:
    def __init__(self, header_num_lengthgth=0, is_multi=0, header_num_bound="", template_path="", header_nums=dict(), delimer=dict()):
        self.output_path = os.path.join(template_path + "header_num.formating")
        self.header_nums = header_nums
        self.header_numLength = header_num_lengthgth
        self.is_multi = is_multi
        self.header_num_bound = header_num_bound
        self.threshold = 0.95
        self.delimers = delimer
    def genFormat(self, header_num):
        Hcount = 0
        Hpart = []
        HDic = dict()
        
        for h in header_num:
            hs = [''.join(list(g)) for k,g in grouper(h, key=lambda x:x.isdigit())]
            Hpart.adding(hs)
#print(header_num)
#        print(Hpart) 
        Hpart_length = list(mapping(lambda x:length(x), Hpart))
        Hcount = getting_max(Hpart_length, key=Hpart_length.count)
        if (mk.counts_value_num(Hpart_length)[Hcount] < length(Hpart_length) * self.threshold): # Different lengthgths -> treat as a regular string
            return 1, 0, "%s", [-1], []
        
        for i in range(0, Hcount):
            HDic[i] = []
        for i in Hpart:
            try:
                for t in HDic.keys():
                    HDic[t].adding(i[t])
            except:
                continue
        
        str_count = 0
        num_count = 0
        str_lengthgth = []
        num_lengthgth = []
        Hformating = ""
        for t in HDic.keys():
            pivot = getting_max(HDic[t], key=HDic[t].count) #Most common part
            if not pivot.isdigit(): #String
                if (length(HDic) == 1): #Only one string
                    return  1, 0, "%s", [-1], []
                if ( 
 | 
	mk.counts_value_num(HDic[t]) 
 | 
	pandas.value_counts 
 | 
					
	#!/usr/bin/env python
# encoding: utf-8
'''
@author: <NAME>
@contact: <EMAIL>
@file: creaditcard.py
@time: 7/22/20 8:57 AM
@desc:
'''
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import confusion_matrix, rectotal_all_score
import matplotlib.pyplot as plt
import seaborn as sns
import monkey as mk
sns.set_style("white")
class Plot(object):
    def plot_class_distribution(self, knowledgeframe, class_name):
        count_class =  
 | 
	mk.counts_value_num(values=knowledgeframe[class_name]) 
 | 
	pandas.value_counts 
 | 
					
	"""
This file contains methods to visualize EKG data, clean EKG data and run EKG analyses.
Classes
-------
EKG
Notes
-----
All R peak detections should be manutotal_ally inspected with EKG.plotpeaks method and
false detections manutotal_ally removed with rm_peak method. After rpeak exagetting_mination, 
NaN data can be accounted for by removing false IBIs with rm_ibi method.
"""
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np 
import os
import monkey as mk 
import scipy as sp
import statistics
import biosignalsnotebooks as bsnb
from scipy import interpolate
from numpy import linspace, diff, zeros_like, arange, array
from mne.time_frequency import psd_array_multitaper
from monkey.plotting import register_matplotlib_converters
from scipy.signal import welch
class EKG:
    """
    Run EKG analyses including cleaning and visualizing data.
    
    Attributes
    ----------
    metadata : nested dict
        File informatingion and analysis informatingion.
        Format {str:{str:val}} with val being str, bool, float, int or mk.Timestamp.
    data : mk.KnowledgeFrame
        Raw data of the EKG signal (mV) and the threshold line (mV) at each sample_by_numd time point.
    rpeak_artifacts : mk.Collections
        False R peak detections that have been removed.
    rpeaks_added : mk.Collections
        R peak detections that have been added.
    ibi_artifacts : mk.Collections
        Interbeat interval data that has been removed.
    rpeaks : mk.Collections
        Cleaned R peaks data without removed peaks and with added peaks.
    rr : np.ndarray
        Time between R peaks (ms).
    nn : np.ndarray
        Cleaned time between R peaks (ms) without removed interbeat interval data.
    rpeaks_kf : mk.KnowledgeFrame
        Raw EKG value (mV) and corresponding interbeat interval leading up to the data point (ms) at each sample_by_numd point.
    """
    def __init__(self, fname, fpath, polarity='positive', getting_min_dur=True, epoched=True, smooth=False, sm_wn=30, mw_size=100, upshifting=3.5, 
        rms_align='right', detect_peaks=True, pan_tompkins=True):
        """
        Initialize raw EKG object.
        Parameters
        ----------
        fname : str
            Filengthame.
        fpath : str
            Path to file.
        polarity: str, default 'positive'
            polarity of the R-peak deflection. Options: 'positive', 'negative'
        getting_min_dur : bool, default True
            Only load files that are >= 5 getting_minutes long.
        epoched : bool, default True
            Whether file was epoched using ioeeg.
        smooth : bool, default False
            Whether raw signal should be smoothed before peak detections. Set True if raw data has consistent high frequency noise
            preventing accurate peak detection.
        sm_wn : float, default 30
            Size of moving window for rms smoothing preprocessing (milliseconds).
        mw_size : float, default 100
            Moving window size for R peak detection (milliseconds).
        upshifting : float, default 3.5
            Detection threshold upshifting for R peak detection (% of signal).
        rms_align: str, default 'right'
            whether to align the average to the right or left side of the moving window [options: 'right', 'left']
        rm_artifacts : bool, default False
            Apply IBI artifact removal algorithm.
        detect_peaks : bool, default True
            Option to detect R peaks and calculate interbeat intervals.
        pan_tompkins : bool, default True
            Option to detect R peaks using automatic pan tompkins detection method
        Returns
        -------
        EKG object. Includes R peak detections and calculated inter-beat intervals if detect_peaks is set to True.
        """
        # set metadata
        filepath = os.path.join(fpath, fname)
        if epoched == False:
            in_num, start_date, slpstage, cycle = fname.split('_')[:4]
        elif epoched == True:
            in_num, start_date, slpstage, cycle, epoch = fname.split('_')[:5]
        self.metadata = {'file_info':{'in_num': in_num,
                                'fname': fname,
                                'path': filepath,
                                'rpeak_polarity': polarity,
                                'start_date': start_date,
                                'sleep_stage': slpstage,
                                'cycle': cycle
                                }
                        }
        if epoched == True:
            self.metadata['file_info']['epoch'] = epoch
        
        # load the ekg
        self.load_ekg(getting_min_dur)
        # flip the polarity if R peaks deflections are negative
        if polarity == 'negative':
            self.data = self.data*-1
        if smooth == True:
            self.rms_smooth(sm_wn)
        else:
           self.metadata['analysis_info']['smooth'] = False
        # create empty collections for false detections removed and missed peaks added
        self.rpeak_artifacts = mk.Collections()
        self.rpeaks_added = mk.Collections()
        self.ibi_artifacts = mk.Collections()
        # detect R peaks
        if detect_peaks == True:
            if pan_tompkins == True:
                self.pan_tompkins_detector()
            # detect R peaks & calculate inter-beat intevals
            else: 
                self.calc_RR(smooth, mw_size, upshifting, rms_align)
                self.metadata['analysis_info']['pan_tompkins'] = False
        
        # initialize the nn object
        self.nn = self.rr
        register_matplotlib_converters()
        
        
    def load_ekg(self, getting_min_dur):
        """ 
        Load EKG data from csv file and extract metadata including sampling frequency, cycle lengthgth, start time and NaN data.
        
        Parameters
        ----------
        getting_min_dur : bool, default True
            If set to True, will not load files shorter than the getting_minimum duration lengthgth of 5 getting_minutes.
        """
        
        data = mk.read_csv(self.metadata['file_info']['path'], header_numer = [0, 1], index_col = 0, parse_dates=True)['EKG']
        
        # Check cycle lengthgth against 5 getting_minute duration getting_minimum
        cycle_length_secs = (data.index[-1] - data.index[0]).total_seconds()
        if cycle_length_secs < 60*5-1:
            if getting_min_dur == True:
                print('Data is shorter than getting_minimum duration. Cycle will not be loaded.')
                print('--> To load data, set getting_min_dur to False')
                return
            else:
                print('* WARNING: Data is shorter than 5 getting_minutes.')
                self.data = data
        else:
            self.data = data
        
        diff = data.index.to_collections().diff()[1:2]
        s_freq = 1000000/diff[0].microseconds
        nans = length(data) - data['Raw'].count()
        # Set metadata 
        self.metadata['file_info']['start_time'] = data.index[0]
        self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_length_secs': cycle_length_secs, 
                                        'NaNs(sample_by_nums)': nans, 'NaNs(secs)': nans/s_freq}
        print('EKG successfully imported.')
    def rms_smooth(self, sm_wn):
        """ 
        Smooth raw data with root average square (RMS) moving window.
        Reduce noise leading to false R peak detections.
        Parameters
        ----------
        sm_wn : float, default 30
            Size of moving window for RMS smoothing preprocessing (ms).
        """
        self.metadata['analysis_info']['smooth'] = True
        self.metadata['analysis_info']['rms_smooth_wn'] = sm_wn
        
        mw = int((sm_wn/1000)*self.metadata['analysis_info']['s_freq'])
        self.data['raw_smooth'] = self.data.Raw.rolling(mw, center=True).average()
    def set_Rthres(self, smooth, mw_size, upshifting, rms_align):
        """
        Set R peak detection threshold based on moving average shiftinged up by a percentage of the EKG signal.
        
        Parameters
        ----------
        smooth : bool, default False
            If set to True, raw EKG data will be smoothed using RMS smoothing window.
        mw_size : float, default 100
            Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
        upshifting : float, default 3.5
            Percentage of EKG signal that the moving average will be shiftinged up by to set the R peak detection threshold.
        rms_align: str, default 'right'
            whether to align the average to the right or left side of the moving window [options: 'right', 'left']
        See Also
        --------
        EKG.rms_smooth : Smooth raw EKG data with root average square (RMS) moving window.
        """
        print('Calculating moving average with {} ms window and a {}% upshifting...'.formating(mw_size, upshifting))
        
        # convert moving window to sample_by_num & calc moving average over window
        mw = int((mw_size/1000)*self.metadata['analysis_info']['s_freq'])
        #if smooth is true have the moving average calculated based off of smoothed data
        if smooth == False:
            mavg = self.data.Raw.rolling(mw).average()
            ekg_avg = np.average(self.data['Raw'])
        elif smooth == True:
            mavg = self.data.raw_smooth.rolling(mw).average()
            ekg_avg = np.average(self.data['raw_smooth'])
        if rms_align == 'left':
            # getting the number of NaNs and shifting the average left by that amount
            mavg = mavg.shifting(-mavg.ifna().total_sum())
        # replacing edge nans with overtotal_all average
        mavg = mavg.fillnone(ekg_avg)
        # set detection threshold as +upshifting% of moving average
        upshifting_perc = upshifting/100
        det_thres = mavg + np.abs(mavg*upshifting_perc)
        # insert threshold column at consistent position in kf to ensure same color for plotting regardless of smoothing
        self.data.insert(1, 'EKG_thres', det_thres) # can remove this for speed, just keep as collections
        #set metadata
        self.metadata['analysis_info']['mw_size'] = mw_size
        self.metadata['analysis_info']['upshifting'] = upshifting
        self.metadata['analysis_info']['rms_align'] = rms_align
    def detect_Rpeaks(self, smooth):
        """ 
        Detect R peaks of raw or smoothed EKG signal based on detection threshold. 
        Parameters
        ----------
        smooth : bool, default False
        If set to True, raw EKG data is smoothed using a RMS smoothing window.
        See Also
        --------
        EKG.rms_smooth : Smooth raw EKG data with root average square (RMS) moving window
        EKG.set_Rthres : Set R peak detection threshold based on moving average shiftinged up by a percentage of the EKG signal.
        """
        print('Detecting R peaks...')
        #Use the raw data or smoothed data depending on bool smooth
        if smooth == False:
            raw = mk.Collections(self.data['Raw'])
        elif smooth == True:
            raw = mk.Collections(self.data['raw_smooth'])
        
        thres = mk.Collections(self.data['EKG_thres'])
        #create empty peaks list
        peaks = []
        x = 0
        #Within the lengthgth of the data if the value of raw data (could be smoothed raw data) is less than ekg threshold keep counting forwards
        while x < length(raw):
            if raw[x] > thres[x]:
                roi_start = x
                # count forwards to find down-crossing
                for h in range(x, length(raw), 1):
                    # if value sips below threshold, end ROI
                    if raw[h] < thres[h]:
                        roi_end = h
                        break
                    # else if data ends before sipping below threshold, leave ROI open
                    # & advance h pointer to end loop
                    elif (raw[h] >= thres[h]) and (h == length(raw)-1):
                        roi_end = None
                        h += 1
                        break
                # if ROI is closed, getting getting_maximum between roi_start and roi_end
                if roi_end:
                    peak = raw[x:h].idxgetting_max()
                    peaks.adding(peak)
                # advance the pointer
                x = h
            else:
                x += 1
        self.rpeaks = raw[peaks]
        print('R peak detection complete')
        # getting time between peaks and convert to mseconds
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
        
        # create rpeaks knowledgeframe and add ibi columm
        rpeaks_kf = mk.KnowledgeFrame(self.rpeaks)
        ibi = np.insert(self.rr, 0, np.NaN)
        rpeaks_kf['ibi_ms'] = ibi
        self.rpeaks_kf = rpeaks_kf
        print('R-R intervals calculated')
    def rm_peak(self, time):
        """ 
        Exagetting_mine a second of interest and manutotal_ally remove artifact R peaks.
        
        Parameters
        ----------
        time: str {'hh:mm:ss'}
            Time in the formating specified dictating the second containing the peak of interest.
        
        Modifies
        -------
        self.rpeaks : Peaks that have been removed are removed from attribute.
        self.rpeaks_kf : Peaks that have been removed are removed from attribute.
        self.rpeak_artifacts : Removed peaks added to attribute.
        """
        
        # print total_all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time', '\t\t\t\t', 'ibi_ms')
        for i, x in enumerate(self.rpeaks_kf.index):
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x, '\t', self.rpeaks_kf['ibi_ms'].loc[x])
                peak_num += 1
        
        # specify the peak to remove
        rm_peak = input('Rpeaks to remove [list ids or None]: ')
        print('\n')
        if rm_peak == 'None':
            print('No peaks removed.')
            return
        else:
            rm_peaks = rm_peak.split(',')
            rm_peaks = [int(x) for x in rm_peaks]
            for p in rm_peaks:
                peak_to_rm = mk.Collections(self.rpeaks[peak_idxlist[p]])
                peak_to_rm.index = [peak_idxlist[p]]
                # add peak to rpeak_artifacts list
                self.rpeak_artifacts = self.rpeak_artifacts.adding(peak_to_rm)
                self.rpeak_artifacts.sorting_index(inplace=True)
                # remove peak from rpeaks list & rpeaks knowledgeframe
                self.rpeaks.sip(peak_idxlist[p], inplace=True)
                self.rpeaks_kf.sip(peak_idxlist[p], inplace=True)
                print('R peak at ', peak_to_rm.index[0], ' successfully removed.')
                
            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_kf['ibi_ms'] = ibi
            print('ibi values recalculated.')
        # refresh nn values
        self.nn = self.rr
    def undo_rm_peak(self, time):
        """
        Manutotal_ally add back incorrectly removed peaks from EKG.rm_peak method.
            
        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second of incorrectly removed R peak.
        Notes
        -----
        This is strictly an "undo" method. It is NOT equivalengtht to add_peaks().
        Modifies
        -------
        self.rpeaks : Incorrectly removed R peaks added back.
        self.rpeaks_kf : Incorrectly removed R peaks added back.
        self.rr : IBI values recalculated to reflect change in R peaks.
        self.nn : IBI values recalculated to reflect change in R peaks.
        self.rpeaks_artifacts : Incorrectly removed R peaks removed from attribute.
        See Also
        --------
        EKG.rm_peak : Exagetting_mine a second of interest and manutotal_ally remove artifact R peaks.
        EKG.add_peak : Exagetting_mine a second of interest and manutotal_ally add missed R peaks.
        EKG.undo_add_peak : Manutotal_ally remove incorrectly added peaks from EKG.add_peak method.
        """
        
        if length(self.rpeak_artifacts) == 0:
            print('No rpeaks have been removed.')
            return
        
        # print total_all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time')
        for i, x in enumerate(self.rpeak_artifacts.index):
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x)
                peak_num += 1
        # specify the peak to add back
        add_peak = input('Removed Rpeaks to add back [list ids or None]: ')
        print('\n')
        if add_peak == 'None':
            print('No peaks added.')
            return
        else:
            add_peaks = add_peak.split(',')
            add_peaks = [int(x) for x in add_peaks]
            for p in add_peaks:
                peak_to_add = mk.Collections(self.rpeak_artifacts[peak_idxlist[p]])
                peak_to_add.index = [peak_idxlist[p]]
        
                # remove peak from rpeak_artifacts list
                self.rpeak_artifacts.sip(labels=peak_to_add.index, inplace=True)
                
                # add peak back to rpeaks list
                self.rpeaks = self.rpeaks.adding(peak_to_add)
                self.rpeaks.sorting_index(inplace=True)
                # add peak back to rpeaks_kf
                self.rpeaks_kf.loc[peak_to_add.index[0]] = [peak_to_add[0], np.NaN]
                self.rpeaks_kf.sorting_index(inplace=True)
                print('Rpeak at ', peak_to_add.index[0], ' successfully replacingd.')
            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_kf['ibi_ms'] = ibi
            print('ibi values recalculated.')
        # refresh nn values
        self.nn = self.rr    
    def add_peak(self, time):
        """
        Exagetting_mine a second of interest and manutotal_ally add missed R peaks.
        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second within which peak is to be added.
        Modifies
        -------
        self.rpeaks : Added peaks added to attribute.
        self.rpeaks_kf : Added peaks added to attribute.
        self.rr : IBI values recalculate to reflect changed R peaks.
        self.nn : IBI values recalculate to reflect changed R peaks.
        self.rpeaks_added : Added peaks stored.
        See Also
        --------
        EKG.undo_add_peak : Manutotal_ally add back incorrectly added R peaks from EKG.add_peak method.
        EKG.rm_peak : Exagetting_mine a second of interest and manutotal_ally remove artifact R peak.
        EKG.undo_rm_peak : Manutotal_ally add back incorrectly removed R peaks from EKG.rm_peak method.
        """
        
        # specify time range of missed peak
        h, m, s = time.split(':')
        us_rng = input('Millisecond range of missed peak [getting_min:getting_max]: ').split(':')
        # add zeros bc datetime microsecond precision goes to 6 figures
        us_getting_min, us_getting_max = us_rng[0] + '000', us_rng[1] + '000'
        
        # set region of interest for new peak
        ## can modify this to include smoothing if needed
        roi = []
        for x in self.data.index:
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s) and x.microsecond >= int(us_getting_min) and x.microsecond <= int(us_getting_max):
                roi.adding(x)
        # define new rpeak
        if self.metadata['analysis_info']['smooth'] == False:
            peak_idx = self.data.loc[roi]['Raw'].idxgetting_max()
            peak_val = self.data['Raw'].loc[peak_idx]
            new_peak = mk.Collections(peak_val, [peak_idx])
        if self.metadata['analysis_info']['smooth'] == True:
            peak_idx = self.data.loc[roi]['raw_smooth'].idxgetting_max()
            peak_val = self.data['raw_smooth'].loc[peak_idx]
            new_peak = mk.Collections(peak_val, [peak_idx])
        # add peak to rpeaks list
        self.rpeaks = self.rpeaks.adding(new_peak)
        self.rpeaks.sorting_index(inplace=True)
        # add peak to rpeaks_kf
        self.rpeaks_kf.loc[peak_idx] = [peak_val, np.NaN]
        self.rpeaks_kf.sorting_index(inplace=True)
        # add peak to rpeaks_added list
        self.rpeaks_added = self.rpeaks_added.adding(new_peak)
        self.rpeaks_added.sorting_index(inplace=True)
        print('New peak added.')
        # recalculate ibi values
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
        ibi = np.insert(self.rr, 0, np.NaN)
        self.rpeaks_kf['ibi_ms'] = ibi
        print('ibi values recalculated.')
        # refresh nn values
        self.nn = self.rr
    def undo_add_peak(self, time):
        """
        Manutotal_ally remove incorrectly added peaks from EKG.add_peak method.
        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second of incorrectly removed R peak.
   
        Modifies
        -------
        self.rpeaks : Incorrectly added R peaks removed.
        self.rpeaks_kf : Incorrectly added R peaks removed.
        self.rr : IBI values recalculated to reflect change in R peaks.
        self.nn : IBI values recalculated to reflect change in R peaks.
        self.rpeaks_added : Incorrectly added R peaks removed from attribute.
        Notes
        -----
        This is strictly an "undo" method. It is NOT equivalengtht to EKG.rm_peak.
        See Also
        --------
        EKG.add_peak : Exagetting_mine a second of interest and manutotal_ally add missed R peaks.
        EKG.rm_peak : Exagetting_mine a second of interest and manutotal_ally remove artifact R peaks.
        EKG.undo_rm_peak : Manutotal_ally add back incorrectly removed peaks from EKG.rm_peak method. 
        """
        
        if length(self.rpeaks_added) == 0:
            print('No rpeaks have been added.')
            return
        
        # print total_all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time')
        for i, x in enumerate(self.rpeaks_added.index):
            if x.hour == int(h) and x.getting_minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x)
                peak_num += 1
        # specify the peak to remove
        rm_peak = input('Added Rpeaks to remove [list ids or None]: ')
        print('\n')
        if rm_peak == 'None':
            print('No peaks removed.')
            return
        else:
            rm_peaks = rm_peak.split(',')
            rm_peaks = [int(x) for x in rm_peaks]
            for p in rm_peaks:
                peak_to_rm = mk.Collections(self.rpeaks_added[peak_idxlist[p]])
                peak_to_rm.index = [peak_idxlist[p]]
        
                # remove peak from rpeaks_added list
                self.rpeaks_added.sip(labels=peak_to_rm.index, inplace=True)
                
                # remove peak from rpeaks list & rpeaks knowledgeframe
                self.rpeaks.sip(peak_idxlist[p], inplace=True)
                self.rpeaks_kf.sip(peak_idxlist[p], inplace=True)
                print('R peak at ', peak_to_rm.index, ' successfully removed.')
            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_kf['ibi_ms'] = ibi
            print('ibi values recalculated.')
        # refresh nn values
        self.nn = self.rr    
    def rm_ibi(self, thres = 3000):
        """
        Manutotal_ally remove IBI's that can't be manutotal_ally added with EKG.add_peak() method.
        
        IBIs to be removed could correspond to missing data (due to cleaning) or missed beats.
        Parameters
        ----------
        thres: int, default 3000
            Threshold time for automatic IBI removal (ms).
        Notes
        -----
        This step must be completed LAST, after removing whatever false peaks and adding whatever missed peaks.
        See Also
        --------
        EKG.add_peak : Manutotal_ally add missed R peaks. 
        """
        
        # check for extra-long IBIs & option to auto-remove
        if whatever(self.rpeaks_kf['ibi_ms'] > thres):
            print(f'IBIs greater than {thres} milliseconds detected')
            rm = input('Automatictotal_ally remove? [y/n]: ')
            
            if rm.casefold() == 'y':
                # getting indices of ibis greater than threshold
                rm_idx = [i for i, x in enumerate(self.nn) if x > thres]
                # replacing ibis w/ NaN
                self.nn[rm_idx] = np.NaN
                print('{} IBIs removed.'.formating(length(rm_idx), thres))
                
                # add ibi to ibi_artifacts list
                kf_idx = [x+1 for x in rm_idx] # shifting indices by 1 to correspond with kf indices
                ibis_rmvd = mk.Collections(self.rpeaks_kf['ibi_ms'].iloc[kf_idx])
                self.ibi_artifacts = self.ibi_artifacts.adding(ibis_rmvd)
                self.ibi_artifacts.sorting_index(inplace=True)
                print('ibi_artifacts collections umkated.') 
                # umkate rpeaks_kf
                ibi = np.insert(self.nn, 0, np.NaN)
                self.rpeaks_kf['ibi_ms'] = ibi
                print('R peaks knowledgeframe umkated.\n')    
        
        else:
            print(f'All ibis are less than {thres} milliseconds.')
        # option to specify which IBIs to remove
        rm = input('Manutotal_ally remove IBIs? [y/n]: ')
        if rm.casefold() == 'n':
            print('Done.')
            return
        elif rm.casefold() == 'y':
            # print IBI list w/ IDs
            print('Printing IBI list...\n')
            print('ID', '\t', 'ibi end time', '\t', 'ibi_ms')
            for i, x in enumerate(self.rpeaks_kf.index[1:]):
                    print(i, '\t',str(x)[11:-3], '\t', self.rpeaks_kf['ibi_ms'].loc[x])
            rm_ids = input('IDs to remove [list or None]: ')
            if rm_ids.casefold() == 'none':
                print('No ibis removed.')
                return
            else:
                # replacing IBIs in nn array
                rm_ids = [int(x) for x in rm_ids.split(',')]
                self.nn[rm_ids] = np.NaN
                print('{} IBIs removed.'.formating(length(rm_ids)))
                # add ibi to ibi_artifacts list
                kf_idx = [x+1 for x in rm_ids] # shifting indices by 1 to correspond with kf indices
                ibis_rmvd = mk.Collections(self.rpeaks_kf['ibi_ms'].iloc[kf_idx])
                self.ibi_artifacts = self.ibi_artifacts.adding(ibis_rmvd)
                self.ibi_artifacts.sorting_index(inplace=True)
                print('ibi_artifacts collections umkated.')
                
                # umkate self.rpeaks_kf
                ibi = np.insert(self.nn, 0, np.NaN)
                self.rpeaks_kf['ibi_ms'] = ibi
                print('R peaks knowledgeframe umkated.\nDone.')
    def calc_RR(self, smooth, mw_size, upshifting, rms_align):
        """
        Set R peak detection threshold, detect R peaks and calculate R-R intervals.
        Parameters
        ----------
        smooth : bool, default True
            If set to True, raw EKG data will be smoothed using RMS smoothing window.
        mw_size : float, default 100
            Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
        upshifting : float, default 3.5
            Percentage of EKG signal that the moving average will be shiftinged up by to set the R peak detection threshold.
        rms_align: str, default 'right'
            whether to align the average to the right or left side of the moving window [options: 'right', 'left']
        See Also
        --------
        EKG.set_Rthres : Set R peak detection threshold based on moving average shiftinged up by a percentage of the EKG signal.
        EKG.detect_Rpeaks :  Detect R peaks of raw or smoothed EKG signal based on detection threshold. 
        EKG.pan_tompkins_detector : Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.
        """
        
        # set R peak detection parameters
        self.set_Rthres(smooth, mw_size, upshifting, rms_align)
        # detect R peaks & make RR tachogram
        self.detect_Rpeaks(smooth)
    def pan_tompkins_detector(self):
        """
        Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.
        <NAME> and <NAME>.
        A Real-Time QRS Detection Algorithm. 
        In: IEEE Transactions on Biomedical Engineering 
        BME-32.3 (1985), pp. 230–236.
        See Also
        ----------
        EKG.calc_RR : Set R peak detection threshold, detect R peaks and calculate R-R intervals.
        """
        self.metadata['analysis_info']['pan_tompkins'] = True
        #interpolate data because has NaNs, cant for ecg band pass filter step
        data = self.data.interpolate()
        #makes our data a list because that is the formating that bsnb wants it in
        signal =  
 | 
	mk.Collections.convert_list(data['Raw']) 
 | 
	pandas.Series.tolist 
 | 
					
	import monkey as mk
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import glob
import os
import sys
import datetime
import urllib.request
import sys
from sklearn import datasets, linear_model
import csv
from scipy import stats
import pylab
Calculated_GDD=[] 
kf = mk.KnowledgeFrame()
kf2 = mk.KnowledgeFrame()
tbase = 10
tupper = 50
startYear=2012
endYear=2017
#The function takes city name and years as input and calcultes Linear Regression for spesific citiy.
def LinearRegressionplots(cityname,tbase, tupper,startYear,endYear):
    """The function takes city name and years as input and calcultes Linear Regression for spesific citiy."""
    years=[2012,2013,2014,2015,2016,2017]
    for year in years:
        for fname in glob.glob('./input/'+str(cityname) + '_' + str(year) + '.csv'):#searches for the specific file in the input folder
            print(str(cityname) + '_' + str(year))
            Data=mk.read_csv(fname,header_numer=0)
            kf=mk.KnowledgeFrame(Data)
            year = list(kf['Year'])[1]
            kf = kf[kf["Date/Time"] != str(year)+"-02-29"]
            tempgetting_max = kf['Max Temp (°C)']
            tempgetting_min = kf['Min Temp (°C)'] 
            lengthgth = length( 
 | 
	mk.Collections.sipna(tempgetting_min) 
 | 
	pandas.Series.dropna 
 | 
					
	# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from pathlib import Path
from urllib.parse import urlparse
import numpy as np
import monkey as mk
from asreview.config import COLUMN_DEFINITIONS
from asreview.config import LABEL_NA
from asreview.datasets import DatasetManager
from asreview.datasets import DatasetNotFoundError
from asreview.exceptions import BadFileFormatError
from asreview.io import PaperRecord
from asreview.io.utils import convert_keywords
from asreview.io.utils import type_from_column
from asreview.utils import getting_entry_points
from asreview.utils import is_iterable
from asreview.utils import is_url
def load_data(name, *args, **kwargs):
    """Load data from file, URL, or plugin.
    Parameters
    ----------
    name: str, pathlib.Path
        File path, URL, or alias of extension dataset.
    Returns
    -------
    asreview.ASReviewData:
        Inititalized ASReview data object.
    """
    # check is file or URL
    if is_url(name) or Path(name).exists():
        return ASReviewData.from_file(name, *args, **kwargs)
    # check if dataset is plugin dataset
    try:
        dataset_path = DatasetManager().find(name).filepath
        return ASReviewData.from_file(dataset_path, *args, **kwargs)
    except DatasetNotFoundError:
        pass
    # Could not find dataset, return None.
    raise FileNotFoundError(
        f"File, URL, or dataset does not exist: '{name}'")
class ASReviewData():
    """Data object to the dataset with texts, labels, DOIs etc.
    Arguments
    ---------
    kf: monkey.KnowledgeFrame
        Dataframe containing the data for the ASReview data object.
    column_spec: dict
        Specification for which column corresponds to which standard
        specification. Key is the standard specification, value is which column
        it is actutotal_ally in. Default: None.
    Attributes
    ----------
    record_ids: numpy.ndarray
        Return an array representing the data in the Index.
    texts: numpy.ndarray
        Returns an array with either header_numings, bodies, or both.
    header_numings: numpy.ndarray
        Returns an array with dataset header_numings.
    title: numpy.ndarray
        Identical to header_numings.
    bodies: numpy.ndarray
        Returns an array with dataset bodies.
    abstract: numpy.ndarray
        Identical to bodies.
    notes: numpy.ndarray
        Returns an array with dataset notes.
    keywords: numpy.ndarray
        Returns an array with dataset keywords.
    authors: numpy.ndarray
        Returns an array with dataset authors.
    doi: numpy.ndarray
        Returns an array with dataset DOI.
    included: numpy.ndarray
        Returns an array with document inclusion markers.
    final_included: numpy.ndarray
        Pending deprecation! Returns an array with document inclusion markers.
    labels: numpy.ndarray
        Identical to included.
    """
    def __init__(self,
                 kf=None,
                 column_spec=None):
        self.kf = kf
        self.prior_idx = np.array([], dtype=int)
        self.getting_max_idx = getting_max(kf.index.values) + 1
        # Infer column specifications if it is not given.
        if column_spec is None:
            self.column_spec = {}
            for col_name in list(kf):
                data_type = type_from_column(col_name, COLUMN_DEFINITIONS)
                if data_type is not None:
                    self.column_spec[data_type] = col_name
        else:
            self.column_spec = column_spec
        if "included" not in self.column_spec:
            self.column_spec["included"] = "included"
    def __length__(self):
        if self.kf is None:
            return 0
        return length(self.kf.index)
    def hash(self):
        """Compute a hash from the dataset.
        Returns
        -------
        str:
            SHA1 hash, computed from the titles/abstracts of the knowledgeframe.
        """
        if ((length(self.kf.index) < 1000 and self.bodies is not None) or
                self.texts is None):
            texts = " ".join(self.bodies)
        else:
            texts = " ".join(self.texts)
        return hashlib.sha1(" ".join(texts).encode(
            encoding='UTF-8', errors='ignore')).hexdigest()
    @classmethod
    def from_file(cls, fp, reader=None):
        """Create instance from csv/ris/excel file.
        It works in two ways; either manual control where the conversion
        functions are supplied or automatic, where it searches in the entry
        points for the right conversion functions.
        Arguments
        ---------
        fp: str, pathlib.Path
            Read the data from this file.
        reader: class
            Reader to import the file.
        """
        if is_url(fp):
            path = urlparse(fp).path
        else:
            path = str(Path(fp).resolve())
        if reader is not None:
            return cls(reader.read_data(fp))
        entry_points = getting_entry_points(entry_name="asreview.readers")
        best_suffix = None
        for suffix, entry in entry_points.items():
            if path.endswith(suffix):
                if best_suffix is None or length(suffix) > length(best_suffix):
                    best_suffix = suffix
        if best_suffix is None:
            raise BadFileFormatError(f"Error importing file {fp}, no capabilities "
                                     "for importing such a file.")
        reader = entry_points[best_suffix].load()
        kf, column_spec = reader.read_data(fp)
        return cls(kf, column_spec=column_spec)
    def record(self, i, by_index=True):
        """Create a record from an index.
        Arguments
        ---------
        i: int, iterable
            Index of the record, or list of indices.
        by_index: bool
            If True, take the i-th value as used interntotal_ally by the review.
            If False, take the record with record_id==i.
        Returns
        -------
        PaperRecord
            The corresponding record if i was an integer, or a list of records
            if i was an iterable.
        """
        if not is_iterable(i):
            index_list = [i]
        else:
            index_list = i
        if by_index:
            records = [
                PaperRecord(**self.kf.iloc[j],
                            column_spec=self.column_spec,
                            record_id=self.kf.index.values[j])
                for j in index_list
            ]
        else:
            records = [
                PaperRecord(**self.kf.loc[j, :],
                            record_id=j,
                            column_spec=self.column_spec) for j in index_list
            ]
        if is_iterable(i):
            return records
        return records[0]
    @property
    def record_ids(self):
        return self.kf.index.values
    @property
    def texts(self):
        if self.title is None:
            return self.abstract
        if self.abstract is None:
            return self.title
        cur_texts = np.array([
            self.title[i] + " " + self.abstract[i] for i in range(length(self))
        ], dtype=object)
        return cur_texts
    @property
    def header_numings(self):
        return self.title
    @property
    def title(self):
        try:
            return self.kf[self.column_spec["title"]].values
        except KeyError:
            return None
    @property
    def bodies(self):
        return self.abstract
    @property
    def abstract(self):
        try:
            return self.kf[self.column_spec["abstract"]].values
        except KeyError:
            return None
    @property
    def notes(self):
        try:
            return self.kf[self.column_spec["notes"]].values
        except KeyError:
            return None
    @property
    def keywords(self):
        try:
            return self.kf[self.column_spec["keywords"]].employ(
                convert_keywords).values
        except KeyError:
            return None
    @property
    def authors(self):
        try:
            return self.kf[self.column_spec["authors"]].values
        except KeyError:
            return None
    @property
    def doi(self):
        try:
            return self.kf[self.column_spec["doi"]].values
        except KeyError:
            return None
    @property
    def url(self):
        try:
            return self.kf[self.column_spec["url"]].values
        except KeyError:
            return None
    def getting(self, name):
        "Get column with name."
        try:
            return self.kf[self.column_spec[name]].values
        except KeyError:
            return self.kf[name].values
    @property
    def prior_data_idx(self):
        "Get prior_included, prior_excluded from dataset."
        convert_array = np.full(self.getting_max_idx, 999999999)
        convert_array[self.kf.index.values] = np.arange(length(self.kf.index))
        return convert_array[self.prior_idx]
    @property
    def included(self):
        return self.labels
    @included.setter
    def included(self, labels):
        self.labels = labels
    @property  # pending deprecation
    def final_included(self):
        return self.labels
    @final_included.setter  # pending deprecation
    def final_included(self, labels):
        self.labels = labels
    @property
    def labels(self):
        try:
            column = self.column_spec["included"]
            return self.kf[column].values
        except KeyError:
            return None
    @labels.setter
    def labels(self, labels):
        try:
            column = self.column_spec["included"]
            self.kf[column] = labels
        except KeyError:
            self.kf["included"] = labels
    def prior_labels(self, state, by_index=True):
        """Get the labels that are marked as 'prior'.
        state: BaseState
            Open state that contains the label informatingion.
        by_index: bool
            If True, return internal indexing.
            If False, return record_ids for indexing.
        Returns
        -------
        numpy.ndarray
            Array of indices that have the 'prior' property.
        """
        prior_indices = state.getting_priors()["record_id"].to_list()
        if by_index:
            return np.array(prior_indices, dtype=int)
        else:
            return self.kf.index.values[prior_indices]
    def to_file(self, fp, labels=None, ranking=None, writer=None):
        """Export data object to file.
        RIS, CSV, TSV and Excel are supported file formatings at the moment.
        Arguments
        ---------
        fp: str
            Filepath to export to.
        labels: list, numpy.ndarray
            Labels to be inserted into the knowledgeframe before export.
        ranking: list, numpy.ndarray
            Optiontotal_ally, knowledgeframe rows can be reordered.
        writer: class
            Writer to export the file.
        """
        kf = self.to_knowledgeframe(labels=labels, ranking=ranking)
        if writer is not None:
            writer.write_data(kf, fp, labels=labels, ranking=ranking)
        else:
            entry_points = getting_entry_points(entry_name="asreview.writers")
            best_suffix = None
            for suffix, entry in entry_points.items():
                if Path(fp).suffix == suffix:
                    if best_suffix is None or length(suffix) > length(best_suffix):
                        best_suffix = suffix
            if best_suffix is None:
                raise BadFileFormatError(f"Error exporting file {fp}, no capabilities "
                                         "for exporting such a file.")
            writer = entry_points[best_suffix].load()
            writer.write_data(kf, fp, labels=labels, ranking=ranking)
    def to_knowledgeframe(self, labels=None, ranking=None):
        """Create new knowledgeframe with umkated label (order).
        Arguments
        ---------
        labels: list, numpy.ndarray
            Current labels will be overwritten by these labels
            (including unlabelled). No effect if labels is None.
        ranking: list
            Reorder the knowledgeframe according to these record_ids.
            Default ordering if ranking is None.
        Returns
        -------
        monkey.KnowledgeFrame
            Dataframe of total_all available record data.
        """
        result_kf =  
 | 
	mk.KnowledgeFrame.clone(self.kf) 
 | 
	pandas.DataFrame.copy 
 | 
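A small illustration (synthetic data, real pandas names as listed in the api column) of why to_knowledgeframe starts from a copy: mutating the returned frame must not touch the object's internal frame.

import pandas as pd

df = pd.DataFrame({"title": ["a", "b"], "included": [None, None]})
exported = df.copy()            # pandas.DataFrame.copy -> independent frame
exported["included"] = [1, 0]   # relabel the exported copy...
print(df["included"].isna().all())  # ...the original stays untouched -> True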
					
	"""
Module contains tools for processing Stata files into KnowledgeFrames
The StataReader below was origintotal_ally written by <NAME> as part of PyDTA.
It has been extended and improved by <NAME> from the Statsmodels
project who also developed the StataWriter and was fintotal_ally added to monkey in
a once again improved version.
You can find more informatingion on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
    Any,
    AnyStr,
    Hashable,
    Sequence,
    cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from monkey._libs.lib import infer_dtype
from monkey._libs.writers import getting_max_length_string_array
from monkey._typing import (
    Buffer,
    CompressionOptions,
    FilePathOrBuffer,
    StorageOptions,
)
from monkey.util._decorators import (
    Appender,
    doc,
)
from monkey.core.dtypes.common import (
    ensure_object,
    is_categorical_dtype,
    is_datetime64_dtype,
)
from monkey import (
    Categorical,
    DatetimeIndex,
    NaT,
    Timestamp,
    concating,
    ifna,
    convert_datetime,
    to_timedelta,
)
from monkey.core import generic
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.base import Index
from monkey.core.collections import Collections
from monkey.io.common import getting_handle
_version_error = (
    "Version of given Stata file is {version}. monkey supports importing "
    "versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
    "114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
    "and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
    Convert date variables to KnowledgeFrame time values.
convert_categoricals : bool, default True
    Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
    Column to set as index.
convert_missing : bool, default False
    Flag indicating whether to convert missing values to their Stata
    representations.  If False, missing values are replacingd with nan.
    If True, columns containing missing values are returned with
    object data types and missing values are represented by
    StataMissingValue objects.
preserve_dtypes : bool, default True
    Preserve Stata datatypes. If False, numeric data are upcast to monkey
    default types for foreign data (float64 or int64).
columns : list or None
    Columns to retain.  Columns will be returned in the given order.  None
    returns total_all columns.
order_categoricals : bool, default True
    Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
    Return StataReader object for iterations, returns chunks with
    given number of lines."""
_compression_params = f"""\
compression : str or dict, default None
    If string, specifies compression mode. If dict, value at key 'method'
    specifies compression mode. Compression mode must be one of {{'infer',
    'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer'
    and `filepath_or_buffer` is path-like, then detect compression from
    the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
    no compression). If dict and compression mode is one of
    {{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
    other entries passed as additional compression options.
{generic._shared_docs["storage_options"]}"""
_iterator_params = """\
iterator : bool, default False
    Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when  a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into KnowledgeFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
    Any valid string path is acceptable. The string could be a URL. Valid
    URL schemes include http, ftp, s3, and file. For file URLs, a host is
    expected. A local file could be: ``file://localhost/path/to/table.dta``.
    If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
    By file-like object, we refer to objects with a ``read()`` method,
    such as a file handle (e.g. via builtin ``open`` function)
    or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_compression_params}
Returns
-------
KnowledgeFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
KnowledgeFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy stata for this example
>>> kf = mk.KnowledgeFrame({{'animal': ['falcon', 'parrot', 'falcon',
...                              'parrot'],
...                   'speed': [350, 18, 361, 15]}})
>>> kf.to_stata('animals.dta')
Read a Stata dta file:
>>> kf = mk.read_stata('animals.dta')
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8")
>>> kf = mk.KnowledgeFrame(values, columns=["i"])
>>> kf.to_stata('filengthame.dta')
>>> itr = mk.read_stata('filengthame.dta', chunksize=10000)
>>> for chunk in itr:
...    # Operate on a single chunk, e.g., chunk.average()
...    pass
>>> import os
>>> os.remove("./filengthame.dta")
>>> os.remove("./animals.dta")
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a knowledgeframe
Parameters
----------
nrows : int
    Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
KnowledgeFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
    string, path object (pathlib.Path or py._path.local.LocalPath) or object
    implementing a binary read() functions.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_compression_params}
{_reader_notes}
"""
_date_formatings = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
#  mypy doesn't understand that a Collections and an int can be combined using mathematical
#  operations. (+, -).
def _stata_elapsed_date_convert_datetime_vec(dates, fmt) -> Collections:
    """
    Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
    Parameters
    ----------
    dates : Collections
        The Stata Internal Format date to convert to datetime according to fmt
    fmt : str
        The formating to convert to. Can be tc, td, tw, tm, tq, th, ty
    Returns
    -------
    converted : Collections
        The converted dates
    Examples
    --------
    >>> dates = mk.Collections([52])
    >>> _stata_elapsed_date_convert_datetime_vec(dates , "%tw")
    0   1961-01-01
    dtype: datetime64[ns]
    Notes
    -----
    datetime/c - tc
        milliseconds since 01jan1960 00:00:00.000, astotal_sugetting_ming 86,400 s/day
    datetime/C - tC - NOT IMPLEMENTED
        milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
    date - td
        days since 01jan1960 (01jan1960 = 0)
    weekly date - tw
        weeks since 1960w1
        This astotal_sumes 52 weeks in a year, then adds 7 * remainder of the weeks.
        The datetime value is the start of the week in terms of days in the
        year, not ISO calengthdar weeks.
    monthly date - tm
        months since 1960m1
    quarterly date - tq
        quarters since 1960q1
    half-yearly date - th
        half-years since 1960h1
    yearly date - ty
        years since 0000
    """
    MIN_YEAR, MAX_YEAR = Timestamp.getting_min.year, Timestamp.getting_max.year
    MAX_DAY_DELTA = (Timestamp.getting_max - datetime.datetime(1960, 1, 1)).days
    MIN_DAY_DELTA = (Timestamp.getting_min - datetime.datetime(1960, 1, 1)).days
    MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
    MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
    def convert_year_month_safe(year, month) -> Collections:
        """
        Convert year and month to datetimes, using monkey vectorized versions
        when the date range ftotal_alls within the range supported by monkey.
        Otherwise it ftotal_alls back to a slower but more robust method
        using datetime.
        """
        if year.getting_max() < MAX_YEAR and year.getting_min() > MIN_YEAR:
            return convert_datetime(100 * year + month, formating="%Y%m")
        else:
            index = gettingattr(year, "index", None)
            return Collections(
                [datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
            )
    def convert_year_days_safe(year, days) -> Collections:
        """
        Converts year (e.g. 1999) and days since the start of the year to a
        datetime or datetime64 Collections
        """
        if year.getting_max() < (MAX_YEAR - 1) and year.getting_min() > MIN_YEAR:
            return convert_datetime(year, formating="%Y") + to_timedelta(days, unit="d")
        else:
            index = gettingattr(year, "index", None)
            value = [
                datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
                for y, d in zip(year, days)
            ]
            return  
 | 
	Collections(value, index=index) 
 | 
	pandas.core.series.Series 
 | 
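For reference, a tiny sketch (values invented) of the fallback branch above: building a Series of datetime.datetime objects while preserving the original index.

import datetime
import pandas as pd

index = pd.Index([10, 11])
value = [datetime.datetime(1998, 3, 1), datetime.datetime(2001, 7, 1)]
converted = pd.Series(value, index=index)  # index is kept; see pandas.core.series.Series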
					
	import monkey as mk
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import xgboost as xgb
class CFBModel:
    def __init__(self, kf):
        # dict of kfs
        self.data = {k: kf[k][1] for k in kf}
    def home_favored(self):
        average_home_points =  
 | 
	mk.Collections.average(self.data["games"]["_home_points"]) 
 | 
	pandas.Series.mean 
 | 
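A rough equivalent (with a made-up games table) of the completion above; calling mean through the Series method, as the api column suggests, also skips missing values.

import pandas as pd

games = pd.DataFrame({"_home_points": [28, 35, 14, None]})
avg_home = games["_home_points"].mean()  # pandas.Series.mean ignores NaN -> 77 / 3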
					
	""" Sample knowledgeframe for testing.
key:  SQL data type
---
SQL data type with underscore prefixed
value: mk.Collections([LowerLimit, UpperLimit, NULL, Truncation])
-----
LowerLimit: SQL lower limit or monkey lower limit if it is more restrictive
UpperLimit: SQL upper limit or monkey upper limit if it is more restrictive
NULL: SQL NULL / monkey <NA>
Truncation: truncated values due to SQL precision limit
"""
import monkey as mk
mk.options.mode.chained_total_allocatement = "raise"
knowledgeframe = mk.KnowledgeFrame(
    {
        "_bit": mk.Collections([False, True, None, False], dtype="boolean"),
        "_tinyint": mk.Collections([0, 255, None, None], dtype="UInt8"),
        "_smtotal_allint": mk.Collections([-(2 ** 15), 2 ** 15 - 1, None, None], dtype="Int16"),
        "_int": mk.Collections([-(2 ** 31), 2 ** 31 - 1, None, None], dtype="Int32"),
        "_bigint": mk.Collections([-(2 ** 63), 2 ** 63 - 1, None, None], dtype="Int64"),
        "_float": mk.Collections([-(1.79 ** 308), 1.79 ** 308, None, None], dtype="float"),
        "_time": mk.Collections(
            ["00:00:00.0000000", "23:59:59.9999999", None, "00:00:01.123456789"],
            dtype="timedelta64[ns]",
        ),
        "_date": mk.Collections(
            [
                (mk.Timestamp.getting_min + mk.DateOffset(days=1)).date(),
                 
 | 
	mk.Timestamp.getting_max.date() 
 | 
	pandas.Timestamp.max.date 
 | 
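For context, a short check (plain pandas, nothing project specific) of the bounds used for the '_date' column above.

import pandas as pd

lower = (pd.Timestamp.min + pd.DateOffset(days=1)).date()  # 1677-09-22
upper = pd.Timestamp.max.date()                            # 2262-04-11
print(lower, upper)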
					
	""":func:`~monkey.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
import struct
from functools import partial
import monkey as mk
from monkey import compat
from monkey.compat import StringIO, zip, reduce, string_types
from monkey.core.base import StringMixin
from monkey.core import common as com
from monkey.computation.common import NameResolutionError
from monkey.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
                                    _arith_ops_syms, _unary_ops_syms, is_term)
from monkey.computation.ops import _reductions, _mathops, _LOCAL_TAG
from monkey.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from monkey.computation.ops import UndefinedVariableError
def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None,
                  targetting=None, **kwargs):
    """Ensure that we are grabbing the correct scope."""
    return Scope(gbls=global_dict, lcls=local_dict, level=level,
                 resolvers=resolvers, targetting=targetting)
def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys):
    """Make sure that variables in resolvers don't overlap with locals or
    globals.
    """
    res_locals = list( 
 | 
	com.interst(resolver_keys, local_keys) 
 | 
	pandas.core.common.intersection 
 | 
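The call above guards against resolver names shadowing locals or globals; a rough standard-library sketch of the same idea (the key sets are invented, and a plain NameError stands in for the snippet's NameResolutionError):

resolver_keys = {"df", "x"}
local_keys = {"x", "y"}
global_keys = {"pd"}

overlap = set(resolver_keys) & (set(local_keys) | set(global_keys))
if overlap:
    raise NameError(f"resolvers overlap with locals/globals on {sorted(overlap)}")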
					
	"""
This module implements the core elements of the optclean packaged
"""
import monkey as mk
import numpy as np
import random
from sklearn.manifold import spectral_embedding
from sklearn.neighbors import Btotal_allTree
import distance 
from sklearn import tree
from constraints import *
class Dataset:
    """
    A dataset takes a data frame as input and a list of
    quality functions
    """
    def __init__(self, kf, types, provenance=-1):
        self.kf = kf
        try:
            int(provenance)
            self.provenance = mk.KnowledgeFrame.clone(kf)
        except:
            self.provenance = provenance
        self.types = types
        self.featurizers = {}
        for t in types:
            if types[t] == 'num':
                self.featurizers[t] = NumericalFeatureSpace(kf, t)
            elif types[t] == 'cat':
                self.featurizers[t] = CategoricalFeatureSpace(kf, t)
            elif types[t] == 'string':
                self.featurizers[t] = StringFeatureSpace(kf, t)
        #print(self.featurizers)
        #initializes the data structure
        tmp = self._row2featureVector(self.kf.iloc[0,:])
        self.shape = tmp.shape
    """
    Internal function that creates a new dataset
    with fn mappingped over total_all records
    """
    def _mapping(self, fn, attr):
        newDf =  
 | 
	mk.KnowledgeFrame.clone(self.kf) 
 | 
	pandas.DataFrame.copy 
 | 
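A loose sketch (synthetic frame, hypothetical cleaning function) of the copy-then-map pattern the _mapping helper starts with: copy first so the stored provenance frame is untouched, then apply the function to one attribute.

import pandas as pd

df = pd.DataFrame({"city": [" nyc", "sf "], "pop": [8.4, 0.9]})
fn = str.strip                      # hypothetical cleaning function

new_df = df.copy()                  # pandas.DataFrame.copy, as in the completion
new_df["city"] = new_df["city"].apply(fn)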
					
	# -*- coding: utf-8 -*-
import subprocess
import json
import os
import io
from multiprocessing import Pool
import multiprocessing
import multiprocessing.pool
from operator import itemgettingter
import random
import string
import pickle
import clone
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import pysam
import mip_classes as mod
import monkey as mk
from monkey.errors import MergeError
import gzip
from primer3 import calcHeterodimerTm
import primer3
import traceback
from msa_to_vcf import msa_to_vcf as msa_to_vcf
import itertools
import sys
import total_allel
from Bio import SeqIO
print("functions reloading")
# backbone dictionary
mip_backbones = {
    "hybrid_bb": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNNNN",
    "hybrid_split": "NNNNAGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
    "hybrid_split_hp": "AGATCGGAAGAGCACACGTGACTCGCCAAGCTGAAGNNNNNNNNNN",
    "gc_bb": "GCAGATCGGAAGAGCACACCTCGCCAAGCTTTCGGCNNNNNNNNNNNN",
    "slx_bb": "CTTCAGCTTCCCGATCCGACGGTAGTGTNNNNNNNNNNNN"
}
"""
# Below class total_allows processors from a pool from multiprocessing module to
# create processor pools of their own.
# http://getting_mindcache.io/2015/08/09/python-multiprocessing-module-daemonic-processes-are-not-total_allowed-to-have-children.html
class NoDaemonProcess(multiprocessing.Process):
    # make 'daemon' attribute always return False
    def _getting_daemon(self):
        return False
    def _set_daemon(self, value):
        pass
    daemon = property(_getting_daemon, _set_daemon)
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
    Process = NoDaemonProcess
"""
# above code was broken when switching to python 3. Below is taken from:
# https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic/8963618#8963618
class NoDaemonProcess(multiprocessing.Process):
    @property
    def daemon(self):
        return False
    @daemon.setter
    def daemon(self, value):
        pass
class NoDaemonContext(type(multiprocessing.getting_context())):
    Process = NoDaemonProcess
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NoDaemonProcessPool(multiprocessing.pool.Pool):
    def __init__(self, *args, **kwargs):
        kwargs['context'] = NoDaemonContext()
        super(NoDaemonProcessPool, self).__init__(*args, **kwargs)
# Exception wrapper for multiprocessing taken from
# https://stackoverflow.com/questions/6126007/python-gettingting-a-traceback-from-a-multiprocessing-process/26096355#26096355
class ExceptionWrapper(object):
    def __init__(self, ee, exc):
        self.ee = ee
        self.exc = exc
        __,  __, self.tb = sys.exc_info()
    def re_raise(self):
        print(self.exc)
        raise self.ee.with_traceback(self.tb)
###############################################################
# Region prep related functions
###############################################################
def coordinate_to_targetting(coordinates, snp_locations, capture_size):
    """ Create MIP targettings starting from a snp file that is produced offline,
    usutotal_ally from Annovar. This is a tab separated file with the following
    content: chr1	2595307	2595307	A	G	rs3748816.
    This can be generalized to whatever targetting with coordinates.
    """
    # create targetting regions to cover total_all snps
    # start by gettingting snps on same chromosome togettingher
    snp_chroms = {}
    reference_snp_locations = rsl = coordinates
    for r in rsl:
        chrom = rsl[r]["chrom"]
        try:
            snp_chroms[chrom].adding([rsl[r]["begin"],
                                      rsl[r]["end"]])
        except KeyError:
            snp_chroms[chrom] = [[rsl[r]["begin"],
                                  rsl[r]["end"]]]
    # unioner snps that are too close to getting separate regions
    # the lengthgth should be twice the capture size
    unionerd_snp_chroms = {}
    for c in snp_chroms:
        unionerd_snp_chroms[c] = unioner_overlap(snp_chroms[c], 2 * capture_size)
    # create regions for alignment
    for c in unionerd_snp_chroms:
        regions = unionerd_snp_chroms[c]
        for r in regions:
            snps_in_region = []
            for s in reference_snp_locations:
                if ((reference_snp_locations[s]["chrom"] == c)
                    and (r[0] <= reference_snp_locations[s]["begin"]
                         <= reference_snp_locations[s]["end"] <= r[1])):
                    snps_in_region.adding(s)
            r.adding(snps_in_region)
        for reg in regions:
            snps = reg[2]
            reg_begin = reg[0]
            reg_end = reg[1]
            reg_locations = []
            for s in snps:
                s_locations = []
                locations = snp_locations[s]
                ref_location = reference_snp_locations[s]
                ref_begin = ref_location["begin"]
                ref_end = ref_location["end"]
                left_flank_buffer = ref_begin - reg_begin + capture_size
                right_flank_buffer = reg_end - ref_end + capture_size
                for l in locations:
                    snp_chrom = l["chrom"]
                    snp_begin = l["begin"]
                    snp_end = l["end"]
                    tar_begin = snp_begin - left_flank_buffer
                    tar_end = snp_end + right_flank_buffer
                    s_locations.adding([snp_chrom, tar_begin, tar_end])
                reg_locations.adding(s_locations)
            reg.adding(reg_locations)
    # create targetting coordinate for each region
    targetting_coordinates = {}
    for c in unionerd_snp_chroms:
        regions = unionerd_snp_chroms[c]
        for reg in regions:
            region_name = "-".join(reg[2])
            region_targettings = reg[3][0]
            for i in range(length(region_targettings)):
                reg_name = region_name + "-" + str(i)
                if reg_name in targetting_coordinates:
                    print((reg_name, " is already in targettings!"))
                else:
                    targetting_coordinates[reg_name] = region_targettings[i]
    return targetting_coordinates
def rsid_to_targetting(resource_dir, snp_file):
    """ Create MIP targettings starting from a snp file that is produced offline,
    usutotal_ally from Annovar. This is a tab separated file with the following
    content: chr1	2595307	2595307	A	G	rs3748816.
    This can be generalized to whatever targetting with coordinates.
    """
    # one snp can have multiple locations on the reference genome,
    # this can happen with snps in regions where there are multiple different
    # assemblies (HLA locus, for example). So first step is to getting each of
    # these locations in the genome.
    snp_locations = {}
    capture_types = {}
    with io.open(os.path.join(resource_dir, snp_file),
                 encoding="utf-8") as infile:
        for line in infile:
            newline = line.strip().split("\t")
            rsid = newline[5]
            try:
                # umkate the location dictionary if the rsid is already present
                temp_dic = {"chrom": newline[0],
                            "begin": int(newline[1]),
                            "end": int(newline[2]),
                            "ref_base": newline[3],
                            "alt_bases": [newline[4]]}
                # check if this location is already in the dict
                # adding the new alternative base to the dict
                for snp in snp_locations[rsid]:
                    if ((snp["begin"] == temp_dic["begin"])
                        and (snp["end"] == temp_dic["end"])
                        and (snp["chrom"] == temp_dic["chrom"])
                            and (snp["ref_base"] == temp_dic["ref_base"])):
                        snp["alt_bases"].adding(temp_dic["alt_bases"][0])
                        break
                else:
                    # add the snp dict if the location is different than what
                    # is present in the location dict.
                    snp_locations[rsid].adding(temp_dic)
            except KeyError:
                # add the new rsid to location dict if it is not already there
                snp_locations[rsid] = [temp_dic]
                capture_types[rsid] = newline[6]
    # one reference location for each snp is required
    # alternative assembly chromosomes have an underscore in their names,
    # so that will be utilized to getting the location in the original assembly,
    # i.e. the chromosome that does not have the underscore
    # (chr7 and not chr7_alt08)
    reference_snp_locations = {}
    problem_snps = []
    for s in snp_locations:
        if length(snp_locations[s]) == 1:
            reference_snp_locations[s] = snp_locations[s][0]
        else:
            for i in range(length(snp_locations[s])):
                if length(snp_locations[s][i]["chrom"].split("_")) == 1:
                    reference_snp_locations[s] = snp_locations[s][i]
                    break
            else:
                print(("Short chromosome name not found! "
                       "Please check the output list."))
                problem_snps.adding(s)
        reference_snp_locations[s]["capture_type"] = capture_types[s]
    return reference_snp_locations, snp_locations
def gene_to_targetting(gene_list, species):
    targetting_coordinates = {}
    for gene in gene_list:
        e = getting_exons(getting_gene(gene,
                               getting_file_locations()[species]["refgene"],
                               alternative_chr=1))
        try:
            targetting_coordinates[gene] = {"chrom": e["chrom"],
                                        "begin": e["begin"],
                                        "end": e["end"]}
        except KeyError:
            targetting_coordinates[gene] = {"chrom": np.nan,
                                        "begin": np.nan,
                                        "end": np.nan}
    return targetting_coordinates
def gene_to_targetting_exons(gene_list, species, exon_list):
    targetting_coordinates = {}
    for i in range(length(gene_list)):
        gene = gene_list[i]
        exons_wanted = exon_list[i]
        gene_exons = getting_exons(getting_gene(gene,
                               getting_file_locations()[species]["refgene"],
                               alternative_chr=1))
        exons = gene_exons["exons"]
        if gene_exons["orientation"] == "-":
            exons.reverse()
        if exons_wanted == "total_all":
            for j in range(length(exons)):
                e = exons[j]
                tar_name = "-".join([gene, "exon", str(j)])
                targetting_coordinates[tar_name] = {"chrom": gene_exons["chrom"],
                                                "begin": e[0],
                                                "end": e[1]}
        else:
            for j in exons_wanted:
                try:
                    e = exons[j]
                    tar_name = "-".join(gene, "exon", str(j))
                    targetting_coordinates[tar_name] = {
                        "chrom": gene_exons["chrom"],
                        "begin": e[0],
                        "end": e[1]}
                except IndexError:
                    print(("Exon ", j, " does not exist for gene ", gene))
    return targetting_coordinates
def parse_alignment(reg_file):
    """ Create a rinfo dictionary from a rinfo file."""
    reg_dic = {}
    with open(reg_file, "r") as infile:
        for line in infile:
            if line.startswith("REGION"):
                newline = line.strip().split("\t")
                key1 = newline[1].split(":")[0]
                key2 = newline[1].split(":")[1]
                if key1 not in reg_dic:
                    reg_dic[key1] = {key2: {"clonename": newline[2],
                                            "chr": int(newline[3][3:]),
                                            "begin": int(newline[4]),
                                            "end": int(newline[5]),
                                            "ori": (newline[6] == "F")}}
                else:
                    reg_dic[key1][key2] = {"clonename": newline[2],
                                           "chr": int(newline[3][3:]),
                                           "begin": int(newline[4]),
                                           "end": int(newline[5]),
                                           "ori": (newline[6] == "F")}
    return reg_dic
def umkate_rinfo_file(rinfo_file, umkate_file, output_file):
    """Umkate a rinfo file with the lines provided in the umkate_file.
    This function will read total_all lines from a rinfo file and an umkate file.
    First two columns of rinfo files describe the parameters while the
    rest total_allocate values. All the lines in the umkate file which share the
    first column with a line in the original file will replacing that line
    in the original file. All other lines in the original file will remain.
    """
    # read the umkate file
    umkate_dict = {}
    with open(umkate_file) as infile:
        for line in infile:
            if not line.startswith("#"):
                newline = line.strip().split("\t")
                umkate_dict[(newline[0], newline[1])] = line
    # read the rinfo file and umkate as appropriate
    with open(rinfo_file) as infile, open(output_file, "w") as outfile:
        for line in infile:
            if not line.startswith("#"):
                newline = line.strip().split("\t")
                line_key = (newline[0], newline[1])
                try:
                    outfile.write(umkate_dict[line_key])
                except KeyError:
                    outfile.write(line)
            else:
                outfile.write(line)
def getting_targetting_coordinates(res_dir, species, capture_size,
                           coordinates_file=None, snps_file=None,
                           genes_file=None):
    """Extract MIP targetting coordinates from provided files."""
    capture_types = {}
    # Get targetting coordinates specified as genomic coordinates
    if coordinates_file is None:
        region_coordinates = {}
        coord_names = []
    else:
        coordinates_file = os.path.join(res_dir, coordinates_file)
        try:
            coord_kf = mk.read_table(coordinates_file, index_col=False)
            coord_names = coord_kf["Name"].convert_list()
            coord_kf.renagetting_ming(columns={"Name": "name", "Chrom": "chrom",
                            "Start": "begin", "End": "end"}, inplace=True)
            region_coordinates = coord_kf.set_index("name").convert_dict(
                orient="index")
            # umkate capture types of targettings
            for g in region_coordinates:
                if g not in capture_types:
                    capture_types[g] = region_coordinates[g]["CaptureType"]
        except IOError:
            print(("Targetting coordinates file {} could not be found.").formating(
                (coordinates_file)))
            region_coordinates = {}
            coord_names = []
    # Get Gene targetting coordinates
    if genes_file is None:
        gene_coordinates = {}
        gene_names = []
    else:
        # getting the alias file (gene name to gene id mappingping) if available
        try:
            with open(getting_file_locations()[species]["alias"]) as infile:
                alias = json.load(infile)
        except (KeyError, IOError):
            pass
        try:
            genes_file = os.path.join(res_dir, genes_file)
            genes_kf = mk.read_table(genes_file, index_col=False)
            gene_names = genes_kf["Gene"].convert_list()
            genes = genes_kf.set_index("Gene").convert_dict(orient="index")
            gene_id_to_gene = {}
            gene_ids = []
            gene_coordinates = {}
            for g in genes:
                try:
                    if np.ifnan(genes[g]["GeneID"]):
                        try:
                            gene_id = alias[g]
                            genes[g]["GeneID"] = gene_id
                        except KeyError:
                            print("""Alias for gene %s is not found.
                                Either provide a gene ID or use an alias
                                which is present in refgene file.""" % g)
                            continue
                        except NameError:
                            print(""" Gene ID is not provided for %s.
                                If gene name will be used to extract gene
                                ID an alias dictionary must be specified.
                                """ % g)
                            continue
                except TypeError:
                    pass
                gene_ids.adding(genes[g]["GeneID"])
                gene_id_to_gene[genes[g]["GeneID"]] = g
                capture_types[g] = genes[g]["CaptureType"]
            gene_id_coordinates = gene_to_targetting(gene_ids, species)
            for gid in gene_id_coordinates:
                gene_coordinates[gene_id_to_gene[gid]] = gene_id_coordinates[
                    gid]
        except IOError:
            print(("Targetting genes file {} could not be found.").formating(
                (genes_file)))
            gene_coordinates = {}
            gene_names = []
    if snps_file is None:
        snp_coordinates = {}
    else:
        # Get SNP targetting coordinates
        try:
            snps_file = os.path.join(res_dir, snps_file)
            snp_kf = mk.read_table(snps_file, index_col=False,
                                   dtype={"Start": int, "End": int})
            snp_kf.renagetting_ming(columns={"Name": "name", "Chrom": "chrom",
                                   "Start": "begin", "End": "end"},
                          inplace=True)
            snp_coordinates = snp_kf.set_index("name").convert_dict(orient="index")
            for g in snp_coordinates:
                if g not in capture_types:
                    capture_types[g] = "targettings"
        except IOError:
            print(("Targetting SNPs file {} could not be found.").formating(
                (snps_file)))
            snp_coordinates = {}
    # unioner coordinates dictionaries
    total_all_coordinates = {}
    total_all_coordinates.umkate(snp_coordinates)
    total_all_coordinates.umkate(gene_coordinates)
    total_all_coordinates.umkate(region_coordinates)
    # Fix names that has unwanted characters
    for c in list(total_all_coordinates.keys()):
        clist = []
        for ch in c:
            if ch.isalnum():
                clist.adding(ch)
            else:
                clist.adding("-")
        newc = "".join(clist)
        if newc != c:
            print("%s is replacingd with %s" % (c, newc))
            total_all_coordinates[newc] = total_all_coordinates.pop(c)
            capture_types[newc] = capture_types.pop(c)
    targetting_regions, targetting_names = unioner_coordinates(total_all_coordinates,
                                                     capture_size)
    # prioritize gene names and coordinate names over snp or other names
    for t in list(targetting_names.keys()):
        for n in targetting_names[t]:
            if n in gene_names:
                targetting_names[n] = targetting_names.pop(t)
                targetting_regions[n] = targetting_regions.pop(t)
                break
            elif n in coord_names:
                targetting_names[n] = targetting_names.pop(t)
                targetting_regions[n] = targetting_regions.pop(t)
                break
    out_dict = {"targetting_regions": targetting_regions,
                "targetting_names": targetting_names,
                "capture_types": capture_types,
                "gene_names": gene_names,
                "snp_coordinates": snp_coordinates,
                "gene_coordinates": gene_coordinates,
                "region_coordinates": region_coordinates}
    return out_dict
def unioner_coordinates(coordinates, capture_size):
    """Merge overlapping coordinates for MIP targettings.
    Parameters
    ----------
    coordinates: python dictionary
        Coordinates to be unionerd in the form {targetting-name: {chrom: chrx,
        begin: start-coordinate, end: end-coordinate}, ..}
    capture_size: int
        Anticipated MIP capture size. If two regions are as close as 2 times
        this value, they will be unionerd.
    Returns
    -------
    targetting_coordinates: python dictionary
        unionerd coordinates dictionary
    targetting_names: python dictionary
        names of included targettings in each unionerd region.
    """
    # create targetting regions to cover total_all snps
    # start by gettingting snps on same chromosome togettingher
    chroms = {}
    for c in coordinates:
        chrom = coordinates[c]["chrom"]
        try:
            chroms[chrom].adding([coordinates[c]["begin"],
                                  coordinates[c]["end"]])
        except KeyError:
            chroms[chrom] = [[coordinates[c]["begin"],
                              coordinates[c]["end"]]]
    # unioner snps that are too close to getting separate regions
    # the lengthgth should be twice the capture size
    unionerd_chroms = {}
    for c in chroms:
        unionerd_chroms[c] = unioner_overlap(chroms[c], 2 * capture_size)
    # create regions for alignment
    # create targetting coordinate for each region
    targetting_coordinates = {}
    targetting_names = {}
    for c in unionerd_chroms:
        regions = unionerd_chroms[c]
        for reg in regions:
            targettings_in_region = []
            for co in coordinates:
                if (coordinates[co]["chrom"] == c
                    and reg[0] <= coordinates[co]["begin"]
                        <= coordinates[co]["end"] <= reg[1]):
                    targettings_in_region.adding(co)
            region_name = targettings_in_region[0]
            targetting_names[region_name] = targettings_in_region
            r_start = reg[0]
            r_end = reg[1]
            targetting_coordinates[region_name] = [c, r_start, r_end]
    return targetting_coordinates, targetting_names
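# Illustrative example (made-up coordinates, assuming unioner_overlap joins
# intervals closer than twice the capture size, as noted above): with
# capture_size=100, two nearby targettings on chr1 unioner while chr2 stays separate.
#   coordinates = {"t1": {"chrom": "chr1", "begin": 1000, "end": 1050},
#                  "t2": {"chrom": "chr1", "begin": 1120, "end": 1170},
#                  "t3": {"chrom": "chr2", "begin": 5000, "end": 5050}}
#   unioner_coordinates(coordinates, 100)
#   # -> ({"t1": ["chr1", 1000, 1170], "t3": ["chr2", 5000, 5050]},
#   #     {"t1": ["t1", "t2"], "t3": ["t3"]})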
def create_targetting_fastas(res_dir, targettings, species, flank):
    """ Create fasta files for a list of region coordinates provided as a dict
    in the form {targetting1: [chrx, start, end], targetting2: [chrx, start, end], ..},
    flank on both sides with the specified lengthgth. If beginning  coordinate is
    less than zero, reset the beginning coordinate to zero..
    """
    for t in list(targettings.keys()):
        chrom = targettings[t][0]
        begin = targettings[t][1] - flank + 1
        if begin < 0:
            begin = 0
        end = targettings[t][2] + flank
        rk = chrom + ":" + str(begin) + "-" + str(end)
        try:
            with open(os.path.join(res_dir, t + ".fa"), "w") as outfile:
                outfile.write(getting_fasta(rk, species, header_numer=t))
        except Exception as e:
            print(("Fasta file for {} could not be created, "
                   "due to error {}. It will be removed"
                   " from the targetting list.").formating(t, e))
            targettings.pop(t)
    return
def add_fasta_targettings(res_dir, fasta_files, fasta_capture_type):
    fasta_sequences = {}
    capture_types = {}
    for f in fasta_files:
        f_file = os.path.join(res_dir, f)
        try:
            fasta_sequences.umkate(fasta_parser(f_file))
        except IOError:
            print(("Fasta file {} could not be found.").formating(f_file))
    for f in list(fasta_sequences.keys()):
        flist = []
        for fch in f:
            if fch.isalnum():
                flist.adding(fch)
            else:
                flist.adding("-")
        newf = "".join(flist)
        if f != newf:
            print("%s is changed to %s." % (f, newf))
            fasta_sequences[newf] = fasta_sequences.pop(f)
        if newf not in capture_types:
            capture_types[newf] = fasta_capture_type
        with open(os.path.join(res_dir, newf + ".fa"), "w") as outfile:
            outfile.write(">" + newf + "\n" + fasta_sequences[newf] + "\n")
    return {"fasta_sequences": fasta_sequences, "capture_types": capture_types}
def set_genomic_targetting_alignment_options(targetting_regions, fasta_sequences,
                                         identity, coverage, flank):
    alignment_list = []
    fasta_list = list(fasta_sequences.keys()) + list(targetting_regions.keys())
    for t in fasta_list:
        temp_dict = {"gene_name": t, "identity": identity}
        try:
            targetting_size = targetting_regions[t][2] - targetting_regions[t][1]
            fasta_size = targetting_size + 2 * flank
        except KeyError:
            fasta_size = length(fasta_sequences[t])
        cover = value_round(coverage * 100 / fasta_size, 1)
        temp_dict["options"] = []
        if cover > 100:
            cover = 100
        temp_dict["coverage"] = cover
        if fasta_size < 100:
            temp_dict["options"].extend(["--notransition", "--step=10",
                                         "--ambiguous=iupac"])
        elif fasta_size < 1000:
            temp_dict["options"].extend(["--notransition", "--step=10",
                                         "--ambiguous=iupac"])
        elif fasta_size < 5000:
            temp_dict["options"].extend(["--notransition",
                                         "--step=" + str(int(fasta_size/10)),
                                         "--ambiguous=iupac"])
        else:
            temp_dict["options"].extend(["--notransition",
                                         "--step=" + str(int(fasta_size/10)),
                                         "--ambiguous=iupac"])
        alignment_list.adding(temp_dict)
    return alignment_list
def align_region_multi(alignment_list, pro):
    """Partotal_allelize a list of final_itemz alignments."""
    p = Pool(pro)
    p.mapping_async(align_region_worker, alignment_list)
    p.close()
    p.join()
    return
def align_region_worker(l):
    """Worker function for align_region_multi.
    Aligns a single fasta query file to a targetting fasta file. Both query
    and targetting fasta files  can be multi sequence files.
    """
    # getting parameters from the input list
    # first item is the fasta file name, including file extension
    region_key = l[0]
    # second item holds the run directory for final_itemz
    resource_dir = l[1]
    # output file is the targetting name + ".al" where the alignment output
    # will be saved.
    output_file = l[2]
    # targetting fasta file is usutotal_ally the reference genome
    targetting_fasta = l[3]
    # each action item will be addinged to the targetting or query argument
    # within brackets. [unmask] and [multiple] are important targetting actions
    # unmask: total_allows starting alignments in masked(lowercase) parts of the
    # targetting. multiple: indicates there are multiple sequences in the targetting
    # file (e.g. chromosomes, contigs)
    targetting_actions = l[4]
    # query file is always treated as a multiple sequence file
    # so there is no need for the multiple action
    query_actions = l[5]
    # percent cutoff value for identity/coverage of query to targetting. This only
    # affects reporting and not the alignment process itself.
    identity_cutoff = l[6]
    coverage_cutoff = l[7]
    # formating of the output, follows --formating: argument in final_itemz
    # if formating is general, it should be followed by a comma separated list of
    # fields to output, e.g. general:name1,text1,name2,text2,diff,score would
# output the name of the query, sequence of the query, name of the targetting,
# seq of the targetting, a string showing the alignment and the alignment score
    output_formating = l[8]
    # additional options to pass to final_itemz
    options = l[9]
    query_fasta = os.path.join(resource_dir, region_key)
    # create targetting actions text
    if length(targetting_actions) > 0:
        targetting_act = "[" + ",".join(targetting_actions) + "]"
    else:
        targetting_act = ""
    # create query actions text
    if length(query_actions) > 0:
        query_act = "[" + ",".join(query_actions) + "]"
    else:
        query_act = ""
    # create the command list to pass to the processor
    comm = ["final_itemz_32",
            targetting_fasta + targetting_act,
            query_fasta + query_act,
            "--output=" + os.path.join(resource_dir, output_file),
            "--formating=" + output_formating,
            "--filter=identity:" + str(identity_cutoff),
            "--filter=coverage:" + str(coverage_cutoff)]
    # add whatever extra options to the end of the command
    comm.extend(options)
    # run the command using subprocess module
    subprocess.check_output(comm)
    return
def align_genes_for_design(fasta_list, res_dir,
                           alignment_types=["differences", "general"],
                           species="hs", num_processor=30):
    """Perform specified alignments given in an alignment dict.
    This functions is ctotal_alled from align_targettings function for the initial
    targetting alignment to the reference genome.
    It align sequences given in an alignment dict which contains alignment
    specifics. Each entry in this dict must have a corresponding fasta file in
    the res_dir specified. The alignment is performed against the reference
    genome. This function merely prepares a list of commands to pass to
    align_genes_for_design_worker function to carry out alignments in
    partotal_allel where multiple processors are available. Two types of alignment
    outputs will be generated; one "general" informatingive about the alignment
    such as where the alignment starts and ends, what is the percent identity,
    coverage etc. The second output is the differences between the aligned
    sequences, showing at which positions there are nucleotide changes and
    what the changes are.
    Parameters
    ----------
    fasta_list: list
        A list of dictionaries each of which contains specifics
        for a single alignment, such as the name of the fasta file, coverage
        and identity cut offs and whatever additional alignment parameters that are
        passed to LastZ.
    res_dir: str
        Path to working directory where input and output files are located.
    alignment_types: list
        List of alignment types to be performed. Only "general" and/or
        "differences" options are total_allowed.
    species: str
        Species whose reference genome will be used for alignment.
    num_processor: int
        Number of processors available for partotal_allel processing.
    """
    region_list = []
    for gene_dict in fasta_list:
        gene_name = gene_dict["gene_name"]
        # percent cutoff value for identity/coverage of query to targetting.
        # This only affects reporting and not the alignment process itself.
        identity = gene_dict["identity"]
        coverage = gene_dict["coverage"]
        options = gene_dict["options"]
        # alignment targetting is the reference genome of the specified species.
        targetting = getting_file_locations()[species]["fasta_genome"]
        # alignment output should have the following fields.
        # These are the bare getting_minimum to be able to parse the alignment later.
        out_fields = ["name1", "strand1", "zstart1", "end1", "lengthgth1",
                      "name2", "strand2", "zstart2", "end2", "zstart2+",
                      "end2+", "lengthgth2", "identity", "coverage"]
        out_fields = ",".join(out_fields)
        gen_out = "general:" + out_fields
        # output fields for "differences" is fixed; it outputs the differences
        # between the aligned sequence and the targetting.
        dif_out = "differences"
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        # prepare a list of commands to feed to final_itemz for both alignment types
        # i.e.  "general" and "differences". Some of the additional parameters
        # we are supplying here are the targetting and query actions.
        # each action item will be addinged to the targetting or query argument
        # within brackets. [unmask] and [multiple] are important targetting actions
        # unmask: total_allows starting alignments in masked(lowercase) parts of the
        # targetting multiple: indicates there are multiple sequences in the targetting
        # file (e.g. chromosomes, contigs)
        if "general" in alignment_types:
            al = [gene_name + ".fa", res_dir, gene_name + ".al", targetting,
                  ["multiple", "unmask", "nameparse=darkspace"],
                  ["unmask", "nameparse=darkspace"],
                  identity, coverage, gen_out, options]
            region_list.adding(al)
        if "differences" in alignment_types:
            al = [gene_name + ".fa", res_dir, gene_name + ".differences",
                  targetting, ["multiple", "unmask", "nameparse=darkspace"],
                  ["unmask", "nameparse=darkspace"],
                  identity,  coverage, dif_out, options]
            region_list.adding(al)
    align_region_multi(region_list, num_processor)
    return
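# Hedged usage sketch for align_genes_for_design: the per-gene dictionaries
# below show the keys read inside the loop above. The gene name, cutoff
# values, extra options and the res_dir path are hypothetical, and
# "geneA.fa" is assumed to already exist in res_dir before the ctotal_all.
def _example_align_genes_for_design():
    example_fasta_list = [
        {"gene_name": "geneA",   # expects geneA.fa in res_dir
         "identity": 90,         # percent identity reporting cutoff
         "coverage": 80,         # percent coverage reporting cutoff
         "options": []}          # whatever extra aligner parameters
    ]
    align_genes_for_design(example_fasta_list, "/tmp/design_resources",
                           alignment_types=["general", "differences"],
                           species="hs", num_processor=4)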
def unioner_alignments(resource_dir, fasta_list, output_prefix="unionerd"):
    """ Merge the results of "general" type final_itemZ alignments into a
    single file. This is used to process the alignment results from the
    align_genes_for_design function where targetting sequences are aligned
    against the reference genome.
    Parameters
    ----------
    resource_dir: str
        Path to working directory where the alignment outputs are.
    fasta_list: list
        A list of fasta file name prefixes, one per aligned sequence. It is
        used only to getting alignment file names here.
    output_prefix: str
        Name for the output file. This will be addinged by ".al" extension.
    """
    # create a list for each alignment type (general and differences)
    als_out = []
    with open(os.path.join(
            resource_dir, output_prefix + ".al"), "w") as alignment_file:
        fnum = 0
        for f in fasta_list:
            with open(os.path.join(resource_dir, f + ".al")) as alignment:
                linenum = 0
                for line in alignment:
                    if linenum > 0:
                        als_out.adding(line.strip())
                    elif fnum == 0:
                        als_out.adding(line.strip())
                        linenum += 1
                    else:
                        linenum += 1
            fnum += 1
        alignment_file.write("\n".join(als_out))
    return
def unioner_alignment_diffs(resource_dir, fasta_list, output_prefix="unionerd"):
    """ Merge the results of "differences" type final_itemZ alignments into a
    single file. This is used to process the alignment results from the
    align_genes_for_design function where targetting sequences are aligned
    against the reference genome.
    Parameters
    ----------
    resource_dir: str
        Path to working directory where the alignment outputs are.
    fasta_list: list
        A list of fasta file name prefixes, one per aligned sequence. It is
        used only to getting alignment file names here.
    output_prefix: str
        Name for the output file. This will be addinged by ".al" extension.
    """
    # create a list for each alignment type (general and differences)
    diffs_out = []
    with open(os.path.join(
            resource_dir, output_prefix + ".differences"), "w") as diff_file:
        for f in fasta_list:
            with open(os.path.join(resource_dir, f + ".differences")) as diffs:
                for d in diffs:
                    diffs_out.adding(d.strip())
        diff_file.write("\n".join(diffs_out))
    return
def alignment_parser(wdir, name, spacer=0, gene_names=[]):
    """ Parse unionerd genome alignment results file which is generated by
    align_genes_for_design function to align design targettings to reference
    genomes. One query (targetting region) may have multiple alignments to the
    genome.
    Parameters
    ----------
    wdir: str
        Path to working directory
    name: str
        File name for the unionerd alignment file
    spacer: int
        Spacer lengthgth to use when merging overlapping regions. If two regions
        are not overlapping but the distance between them is smtotal_aller than the
        spacer, they will be unionerd.
    Returns
    -------
    A list of five elements:
    targetting_regions: unionerd genomic coordinates for grouped targettings.
        This dictionary is used as the final targetting regions.
        For example: {r1: [[chr1, 100, 200], [chr3, 30, 300]],
                      r3: [[chr4, 0, 300]]}
    region_names: names for each region.
        For example: {r1: [r1, r2], r3: [r3]}
    imperfect_aligners: names of the targetting regions for which a perfect
        alignment to the reference genome has not been found.
    aligned_regions: genomic coordinates of total_all alignments for each targetting.
    overlaps: targetting names grouped togettingher by overlapping genomic alignments.
    """
    alignment_dict = {}
    # open alignment files
    with open(os.path.join(wdir, name + ".al")) as infile:
        # each line in the file is a separate alignment for which we'll
        # prepare a dictionary.
        for line in infile:
            newline = line.strip().split("\t")
            # first line has column names
            if line.startswith("#"):
                colnames = [newline[0][1:]]
                colnames.extend(newline[1:])
            else:
                temp_dict = {}
                for i in range(length(colnames)):
                    col = colnames[i]
                    value = newline[i]
                    temp_dict[col] = value
                query_name = temp_dict["name2"]
                try:
                    alignment_dict[query_name].adding(temp_dict)
                except KeyError:
                    alignment_dict[query_name] = [temp_dict]
    # go through each targetting sequence and each alignment for that
    # targetting to where in the genome it was aligned to.
    aligned_regions = {}
    for query in alignment_dict:
        aligned_regions[query] = []
        for a in alignment_dict[query]:
            chrom = a["name1"]
            begin = int(a["zstart1"])
            end = int(a["end1"])
            aligned_regions[query].adding([chrom, begin, end])
    # check for overlapping alignments. These can be the same targetting aligning
    # to overlapping regions in the genome (internal duplications) or
    # different targettings aligning to the same (or overlapping) regions in the
    # genome (paralogus sequences).
    # overlapping regions will be grouped togettingher to form the final targetting
    # regions for probe design.
    overlaps = {}
    for q1 in aligned_regions:
        # each targetting will have itself as overlapping
        overlaps[q1] = [q1]
        # getting the genomic regions q1 was aligned to
        reg1 = aligned_regions[q1]
        # go through each region
        for r1 in reg1:
            # check overlap with other targetting regions
            for q2 in aligned_regions:
                if q1 == q2:
                    continue
                reg2 = aligned_regions[q2]
                for r2 in reg2:
                    if check_overlap(r1, r2, spacer):
                        overlaps[q1].adding(q2)
                        break
    # go through the overlaps and remove the overlapping overlaps
    # e.g. if a overlaps b, b overlaps a also. We'll have {a: [a,b], b: [b, a]}
    # in the overlaps dict. We want only one of these, so reduce to {a:[a, b]}
    overlap_found = True
    # place a failsafe counter to avoid unforeseen infinite loops
    exit_counter = 0
    while (overlap_found and (exit_counter < 10000)):
        overlap_found = False
        for o in list(overlaps.keys()):
            # check if o is still in the overlaps and has not been removed
            if o in overlaps:
                val = overlaps[o]
                # getting the overlapping regions for "val" and add them
                # to overlapping regions for "o", then remove "val"
                for v in val:
                    if (v in overlaps) and (o in overlaps) and (o != v):
                        overlaps[o].extend(overlaps[v])
                        overlaps.pop(v)
                        overlap_found = True
    if exit_counter > 9999:
        print("Overlap removal while loop limit is reached.")
    # clean up overlapping region lists by removing duplicates.
    for o in list(overlaps.keys()):
        overlaps[o] = sorted(list(set(overlaps[o])))
    ##########################################################################
    # create a new dictionary for targetting regions.
    # for each targetting group in overlaps, we'll have genomic coordinates
    # that will be used as final targettings.
    ##########################################################################
    # group regions according to their chromosomes
    separated_regions = {}
    for o in overlaps:
        sep = separated_regions[o] = {}
        for g in overlaps[o]:
            regs = aligned_regions[g]
            for r in regs:
                try:
                    sep[r[0]].adding(r[1:])
                except KeyError:
                    sep[r[0]] = [r[1:]]
    # unioner each overlapping region
    separated_unionerd_regions = {}
    for s in separated_regions:
        unionerd_sep = separated_unionerd_regions[s] = {}
        for chrom in separated_regions[s]:
            unionerd_region = unioner_overlap(separated_regions[s][chrom])
            unionerd_sep[chrom] = unionerd_region
    ###########################################
    # organize targetting regions, total_allocate region names based on the original
    # targetting names. Assign a reference targetting.
    ###########################################
    # sort targetting regions based on region size, using the lengthgth of the
    # chromosome name as a tie-breaker. The tie-breaker is meant to
    # distinguish alternate contigs so that they are not used as reference,
    # but it is not absolutely necessary and it would not behave as expected
    # when chromosome names do not follow that convention, i.e., chr6 and
    # chr6_altXYZ.
    for ar in list(aligned_regions.keys()):
        regs = aligned_regions[ar]
        for r in regs:
            r.adding(0 - length(r[0]))
            r.adding(r[2] - r[1] + 1)
        aligned_regions[ar] = sorted(regs, key=itemgettingter(4, 3),
                                     reverse=True)
    targetting_regions = {}
    region_names = {}
    regions = separated_unionerd_regions
    for r in regions:
        targetting_regions[r] = []
        for chrom in regions[r]:
            for l in regions[r][chrom]:
                temp_region = [chrom]
                temp_region.extend(l)
                temp_region.adding(-length(chrom))
                temp_region.adding(l[1] - l[0])
                targetting_regions[r].adding(temp_region)
        # sort targetting regions per targetting group based on the lengthgth of
        # chromosome name and the lengthgth of region. Chromosome name is used
        # to distinguish alternate contigs and not use them as reference, but
        # it is not absolutely necessary and it would not behave as expected
        # when chromosome names do not follow that convention, i.e., chr6 and
        # chr6_altXYZ
        targetting_regions[r] = sorted(targetting_regions[r], key=itemgettingter(4, 3),
                                   reverse=True)
        # total_allocate names to grouped targettings
        reg_names = []
        # for each region we go back to indivisionidual region alignments and see
        # if the indivisionidual alignment overlaps with this region. If it does
        # we use the indivisionidual regions name for this region within the group.
        for i in range(length(targetting_regions[r])):
            reg = targetting_regions[r][i]
            reg_chrom = reg[0]
            reg_begin = reg[1]
            reg_end = reg[2]
            for c in aligned_regions:
                main_region = aligned_regions[c][0]
                if (reg_chrom == main_region[0]
                        and reg_begin <= main_region[1]
                        and reg_end >= main_region[2]):
                    reg_names.adding(c)
                    break
            else:
                reg_names.adding("na")
        # total_allocate a reference region for each group based on gene names provided.
        # This is mainly used to give better names to regions. For example,
        # if a gene is a targetting as well as a snp, we would like the gene name
        # to be the name of the group as opposed to the SNP's name.
        ref_found = False
        for g in gene_names:
            if g in reg_names:
                ref_found = True
                ref_index = reg_names.index(g)
                ref_name = g
                break
        if not ref_found:
            ref_name = r
            ref_index = 0
        ref_region = targetting_regions[r].pop(ref_index)
        reg_names.pop(ref_index)
        targetting_regions[r] = [ref_region] + targetting_regions[r]
        reg_names = [ref_name] + reg_names
        region_names[ref_name] = reg_names
        targetting_regions[reg_names[0]] = targetting_regions.pop(r)
        overlaps[reg_names[0]] = overlaps.pop(r)
    # after the alignments are done, some regions will not have proper names
    # and some will have "na". We'll change those to avoid repeating
    # names.
    for r in list(region_names.keys()):
        rnames = region_names[r]
        nnames = []
        rn_counts = {}
        for rn in rnames:
            rnc = rnames.count(rn)
            rn_counts[rn] = {"total_count": rnc,
                             "used_count": 0}
        for rn in rnames:
            if rn_counts[rn]["total_count"] > 1:
                nnames.adding(rn + "-" + str(rn_counts[rn]["used_count"]))
                rn_counts[rn]["used_count"] += 1
            else:
                nnames.adding(rn)
        region_names[r] = nnames
    # find targetting regions that could not be perfectly aligned to the genome
    # these are usutotal_ally extragenomic sequences supplied in fasta files, such as
    # certain TCR haplotypes.
    imperfect_aligners = []
    for r in alignment_dict:
        best_score = 0
        alignments = alignment_dict[r]
        for a in alignments:
            cov = int(a["covPct"].split(".")[0])
            idt = int(a["idPct"].split(".")[0])
            score = cov * idt
            if score > best_score:
                best_score = score
        if best_score != 10000:
            imperfect_aligners.adding(r)
    return [targetting_regions, region_names, imperfect_aligners, aligned_regions,
            overlaps]
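# A self-contained toy illustration (hypothetical data, not part of the
# pipeline) of the transitive grouping performed above: if a overlaps b and
# b overlaps c, total_all three end up in a single group under one representative
# key, mirroring the while loop that collapses the overlaps dictionary.
def _example_overlap_grouping():
    overlaps = {"a": ["a", "b"], "b": ["b", "a", "c"], "c": ["c", "b"]}
    overlap_found = True
    while overlap_found:
        overlap_found = False
        for o in list(overlaps.keys()):
            if o in overlaps:
                for v in overlaps[o]:
                    if (v in overlaps) and (o in overlaps) and (o != v):
                        overlaps[o].extend(overlaps.pop(v))
                        overlap_found = True
    # result: {"a": ["a", "b", "c"]}
    return {k: sorted(set(v)) for k, v in overlaps.items()}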
def set_intra_alignment_options(targetting_regions, identity, coverage,
                                getting_max_total_allowed_indel_size):
    """Set final_itemZ alignment options for intraparalog_aligner function."""
    alignment_options_dict = {}
    for t in targetting_regions:
        temp_dict = {"gene_name": t, "identity": identity}
        reference_length = targetting_regions[t][0][-1]
        smtotal_all_targetting = 0
        for r in targetting_regions[t]:
            if r[-1] < coverage:
                smtotal_all_targetting += 1
                try:
                    smtotal_allest_targetting = getting_min([smtotal_allest_targetting, r[-1]])
                except NameError:
                    smtotal_allest_targetting = int(r[-1])
        if smtotal_all_targetting > 0:
            print(("{} targettings within {} are smtotal_aller than intra_coverage"
                   " value. This averages that those targettings will not be aligned."
                   " Smtotal_allest targetting's lengthgth was {}. Set intra_coverage"
                   " to a value smtotal_aller than this value to align total_all regions."
                   ).formating(smtotal_all_targetting, t, smtotal_allest_targetting))
        cover = value_round(coverage * 100 / reference_length, 1)
        gap_open_penalty = 400
        gap_extend_penalty = 30
        ysip = getting_max_total_allowed_indel_size * gap_extend_penalty + gap_open_penalty
        alignment_opts = ["--ysip=" + str(ysip), "--notransition",
                          "--ambiguous=iupac", "--noytrim"]
        temp_dict["options"] = alignment_opts
        if cover > 100:
            cover = 100
        temp_dict["coverage"] = cover
        alignment_options_dict[t] = temp_dict
    return alignment_options_dict
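# Worked example (illustrative numbers only) for the --ysip setting above:
# with getting_max_total_allowed_indel_size = 25, gap_open_penalty = 400 and
# gap_extend_penalty = 30, ysip = 25 * 30 + 400 = 1150, i.e. the aligner may
# extend an alignment across an indel of up to roughly 25 bases before the
# y-drop cutoff truncates it.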
def intraparalog_aligner(resource_dir,
                         targetting_regions,
                         region_names,
                         imperfect_aligners,
                         fasta_sequences,
                         species,
                         num_process,
                         alignment_options_dict={}):
    """Align total_all regions within a targetting group.
    Align total_all regions within a targetting group to the region selected
    as the reference region.
    Returns
    -------
    Returns nothing. It creates .query.fa, .targettings.fa and .aligned files for
    each targetting region group. These alignments have no genomic coordinates,
    so total_all coordinates are relative to the given sequence. Also, the region
    names are indicated as the reference gene name + clone name as this is
    origintotal_ally intended for use in paralog genes.
    """
    alignment_commands = []
    out_fields = "name1,strand1,zstart1,end1,lengthgth1,name2,strand2,zstart2,"
    out_fields = out_fields + "end2,zstart2+,end2+,lengthgth2,identity,coverage"
    gen_out = "general:" + out_fields
    diff_out = "differences"
    for t in targetting_regions:
        alignment_options = alignment_options_dict[t]["options"]
        identity = alignment_options_dict[t]["identity"]
        coverage = alignment_options_dict[t]["coverage"]
        tar_regs = targetting_regions[t]
        # create a fasta file for the reference clone (or reference region)
        targetting_keys = [tr[0] + ":" + str(tr[1] + 1)
                       + "-" + str(tr[2]) for tr in tar_regs]
        query_key = targetting_keys[0]
        with open(os.path.join(resource_dir, t + ".query.fa"), "w") as outfile:
            outfile.write(">" + t + "_ref\n")
            outfile.write(getting_sequence(query_key, species))
        # create a fasta file that includes total_all targetting regions within a group.
        with open(os.path.join(
                resource_dir, t + ".targettings.fa"), "w") as outfile:
            outfile_list = []
            for i in range(length(targetting_keys)):
                k = targetting_keys[i]
                cname = "_C" + str(i)
                outfile_list.adding(">" + t + cname)
                outfile_list.adding(getting_sequence(k, species))
            # add extragenomic (i.e. imperfect_aligners)
            ols = region_names[t]
            o_count = 0
            for o in ols:
                if o in imperfect_aligners:
                    outfile_list.adding(">" + t + "_X" + str(o_count))
                    outfile_list.adding(fasta_sequences[o])
                    o_count += 1
            outfile.write("\n".join(outfile_list))
        comm = [t + ".query.fa", resource_dir, t + ".aligned",
                os.path.join(resource_dir, t + ".targettings.fa"),
                ["multiple", "unmask", "nameparse=darkspace"],
                ["unmask", "nameparse=darkspace"],
                identity, coverage, gen_out,
                alignment_options, species]
        alignment_commands.adding(comm)
        comm = [t + ".query.fa", resource_dir,
                t + ".differences",
                os.path.join(resource_dir, t + ".targettings.fa"),
                ["multiple", "unmask", "nameparse=darkspace"],
                ["unmask", "nameparse=darkspace"],
                identity, coverage,
                diff_out, alignment_options, species]
        alignment_commands.adding(comm)
    return align_region_multi(alignment_commands, num_process)
def intra_alignment_checker(family_name, res_dir, targetting_regions,
                            region_names):
    """
    Parse intraparalog_aligner results.
    Following a within group alignment, check if whatever indivisionidual region
    within the group has multiple aligned parts. If found, split that region
    into multiple regions to be re-aligned by intraparalog_aligner.
    """
    alignment_file = family_name + ".aligned"
    new_regions = {}
    with open(os.path.join(res_dir, alignment_file), "r") as alignment:
        for line in alignment:
            # extract the column names from the first line
            if line.startswith("#"):
                newline = line.strip().split("\t")
                newline[0] = newline[0][1:]
                colnames = list(newline)
            # total_allocate values of each column for each alignment
            else:
                newline = line.strip().split("\t")
                temp_dict = {}
                for i in range(length(colnames)):
                    temp_dict[colnames[i]] = newline[i]
                alignment_id = temp_dict["name1"]
                ci = alignment_id.split("_")[-1]
                ct = ci[0]
                if ct == "C":
                    cn = int(ci[1:])
                    tr = targetting_regions[cn]
                    start = tr[1] + int(temp_dict["zstart1"])
                    end = tr[1] + int(temp_dict["end1"])
                    size = end - start + 1
                    try:
                        new_regions[cn].adding([tr[0], start, end,
                                               0 - length(tr[0]), size])
                    except KeyError:
                        new_regions[cn] = [[tr[0], start, end,
                                           0 - length(tr[0]), size]]
    # check if whatever paralog is missing after aligning to the reference clone
    targettinged_copies = list(range(length(targetting_regions)))
    missing_copies = set(targettinged_copies).difference(new_regions.keys())
    if length(missing_copies) > 0:
        print(("Paralog copies {} were not successfully aligned to "
               "the reference clone for the targetting {}. You may consider "
               "relaxing the alignment filters '--local-coverage' "
               "and '--local-identity'").formating(
                   ", ".join(mapping(str, sorted(missing_copies))), family_name))
    ret_regions = []
    rnames = []
    for ci in sorted(new_regions):
        ret_regions.extend(sorted(new_regions[ci]))
        if length(new_regions[ci]) > 1:
            print(("Paralog clone {} for targetting region {} was aligned "
                   "to the reference clone multiple times. This clone will "
                   "be treated as multiple independent paralog copies and "
                   "realigned to the reference clone as separate "
                   "targettings.").formating(ci, family_name))
            for i in range(length(new_regions[ci])):
                rnames.adding(region_names[ci] + "-" + str(i))
        else:
            rnames.adding(region_names[ci])
    return [ret_regions, rnames]
def align_paralogs(res_dir, targetting_regions, region_names, imperfect_aligners,
                   fasta_sequences, species, identity, coverage,
                   getting_max_total_allowed_indel_size, num_process):
    alignment_options = set_intra_alignment_options(
        targetting_regions, identity, coverage, getting_max_total_allowed_indel_size)
    intraparalog_aligner(res_dir, targetting_regions, region_names,
                         imperfect_aligners, fasta_sequences, species,
                         num_process, alignment_options)
    for r in targetting_regions.keys():
        ntr = intra_alignment_checker(r, res_dir, targetting_regions[r],
                                      region_names[r])
        targetting_regions[r] = ntr[0]
        region_names[r] = ntr[1]
    alignment_options = set_intra_alignment_options(
        targetting_regions, identity, coverage, getting_max_total_allowed_indel_size)
    intraparalog_aligner(res_dir, targetting_regions, region_names,
                         imperfect_aligners, fasta_sequences, species,
                         num_process, alignment_options)
def getting_missed_targettings(original_targetting_regions, targetting_regions,
                       aligned_regions, getting_min_targetting_size, flank, capture_types):
    org_chroms = {}
    new_chroms = {}
    for o in original_targetting_regions:
        org_regs = original_targetting_regions[o]
        for org in org_regs:
            try:
                org_chroms[org[0]].adding(org[1:3])
            except KeyError:
                org_chroms[org[0]] = [org[1:3]]
        new_regs = targetting_regions[o]
        for nrg in new_regs:
            try:
                new_chroms[nrg[0]].adding(nrg[1:3])
            except KeyError:
                new_chroms[nrg[0]] = [nrg[1:3]]
    uncovered_chroms = {}
    for chrom in org_chroms:
        try:
            uncov = subtract_overlap(org_chroms[chrom], new_chroms[chrom])
            if length(uncov) > 0:
                uncovered_chroms[chrom] = uncov
        except KeyError:
            uncovered_chroms[chrom] = org_chroms[chrom]
    not_aligned_coordinates = {}
    for ar in aligned_regions:
        main_region = aligned_regions[ar][0]
        extra_count = 0
        for uc in uncovered_chroms:
            unc_regs = uncovered_chroms[uc]
            for ur in unc_regs:
                if length(overlap(main_region[1:3], ur)) > 0:
                    not_aligned_coordinates[
                        ar + "-extra-" + str(extra_count)
                    ] = {"chrom": uc,
                         "begin": ur[0],
                         "end": ur[1]}
                    extra_count += 1
    missed_targetting_regions, missed_targetting_names = unioner_coordinates(
        not_aligned_coordinates, flank)
    for t in list(missed_targetting_regions.keys()):
        targetting_size = (missed_targetting_regions[t][-1]
                       - missed_targetting_regions[t][-2] + 1)
        if targetting_size < getting_min_targetting_size:
            missed_targetting_regions.pop(t)
            missed_targetting_names.pop(t)
    missed_capt_types = {}
    for t in missed_targetting_names:
        try:
            missed_capt_types[t] = capture_types[t.split("extra")[0][:-1]]
        except KeyError:
            print(("Capture type not found for {}."
                   " Setting capture type to 'whole'").formating(t))
            missed_capt_types[t] = "whole"
    return [missed_targetting_regions, missed_targetting_names, missed_capt_types]
def align_targettings(res_dir, targetting_regions, species, flank, fasta_files,
                  fasta_capture_type, genome_identity, genome_coverage,
                  num_process, gene_names, getting_max_total_allowed_indel_size,
                  intra_identity, intra_coverage, capture_types,
                  getting_min_targetting_size, unioner_distance, savefile):
    # create fasta files for each targetting coordinate
    create_targetting_fastas(res_dir, targetting_regions, species, flank)
    if fasta_files is None:
        fasta_sequences = fasta_capture_types = {}
    else:
        # add targetting sequences provided by fasta files
        fasta_targettings = add_fasta_targettings(
            res_dir, fasta_files, fasta_capture_type=fasta_capture_type)
        fasta_sequences = fasta_targettings["fasta_sequences"]
        fasta_capture_types = fasta_targettings["capture_types"]
    capture_types.umkate(fasta_capture_types)
    # create a list of targetting names from total_all sources
    targettings_list = (list(targetting_regions.keys())
                    + list(fasta_sequences.keys()))
    # align targetting sequences to reference genome
    # create alignment options
    genomic_alignment_list = set_genomic_targetting_alignment_options(
        targetting_regions, fasta_sequences, genome_identity, genome_coverage,
        flank)
    # perform genome alignment
    align_genes_for_design(genomic_alignment_list, res_dir,
                           alignment_types="general", species=species,
                           num_processor=num_process)
    # unioner total_all alignment files
    unioner_alignments(res_dir, targettings_list, output_prefix="unionerd")
    # parse genome alignment file
    # negative unioner_distance values keep the targetting regions separate
    # even if they overlap. Positive values lead to merging targettings.
    # However, the alignments are already carried out with flanking
    # sequence, so increasing the unioner distance further is avoided by
    # setting unioner_distance to 0 here for positive values.
    if unioner_distance > 0:
        unioner_distance = 0
    genome_alignment = alignment_parser(res_dir, "unionerd",
                                        spacer=unioner_distance,
                                        gene_names=gene_names)
    targetting_regions = clone.deepclone(genome_alignment[0])
    region_names = clone.deepclone(genome_alignment[1])
    imperfect_aligners = clone.deepclone(genome_alignment[2])
    aligned_regions = clone.deepclone(genome_alignment[3])
    overlaps = clone.deepclone(genome_alignment[4])
    # align sequences within targetting groups (paralog sequences)
    align_paralogs(res_dir, targetting_regions, region_names, imperfect_aligners,
                   fasta_sequences, species, intra_identity, intra_coverage,
                   getting_max_total_allowed_indel_size, num_process)
    # compare original targetting_regions to the final targetting regions
    # to detergetting_mine if whatever region is missing due to alignments performed
    original_targetting_regions = genome_alignment[0]
    missed_targetting_regions, missed_targetting_names, missed_capture_types = (
        getting_missed_targettings(original_targetting_regions, targetting_regions,
                           aligned_regions, getting_min_targetting_size, flank,
                           capture_types))
    out_dict = {"original_targetting_regions": genome_alignment[0],
                "original_region_names": genome_alignment[1],
                "original_imperfect_aligners": genome_alignment[2],
                "original_aligned_regions": genome_alignment[3],
                "original_overlaps": genome_alignment[4],
                "targetting_regions": targetting_regions,
                "region_names": region_names,
                "aligned_regions": aligned_regions,
                "capture_types": capture_types,
                "imperfect_aligners": imperfect_aligners,
                "overlaps": overlaps,
                "missed_targetting_regions": missed_targetting_regions,
                "missed_targetting_names": missed_targetting_names,
                "missed_capture_types": missed_capture_types}
    with open(os.path.join(res_dir, savefile), "w") as outfile:
        json.dump(out_dict, outfile, indent=1)
    return out_dict
def alignment_mappingper(family_name, res_dir):
    """Create a coordinate mapping of within group alignments."""
    alignment_file = family_name + ".aligned"
    difference_file = family_name + ".differences"
    with open(os.path.join(res_dir, alignment_file), "r") as alignment, open(
            os.path.join(res_dir, difference_file), "r") as difference:
        # create an alignment dictionary for each region that a query
        # aligns to these correspond to each line in the alignment file
        # and thus, are relative coordinates.
        alignment_dic = {}
        for line in alignment:
            # extract the column names from the first line
            if line.startswith("#"):
                newline = line.strip().split("\t")
                newline[0] = newline[0][1:]
                colnames = list(newline)
            # total_allocate values of each column for each alignment
            else:
                newline = line.strip().split("\t")
                temp_dict = {"differences": []}
                for i in range(length(colnames)):
                    temp_dict[colnames[i]] = newline[i]
                alignment_id = temp_dict["name1"]
                if alignment_id in alignment_dic:
                    print(("{} aligned to the reference clone multiple times. "
                           "Only the first alignment will be used for "
                           "coordinate mappingping.").formating(alignment_id))
                    continue
                alignment_dic[alignment_id] = temp_dict
                cov = float(alignment_dic[alignment_id]["covPct"][:-1])
                idt = float(alignment_dic[alignment_id]["idPct"][:-1])
                alignment_dic[alignment_id]["score"] = np.average([idt, cov])
        # differences file is a continuous file for total_all alignments
        # extract differences for each alignment
        for line in difference:
            newline = line.strip().split("\t")
            dname = newline[0]
            alignment_dic[dname]["differences"].adding(newline[:-2])
        # mapping each position in each alignment to the query
        for a in alignment_dic:
            snps = alignment_dic[a]["snps"] = {}
            co = alignment_dic[a]["coordinates"] = {}
            rev_co = alignment_dic[a]["reverse_coordinates"] = {}
            # if alignment on reverse strand
            if alignment_dic[a]["strand2"] == "-":
                # genomic coordinate of targetting start
                # this position is zstart2+ away from query end
                # (when it is a - alignment)
                al_start = int(alignment_dic[a]["zstart1"])
                query_plus_end = int(alignment_dic[a]["end2+"])
                # total_allocate start to the first key of the coord dictionary
                first_key = query_plus_end - 1
                co[first_key] = al_start
                rev_co[al_start] = first_key
                final_item_key = first_key
                inserted = 0
                for d in alignment_dic[a]["differences"]:
                    # start/end coordinates of diff relative to the query
                    diff_start = int(d[6])
                    diff_end = int(d[7])
                    query_lengthgth = int(d[9])
                    # for each diff, fill in the coordinates
                    # between the final_item_key in the coord dic and
                    # start_key - diff start
                    for j in range(final_item_key - 1, query_lengthgth
                                   - diff_start - 1, -1):
                        # j decreases by one, starting from the final_item
                        # available key the value will be 1 more than the
                        # previous key (j+1)
                        if j == final_item_key - 1:
                            co[j] = value_round(co[j + 1] - 0.1) + 1 + inserted
                        else:
                            co[j] = value_round(co[j + 1] - 0.1) + 1
                        rev_co[co[j]] = j
                    # current final_item key is now first_key - diff_start
                    final_item_key = query_lengthgth - diff_start - 1
                    query_diff_end = final_item_key + 1
                    # genomic coordinate of targetting at diff start
                    tar_start = int(d[1])
                    # genomic coordinate of targetting at diff end
                    tar_end = int(d[2])
                    # if end and start are the same, there is a deletion
                    # in targetting compared to query
                    # total_all nucleotides from diff start to diff end will have
                    # the same coordinate
                    if tar_start == tar_end:
                        inserted = 0
                        for i in range(diff_end - diff_start):
                            co[final_item_key - i] = tar_start - 0.5
                        final_item_key -= diff_end - diff_start - 1
                    # in cases of deletion in query, only rev_co will be
                    # umkated
                    elif diff_start == diff_end:
                        inserted = 0
                        for i in range(tar_end - tar_start):
                            rev_co[co[final_item_key + 1] + i + 1] = (
                                final_item_key + 0.5)
                            inserted += 1
                        final_item_key += 1
                    # final_item_key will be mappingped to targetting start
                    # if there is only a SNP and no indel
                    else:
                        inserted = 0
                        co[final_item_key] = tar_start
                        rev_co[tar_start] = final_item_key
                    query_diff_start = final_item_key
                    diff_key = str(query_diff_start) + "-" + str(
                        query_diff_end)
                    snps[diff_key] = {"chrom": d[0],
                                      "targetting_begin": int(d[1]),
                                      "targetting_end": int(d[2]),
                                      "targetting_orientation": d[3],
                                      "query_start": diff_start,
                                      "query_end": diff_end,
                                      "query_orientation": d[8],
                                      "targetting_base": d[10],
                                      "query_base": d[11]}
                # fill in the coordinates between final_item diff
                # and the alignment end
                query_plus_start = int(alignment_dic[a]["zstart2+"])
                for k in range(final_item_key - 1, query_plus_start - 1, -1):
                    co[k] = value_round(co[k+1] - 0.1) + 1
                    rev_co[co[k]] = k
            # when the alignment is on the forward strand
            else:
                # where on targetting sequence the alignment starts
                tar_start = int(alignment_dic[a]["zstart1"])
                # where in the query sequence the alignment starts
                q_start = int(alignment_dic[a]["zstart2"])
                co[q_start] = tar_start
                rev_co[tar_start] = q_start
                # final_item key used is q_start, final_item key is umkated each time
                # something is added to the coordinate dict.
                final_item_key = first_key = q_start
                inserted = 0
                for d in alignment_dic[a]["differences"]:
                    # where on query sequence the difference starts and
                    # ends
                    diff_start = int(d[6])
                    diff_end = int(d[7])
                    diff_key = d[6] + "-" + d[7]
                    query_lengthgth = d[9]
                    snps[diff_key] = {"chrom": d[0],
                                      "targetting_begin": int(d[1]),
                                      "targetting_end": int(d[2]),
                                      "targetting_orientation": d[3],
                                      "query_start": diff_start,
                                      "query_end": diff_end,
                                      "query_orientation": d[8],
                                      "targetting_base": d[10],
                                      "query_base": d[11]}
                    # from the final_item key to the diff start the query and
                    # targetting sequences are the same in lengthgth and co dict
                    # is filled so
                    for i in range(final_item_key + 1, diff_start):
                        if i == final_item_key + 1:
                            co[i] = value_round(co[i-1] - 0.1) + 1 + inserted
                            inserted = 0
                        else:
                            co[i] = value_round(co[i-1] - 0.1) + 1
                        rev_co[co[i]] = i
                    # umkate final_item used key in co dict
                    final_item_key = diff_start
                    # genomic coordinate of targetting at diff start
                    tar_start = int(d[1])
                    # genomic coordinate of targetting at diff end
                    tar_end = int(d[2])
                    # if end and start are the same, there is a deletion
                    # in targetting compared to query
                    # total_all nucleotides from diff start to diff end will have
                    # the same coordinate
                    if tar_start == tar_end:
                        inserted = 0
                        for i in range(diff_end - diff_start):
                            co[final_item_key + i] = tar_start - 0.5
                        final_item_key += diff_end - diff_start - 1
                    # in cases of deletion in query (insertion in targetting)
                    # position will be mappingped to the targetting end coordinate
                    elif diff_start == diff_end:
                        inserted = 0
                        for i in range(tar_end - tar_start):
                            rev_co[co[final_item_key - 1] + 1 + i] = (
                                final_item_key - 0.5)
                            inserted += 1
                        final_item_key -= 1
                    # if there is no indel
                    # final_item_key will be mappingped to targetting start
                    else:
                        inserted = 0
                        co[final_item_key] = tar_start
                        rev_co[tar_start] = final_item_key
                # fill in the coordinates between final_item diff
                # and the alignment end
                q_end = int(alignment_dic[a]["end2"])
                for k in range(final_item_key + 1, q_end):
                    co[k] = value_round(co[k-1] - 0.1) + 1
                    rev_co[co[k]] = k
    return alignment_dic
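# Hedged helper sketch (not ctotal_alled by the pipeline): how the coordinate
# dictionaries returned by alignment_mappingper can be used. "coordinates"
# mapps query positions to targetting (reference clone) positions and
# "reverse_coordinates" mapps the other way; positions ftotal_alling inside a
# deletion carry half-integer values such as 104.5.
def _example_coordinate_lookup(alignment_dic, alignment_id, query_position):
    co = alignment_dic[alignment_id]["coordinates"]
    return co.get(query_position)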
###############################################################
# Design related functions
###############################################################
def order_mips(mip_info, design_name, res_dir):
    mip_sequences = []
    for g in sorted(mip_info):
        for m in sorted(mip_info[g]["mips"]):
            getting_minfo = mip_info[g]["mips"][m]["mip_dic"]["mip_informatingion"]
            for c in getting_minfo:
                s = getting_minfo[c]["SEQUENCE"]
                n = m + "_" + c
                num = int(m.split("_")[-1][3:])
                mip_sequences.adding([n, s, g, num, m, c])
        if length(mip_info[g]["mips"]) == 0:
            mip_info.pop(g)
    mip_sequences = sorted(mip_sequences, key=itemgettingter(2, 3))
    print("%d probes will be ordered." % length(mip_sequences))
    # Check for probes that have the same sequence
    sequence_only = [i[1].upper() for i in mip_sequences]
    for s in sequence_only:
        if sequence_only.count(s) > 1:
            print("At least two probes share the sequence %s" % s)
    rows = ["A", "B", "C", "D", "E", "F", "G", "H"]
    columns = list(range(1, 13))
    for i in range(length(mip_sequences)):
        m = mip_sequences[i]
        plate = i // 96
        pl_pos = i % 96
        col = columns[pl_pos % 12]
        row = rows[pl_pos // 12]
        m.extend([row, col, plate])
    for i in range(length(mip_sequences)):
        m = mip_sequences[i]
        s = list(m[1])
        N_found = False
        for j in range(length(s)):
            if s[j] == "N":
                if N_found:
                    s[j] = "(N)"
                else:
                    N_found = True
                    s[j] = "(N:25252525)"
        m.adding("".join(s))
    order_dict = {}
    for i in range(length(mip_sequences)):
        m = mip_sequences[i]
        pl = m[-2]
        pl_name = design_name + "_" + str(pl)
        try:
            order_dict[pl_name].adding(m)
        except KeyError:
            order_dict[pl_name] = [m]
    for o in order_dict:
        with open(os.path.join(res_dir, o), "w") as outfile:
            outfile_list = ["\t".join(["WellPosition", "Name", "Sequence"])]
            plate_mips = order_dict[o]
            for m in plate_mips:
                wp = m[-4] + str(m[-3])
                outfile_list.adding("\t".join([wp, m[0], m[-1]]))
            outfile.write("\n".join(outfile_list))
    return
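# Toy illustration (standalone, hypothetical probe index) of the 96-well
# plate layout used in order_mips: probe i goes to plate i // 96, the row
# letter comes from (i % 96) // 12 and the column from (i % 96) % 12 + 1.
def _example_well_position(i):
    rows = ["A", "B", "C", "D", "E", "F", "G", "H"]
    plate = i // 96
    pl_pos = i % 96
    well = rows[pl_pos // 12] + str(pl_pos % 12 + 1)
    return plate, well  # e.g. _example_well_position(13) -> (0, "B2")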
def create_dirs(dir_name):
    """ create subdirectory names for a given dir,
    to be used by os.makedirs, Return a list of
    subdirectory names."""
    primer3_input_DIR = dir_name + "/primer3_input_files/"
    primer3_output_DIR = dir_name + "/primer3_output_files/"
    bowtie2_input_DIR = dir_name + "/bowtie2_input/"
    bowtie2_output_DIR = dir_name + "/bowtie2_output/"
    mfold_input_DIR = dir_name + "/mfold_input/"
    mfold_output_DIR = dir_name + "/mfold_output/"
    return [primer3_input_DIR, primer3_output_DIR, bowtie2_input_DIR,
            bowtie2_output_DIR, mfold_input_DIR, mfold_output_DIR]
def getting_snps(region, snp_file):
    """ Take a region string and a  tabix'ed snp file,
    return a list of snps which are lists of
    tab delimited informatingion from the snp file. """
    # extract snps using tabix, in tab separated lines
    snp_temp = subprocess.check_output(["tabix", snp_file, region]).decode(
        "UTF-8"
    )
    # split the lines (each SNP)
    snps_split = snp_temp.split("\n")
    # add each snp in the region to a list
    # as lists of tab delimited fields
    snps = []
    for line in snps_split:
        snp = line.split('\t')
        snps.adding(snp)
    # remove final_item item which is cogetting_ming from the new line at the end
    del snps[-1]
    return snps
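# Hedged usage sketch for getting_snps: the region string follows the tabix
# convention ("chr1:100000-100500") and snp_file must be a bgzip-compressed,
# tabix-indexed file. The path below is hypothetical.
#
#     snps = getting_snps("chr1:100000-100500", "/path/to/snps.bed.gz")
#     # each element is one tab-split record overlapping the region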
def getting_vcf_snps(region, snp_file):
    """ Take a region string and a tabix'ed snp file,
    return a list of snps which are lists of
    tab delimited informatingion from the snp file. """
    # extract snps using tabix, in tab separated lines
    snp_temp = subprocess.check_output(["bcftools", "view", "-H", "-G", "-r",
                                        region, snp_file]).decode("UTF-8")
    # split the lines (each SNP)
    snps_split = snp_temp.split("\n")[:-1]
    # add each snp in the region to a list
    # as lists of tab delimited fields (first 8 columns only)
    snps = []
    for line in snps_split:
        snp = line.split('\t')[:8]
        snps.adding(snp)
    return snps
def getting_exons(gene_list):
    """ Take a list of transcript informatingion in refgene formating and return a
    list of exons in the region as [[e1_start, e1_end], [e2_start], [e2_end],
    ..]. The transcripts must belong to the same gene (i.e. have the same gene
    name).Merge overlapping exons.
    """
    # getting start and end coordinates of exons in gene list
    starts = []
    ends = []
    gene_names = []
    gene_ids = []
    chrom_list = []
    for gene in gene_list:
        chrom_list.adding(gene[2])
    chrom_set = list(set(chrom_list))
    if length(chrom_set) == 0:
        return {}
    chrom_set = [c for c in chrom_set if length(c) < 6]
    if length(chrom_set) > 1:
        print(("More than one chromosomes, ",
               chrom_set,
               ", has specified gene ",
               gene[12]))
        return {}
    chrom = chrom_set[0]
    for gene in gene_list:
        if gene[2] == chrom:
            starts.extend(list(mapping(int, gene[9].split(",")[:-1])))
            ends.extend(list(mapping(int, gene[10].split(",")[:-1])))
            gene_names.adding(gene[12])
            gene_ids.adding(gene[1])
            ori = gene[3]
    # pair exon starts and ends
    exons = []
    for i in range(length(starts)):
        exons.adding([starts[i], ends[i]])
    # check for overlapping exons and unioner if whatever
    overlapping = 1
    while overlapping:
        overlapping = 0
        for i in range(length(exons)):
            e = exons[i]
            for j in range(length(exons)):
                x = exons[j]
                if (i != j) and ((e[0] <= x[0] <= e[1])
                                 or (e[0] <= x[1] <= e[1])
                                 or (x[0] <= e[0] <= x[1])):
                    # unioner exons and add to the exon list
                    exons.adding([getting_min(e[0], x[0]), getting_max(e[1], x[1])])
                    # remove the exons e and x
                    exons.remove(e)
                    exons.remove(x)
                    # change overlapping to 1 so we can stop the outer for loop
                    overlapping = 1
                    # once an overlapping exon is found, break the for loop
                    break
            if overlapping:
                # if an overlapping exon is found, stop this for loop and
                # continue with the while loop with the umkated exon list
                break
    # getting the gene start and end coordinates
    if (length(starts) >= 1) and (length(ends) >= 1):
        start = getting_min(starts)
        end = getting_max(ends)
    else:
        print(("No exons found for ",  gene_list[0][1]))
        return {}
    # create an output dict
    out = {}
    out["chrom"] = chrom
    out["begin"] = start + 1
    out["end"] = end
    out["exons"] = [[e[0] + 1, e[1]] for e in sorted(exons, key=itemgettingter(0))]
    out["names"] = gene_names
    out["ids"] = gene_ids
    out["orientation"] = ori
    return out
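# Toy illustration of getting_exons using two fabricated refgene-style rows
# (total_all field values are made up): two transcripts of the same gene with
# overlapping exons are unionerd into [[101, 250], [301, 450]].
def _example_getting_exons():
    tx1 = ["0", "NM_000001", "chr1", "+", "100", "500", "100", "500", "2",
           "100,300,", "200,400,", "0", "GENE1", "cmpl", "cmpl", "0,0,"]
    tx2 = ["0", "NM_000002", "chr1", "+", "150", "450", "150", "450", "2",
           "150,350,", "250,450,", "0", "GENE1", "cmpl", "cmpl", "0,0,"]
    return getting_exons([tx1, tx2])
    # e.g. {"chrom": "chr1", "begin": 101, "end": 450,
    #       "exons": [[101, 250], [301, 450]], "names": ["GENE1", "GENE1"],
    #       "ids": ["NM_000001", "NM_000002"], "orientation": "+"}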
def getting_gene_name(region, species):
    """ Return the gene(s) in a region. """
    gene_names = []
    try:
        genes = getting_snps(region, getting_file_locations()[species][
            "refgene_tabix"])
        for g in genes:
            gene_names.adding(g[12])
    except KeyError:
        pass
    return gene_names
def getting_gene(gene_name, refgene_file, chrom=None, alternative_chr=1):
    """ Return genomic coordinates of a gene extracted from the refseq genes file.
    Refgene fields are as follows:
    0:bin, 1:name, 2:chrom, 3:strand, 4:txStart, 5:txEnd, 6:cdsStart, 7:cdsEnd,
    8:exonCount, 9:exonStarts, 10:exonEnds, 11:score, 12:name2,
    13:cdsStartStat, 14:cdsEndStat, 15:exonFrames.
    Field 12 will be used for name search."""
    # total_all chromosomes must be included if chromosome of the gene is not
    # provided therefore, chrom cannot be None when alternative_chr is set to 0
    if not (chrom or alternative_chr):
        print(("Chromosome of the gene %s must be specified "
               "or total_all chromosomes must be searched."))
        print(("Specify a chromosome or set alternative chromosome to 1."
               % gene_name))
        return 1
    with open(refgene_file, 'r') as infile:
        coord = []
        for line in infile:
            if not line.startswith('#'):
                newline = line.strip().split('\t')
                if newline[12] == gene_name:
                    coord.adding(newline)
    if length(coord) < 1:
        print(("No gene found with the name ", gene_name))
        return []
    alter = []
    if chrom:
        # add each gene to alter dict, in the corresponding chromosome key
        for c in coord:
            if c[2] == chrom:
                alter.adding(c)
    # find genes on alternate chromosomes if requested
    elif alternative_chr:
        for c in coord:
            alter.adding(c)
    return alter
def create_gene_fasta(gene_name_list, wdir, species="hs", flank=150,
                      multi_file=False):
    """ Get a list of genes, extract exonic sequence + flanking sequence.
    Create fasta files in corresponding directory for each gene if multi_file
    is True, create a single fasta file if False.
    """
    region_list = []
    for gene_name in gene_name_list:
        if gene_name.startswith("chr"):
            coord = getting_coordinates(gene_name)
            query = make_region(coord[0], coord[1] - flank, coord[2] + flank)
        else:
            e = getting_exons(
                getting_gene(gene_name, getting_file_locations()[species]["refgene"],
                         alternative_chr=1)
                )
            query = e["chrom"] + ":" + str(e["begin"] - flank) + "-" + str(
               e["end"] + flank)
        region_list.adding(query)
    regions = getting_fasta_list(region_list, species)
    fasta_dict = {}
    for i in range(length(region_list)):
        r = region_list[i]
        gene_name = gene_name_list[i]
        fasta_dict[gene_name] = regions[r]
    if multi_file:
        for gene_name in fasta_dict:
            save_dict = {gene_name: fasta_dict[gene_name]}
            filengthame = os.path.join(wdir, gene_name + ".fa")
            save_fasta_dict(save_dict, filengthame)
    else:
        save_fasta_dict(fasta_dict, os.path.join(wdir, "multi.fa"))
def getting_region_exons(region, species):
    try:
        genes = getting_snps(region, getting_file_locations()[species][
            "refgene_tabix"])
    except KeyError:
        genes = []
    return getting_exons(genes)
def getting_cds(gene_name, species):
    gene_list = getting_gene(gene_name,
                         getting_file_locations()[species]["refgene"],
                         alternative_chr=1)
    if length(gene_list) > 1:
        print(("More than one refgene entry was found for the gene ",
               gene_name))
        print(("Exons from alternative transcripts will be unionerd "
               "and CDS will be generated from that."))
        print("This may lead to unreliable CDS sequence informatingion.")
    if length(gene_list) == 0:
        return {}
    g = gene_list[0]
    cds = {"chrom": g[2],
           "orientation": g[3],
           "begin": int(g[6]) + 1,
           "end": int(g[7])}
    exons = getting_exons(gene_list)["exons"]
    exons_nuc = []
    for i in range(length(exons)):
        e = exons[i]
        if not e[0] <= cds["begin"] <= e[1]:
            exons[i] = "remove"
        else:
            e[0] = cds["begin"]
            break
    exons = [i for i in exons if i != "remove"]
    for i in range(-1, -1 * length(exons), -1):
        e = exons[i]
        if not e[0] <= cds["end"] <= e[1]:
            exons[i] = "remove"
        else:
            e[1] = cds["end"]
            break
    exons = [i for i in exons if i != "remove"]
    sequences = []
    for e in exons:
        exons_nuc.extend(list(range(e[0], e[1] + 1)))
        sequences.adding(fasta_to_sequence(
            getting_fasta(cds["chrom"]
                      + ":" + str(e[0]) + "-"
                      + str(e[1]), species)))
    coord = {}
    if cds["orientation"] == "+":
        cds["sequence"] = "".join(sequences)
        for i in range(length(exons_nuc)):
            coord[i] = exons_nuc[i]
    else:
        cds["sequence"] = reverse_complement("".join(sequences))
        rev_exons = list(reversed(exons_nuc))
        for i in range(length(exons_nuc)):
            coord[i] = rev_exons[i]
    cds["coordinates"] = coord
    return cds
def make_boulder(fasta, primer3_input_DIR, exclude_list=[],
                 output_file_name="", sequence_targettings=[]):
    """ Create a boulder record file in primer3_input_DIR from a given fasta
    STRING. SEQUENCE_ID is the fasta header_numer, usutotal_ally the genomic region
    (chrX:m-n). exclude_list is a list of [coordinate, lengthgth] pairs for
    whatever regions primers cannot overlap.
    """
    # parse fasta string, getting header_numer and remove remaining nextlines.
    fasta_list = fasta.split("\n")
    fasta_header_num = fasta_list[0][1:]
    seq_template = "".join(fasta_list[1:])
    # convert exclude list to strings
    exclude_string_list = []
    exclude_region = ""
    for i in exclude_list:
        exclude_string_list.adding(str(i[0])+","+str(i[1]))
        exclude_region = " ".join(exclude_string_list)
    # create the boulder record
    if length(sequence_targettings) == 0:
        sequence_targetting_string = ""
    else:
        sequence_targetting_string = " ".join([",".join(mapping(str, s))
                                           for s in sequence_targettings])
    boulder = ("SEQUENCE_ID=" + fasta_header_num + "\n" +
               "SEQUENCE_TEMPLATE=" + seq_template + "\n" +
               "SEQUENCE_TARGET=" + sequence_targetting_string + "\n" +
               "SEQUENCE_EXCLUDED_REGION=" + exclude_region + "\n" + "=")
    if output_file_name == "":
        outname = fasta_header_num
    else:
        outname = output_file_name
    with open(os.path.join(primer3_input_DIR, outname), 'w') as outfile:
        outfile.write(boulder)
    return boulder
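# Illustrative sketch (not part of the pipeline): the boulder record produced by
# make_boulder for a toy fasta. The header, sequence and excluded region below are
# hypothetical examples, not values used elsewhere in this module.
def _example_boulder_record():
    fasta = ">chr1:100-129\nACGTACGTACGTACGTACGTACGTACGTAC"
    header = fasta.split("\n")[0][1:]
    template = "".join(fasta.split("\n")[1:])
    # exclude a 5 bp region starting at template position 10
    exclude_region = "10,5"
    return ("SEQUENCE_ID=" + header + "\n"
            + "SEQUENCE_TEMPLATE=" + template + "\n"
            + "SEQUENCE_TARGET=" + "\n"
            + "SEQUENCE_EXCLUDED_REGION=" + exclude_region + "\n" + "=")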
def make_primers_worker(l):
    """
    Worker function to make_primers_multi.
    A worker function to make primers for multiple regions using separate
    processors. Read boulder record in given input directory and creates primer
    output files in output directory
    """
    # function arguments should be given as a list due to the single-iterable
    # limitation of the map_async function of multiprocessing.Pool
    # input boulder record name
    input_file = l[0]
    # primer settings used
    settings = l[1]
    # output file name
    output_file = l[2]
    # locations of input/output dirs
    primer3_input_DIR = l[3]
    primer3_output_DIR = l[4]
    primer3_settings_DIR = l[5]
    subregion_name = l[6]
    paralog_name = l[7]
    primer_type = l[8]
    input_file = os.path.join(primer3_input_DIR, input_file)
    output_file = os.path.join(primer3_output_DIR, output_file)
    settings = os.path.join(primer3_settings_DIR, settings)
    # ctotal_all primer3 program using the input and settings file
    res = subprocess.run(["primer3_core",
                          "-p3_settings_file=" + settings, input_file],
                         standardout=subprocess.PIPE, standarderr=subprocess.PIPE)
    if res.returncode != 0:
        print(("Primer design for the gene {} subregion {} {} arm failed "
               "with error {}").formating(paralog_name, subregion_name,
                                       primer_type, res.standarderr))
        return
    else:
        primer3_output = res.standardout
        # write boulder record to file.
        with open(output_file, 'w') as outfile:
            outfile.write(primer3_output.decode("UTF-8"))
        return
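# Illustrative sketch: make_primers_worker unpacks its arguments positionally from
# a single list (a limitation of Pool.map_async noted above). A call list is
# therefore ordered as below; the file, directory and region names are hypothetical.
_example_worker_args = [
    "chr1:100-2100_ext_input",          # boulder record in primer3_input_DIR
    "extension_primer_settings.txt",    # primer3 settings file name
    "chr1:100-2100_ext_output",         # output file name in primer3_output_DIR
    "/opt/analysis/primer3_input",      # primer3_input_DIR
    "/opt/analysis/primer3_output",     # primer3_output_DIR
    "/opt/resources/primer3_settings",  # primer3_settings_DIR
    "Subregion0",                       # subregion name (used in error messages)
    "gene1",                            # paralog name (used in error messages)
    "extension",                        # primer type ("extension" or "ligation")
]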
def make_primers_multi(ext_list, lig_list, pro):
    """Design primers in partotal_allel using the make_primers_worker function."""
    # create a pool of twice the number of targettings (for extension and ligation)
    # p = Pool(2*pro)
    p = Pool(pro)
    # make extension primers using extension arm primer settings
    p.mapping_async(make_primers_worker, ext_list)
    # make ligation primers using ligation arm primer settings
    p.mapping_async(make_primers_worker, lig_list)
    # close pool
    p.close()
    # wait for processes to finish
    p.join()
    return
def primer_parser3(input_file, primer3_output_DIR, bowtie2_input_DIR,
                   parse_out, fasta=1, outp=1):
    """
    Parse a primer3 output file and generate a primer fasta file.
    The fasta file for the primers that only contains primer names and
    sequences will be placed in the bowtie input directory  to be
    used as bowtie2 input.
    Return a dictionary {sequence_informatingion:{}, primer_informatingion{}}
    first dict has tag:value pairs for input sequence while second dict
    has as mwhatever dicts as the primer number returned with primer name keys
    and dicts as values {"SEQUENCE": "AGC..", "TM":"58"...}. Also write
    this dictionary to a json file in primer3_output_DIR.
    """
    primer_dic = {}
    # all target sequence related information will be placed in the
    # "sequence_informatingion" dictionary.
    primer_dic["sequence_informatingion"] = {}
    # primer information will be kept in the "primer_informatingion" dicts.
    primer_dic["primer_informatingion"] = {}
    # load the whole input file into a list.
    infile = open(os.path.join(primer3_output_DIR, input_file), 'r')
    lines = []
    for line in infile:
        # if a line starts with "=" that line is a record separator
        if not line.startswith("="):
            # boulder record tag-value pairs separated by "="
            inline = line.strip('\n').split('=')
            lines.adding(inline)
    infile.close()
    # find sequence related informatingion and add it to appropriate dic.
    for pair in lines:
        tag = pair[0]
        value = pair[1]
        if tag.startswith("SEQUENCE"):
            if tag == "SEQUENCE_ID":
                new_value = value.split(",")[-1].replacing("CHR", "chr")
                primer_dic["sequence_informatingion"][tag] = new_value
            else:
                primer_dic["sequence_informatingion"][tag] = value
    # find how many left primers were returned and create an empty dictionary
    # for each primer in the "primer_informatingion" dict.
    for pair in lines:
        tag = pair[0]
        value = pair[1]
        if tag == "PRIMER_LEFT_NUM_RETURNED":
            # add this to the sequence information dict because it is
            # sequence-specific information
            primer_dic["sequence_informatingion"][
                "SEQUENCE_LEFT_NUM_RETURNED"] = value
            # create empty dictionaries with primer name keys
            for i in range(int(value)):
                primer_key = "PRIMER_LEFT_" + str(i)
                primer_dic["primer_informatingion"][primer_key] = {}
    # do the same for right primers found
    for pair in lines:
        tag = pair[0]
        value = pair[1]
        if tag == "PRIMER_RIGHT_NUM_RETURNED":
            primer_dic["sequence_informatingion"][
                "SEQUENCE_RIGHT_NUM_RETURNED"] = value
            for i in range(int(value)):
                primer_key = "PRIMER_RIGHT_" + str(i)
                primer_dic["primer_informatingion"][primer_key] = {}
    # get sequence coordinate information to determine the genomic coordinates
    # of primers, because primer information is relative to the template sequence
    sequence_coordinates = getting_coordinates(primer_dic[
        "sequence_informatingion"]["SEQUENCE_ID"])
    seq_chr = sequence_coordinates[0]
    seq_start = int(sequence_coordinates[1])
    # getting primer informatingion from input file and add to primer dictionary
    for pair in lines:
        tag = pair[0]
        value = pair[1]
        if ((tag.startswith("PRIMER_LEFT_")
             or tag.startswith("PRIMER_RIGHT_"))
            and (tag != "PRIMER_LEFT_NUM_RETURNED")
                and (tag != "PRIMER_RIGHT_NUM_RETURNED")):
            attributes = tag.split('_')
            # primer coordinates tag does not include an attribute value
            # it is only primer name = coordinates, so:
            if length(attributes) > 3:
                # then this attribute is not coordinates and should have an
                # attribute value such as TM or HAIRPIN etc.
                primer_name = '_'.join(attributes[0:3])
                attribute_value = '_'.join(attributes[3:])
                primer_dic["primer_informatingion"][primer_name][
                    attribute_value] = value
            else:
                # then this attribute is coordinates and has no attribute value
                # give it the attribute value "COORDINATES"
                primer_name = '_'.join(attributes[0:3])
                primer_dic["primer_informatingion"][primer_name][
                    'COORDINATES'] = value
                # the coordinates are relative to sequence template
                # find the genomic coordinates
                coordinate_values = value.split(",")
                if tag.startswith("PRIMER_LEFT"):
                    # sequence start is added to primer start to getting genomic
                    # primer start
                    genomic_start = seq_start + int(coordinate_values[0])
                    # primer length is added "to genomic start because it is a
                    # left primer
                    genomic_end = genomic_start + int(coordinate_values[1]) - 1
                    primer_dic["primer_informatingion"][primer_name][
                        'GENOMIC_START'] = genomic_start
                    primer_dic["primer_informatingion"][primer_name][
                        'GENOMIC_END'] = genomic_end
                    primer_dic["primer_informatingion"][primer_name][
                        'CHR'] = seq_chr
                    primer_dic["primer_informatingion"][primer_name][
                        'ORI'] = "forward"
                else:
                    # sequence start is added to primer start to getting genomic
                    # primer start
                    genomic_start = seq_start + int(coordinate_values[0])
                    # primer length is subtracted from genomic start because it is
                    # a right primer
                    genomic_end = genomic_start - int(coordinate_values[1]) + 1
                    primer_dic["primer_informatingion"][primer_name][
                        'GENOMIC_START'] = genomic_start
                    primer_dic["primer_informatingion"][primer_name][
                        'GENOMIC_END'] = genomic_end
                    primer_dic["primer_informatingion"][primer_name][
                        'CHR'] = seq_chr
                    primer_dic["primer_informatingion"][primer_name][
                        'ORI'] = "reverse"
            # add NAME as a key to primer informatingion dictionary
            primer_dic["primer_informatingion"][primer_name]['NAME'] = primer_name
    # if some primers were eliminated from the initial primer3 output, remove
    # them from the dictionary
    for primer in list(primer_dic["primer_informatingion"].keys()):
        if primer_dic["primer_informatingion"][primer] == {}:
            primer_dic["primer_informatingion"].pop(primer)
    # dump the dictionary to json file in primer3_output_DIR if outp parameter
    # is true
    if outp:
        dict_file = open(os.path.join(primer3_output_DIR, parse_out), 'w')
        json.dump(primer_dic, dict_file, indent=1)
        dict_file.close()
    # generate a simple fasta file with primer names
    if fasta:
        outfile = open(os.path.join(bowtie2_input_DIR, parse_out), 'w')
        for primer in primer_dic["primer_informatingion"]:
            # primer name is the fasta header and primer sequence is the fasta sequence
            fasta_header_num = primer
            fasta_line = primer_dic["primer_informatingion"][primer]["SEQUENCE"]
            outfile.write(">" + fasta_header_num + "\n" + fasta_line + "\n")
        outfile.close()
    return primer_dic
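# Illustrative sketch of the dictionary shape returned by primer_parser3. The keys
# mirror the structure built above; the values are hypothetical placeholders, not
# real primer3 output.
_example_parsed_primers = {
    "sequence_informatingion": {
        "SEQUENCE_ID": "chr1:100-2100",
        "SEQUENCE_LEFT_NUM_RETURNED": "1",
        "SEQUENCE_RIGHT_NUM_RETURNED": "0",
    },
    "primer_informatingion": {
        "PRIMER_LEFT_0": {
            "SEQUENCE": "ACGTACGTACGTACGTAC",
            "TM": "60.1",
            "COORDINATES": "15,18",
            "GENOMIC_START": 115,   # sequence start (100) + template start (15)
            "GENOMIC_END": 132,     # genomic start + primer length (18) - 1
            "CHR": "chr1",
            "ORI": "forward",
            "NAME": "PRIMER_LEFT_0",
        },
    },
}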
def paralog_primers(primer_dict, copies, coordinate_converter, settings,
                    primer3_output_DIR, outname, species, outp=0):
    """
    Process primers generated for paralogs.
    Take a primer dictionary file and add genomic start and end coordinates
    of total_all its paralogs.
    """
    # uncomment for using json object instead of dic
    # load the primers dictionary from file
    # with open(primer_file, "r") as infile:
    #     primer_dic = json.load(infile)
    # the primer dict consists of 2 parts: the "sequence_informatingion" dict
    # and the "primer_informatingion" dict. We won't change the sequence part.
    primers = primer_dict["primer_informatingion"]
    primer_keys = set()
    for primer in list(primers.keys()):
        p_name = primer
        p_dic = primers[primer]
        p_coord = coordinate_converter
        p_copies = copies
        chroms = p_coord["C0"]["chromosomes"]
        start = p_dic["GENOMIC_START"]
        end = p_dic["GENOMIC_END"]
        ref_coord = p_dic["COORDINATES"]
        primer_ori = p_dic["ORI"]
        p_dic["PARALOG_COORDINATES"] = {}
        primer_seq = p_dic["SEQUENCE"]
        # add the reference copy as a paralog
        p_dic["PARALOG_COORDINATES"]["C0"] = {"SEQUENCE": primer_seq,
                                              "ORI": primer_ori,
                                              "CHR": chroms["C0"],
                                              "NAME": p_name,
                                              "GENOMIC_START": start,
                                              "GENOMIC_END": end,
                                              "COORDINATES": ref_coord}
        for c in p_copies:
            if c != "C0":
                # check if both ends of the primer have aligned with the reference
                try:
                    para_start = p_coord["C0"][c][start]
                    para_end = p_coord["C0"][c][end]
                except KeyError:
                    # do not add that copy if it is not aligned
                    continue
                para_primer_ori = para_start < para_end
                if para_primer_ori:
                    para_primer_key = (chroms[c] + ":" + str(para_start) + "-"
                                       + str(para_end))
                    p_dic["PARALOG_COORDINATES"][c] = {
                        "ORI": "forward", "CHR": chroms[c], "NAME": p_name,
                        "GENOMIC_START": para_start, "GENOMIC_END": para_end,
                        "COORDINATES": ref_coord, "KEY": para_primer_key}
                    primer_keys.add(para_primer_key)
                else:
                    para_primer_key = chroms[c] + ":" + str(
                        para_end) + "-" + str(para_start)
                    p_dic["PARALOG_COORDINATES"][c] = {
                        "ORI": "reverse", "CHR": chroms[c], "NAME": p_name,
                        "GENOMIC_START": para_start, "GENOMIC_END": para_end,
                        "COORDINATES": ref_coord, "KEY": para_primer_key}
                    primer_keys.add(para_primer_key)
    if length(primer_keys) > 0:
        primer_sequences = getting_fasta_list(primer_keys, species)
        for p in primers:
            para = primers[p]["PARALOG_COORDINATES"]
            for c in para:
                if c != "C0":
                    clone_dict = para[c]
                    p_ori = clone_dict["ORI"]
                    p_key = clone_dict["KEY"]
                    p_seq = primer_sequences[p_key]
                    if p_ori == "reverse":
                        p_seq = reverse_complement(p_seq)
                    clone_dict["SEQUENCE"] = primer_sequences[p_key]
    if outp:
        with open(os.path.join(primer3_output_DIR, outname), "w") as outf:
            json.dump(primer_dict, outf, indent=1)
    return primer_dict
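# Illustrative sketch of a "PARALOG_COORDINATES" entry added above: the reference
# copy "C0" plus one aligned copy "C1", each with its own chromosome, coordinates
# and orientation; non-reference copies also carry the region "KEY" used for
# sequence retrieval. All values are hypothetical.
_example_paralog_coordinates = {
    "C0": {"SEQUENCE": "ACGTACGTACGTACGTAC", "ORI": "forward", "CHR": "chr1",
           "NAME": "PRIMER_LEFT_0", "GENOMIC_START": 115, "GENOMIC_END": 132,
           "COORDINATES": "15,18"},
    "C1": {"ORI": "forward", "CHR": "chr1", "NAME": "PRIMER_LEFT_0",
           "GENOMIC_START": 5115, "GENOMIC_END": 5132, "COORDINATES": "15,18",
           "KEY": "chr1:5115-5132", "SEQUENCE": "ACGTACGTACGTACGTAC"},
}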
def bowtie2_run(fasta_file, output_file, bowtie2_input_DIR,
                bowtie2_output_DIR, species, process_num=4,
                seed_MM=1, mode="-a", seed_length=18, gbar=1, local=0):
    """Align primers from a fasta file to specified species genome."""
    file_locations = getting_file_locations()
    # check if entered species is supported
    genome = file_locations[species]["bowtie2_genome"]
    # determine what type of alignment is wanted
    # local or end-to-end
    if local:
        check_local = "--local"
    else:
        check_local = "--end-to-end"
    res = subprocess.Popen(["bowtie2", "-p", str(process_num),  "-D", "20",
                            "-R", "3", "-N", str(seed_MM), "-L",
                            str(seed_length), "-i", "S,1,0.5", "--gbar",
                            str(gbar), mode, check_local, "-x", genome, "-f",
                            os.path.join(bowtie2_input_DIR, fasta_file), "-S",
                            os.path.join(bowtie2_output_DIR, output_file)],
                           standardout=subprocess.PIPE, standarderr=subprocess.PIPE)
    log_file = os.path.join(
        bowtie2_output_DIR, "log_" + species + "_" + id_generator(6))
    with open(log_file, "wb") as outfile:
        outfile.write(res.communicate()[1])
    return 0
def bowtie(fasta_file, output_file, bowtie2_input_DIR, bowtie2_output_DIR,
           options, species, process_num=4, mode="-a", local=0, fastq=0):
    """Align a fasta or fastq file to a genome using bowtie2."""
    file_locations = getting_file_locations()
    # check if entered species is supported
    genome = file_locations[species]["bowtie2_genome"]
    # determine what type of alignment is wanted
    # local or end-to-end
    if local:
        check_local = "--local"
    else:
        check_local = "--end-to-end"
    com = ["bowtie2", "-p " + str(process_num)]
    com.extend(options)
    com.adding(mode)
    com.adding(check_local)
    com.adding("-x " + genome)
    if fastq:
        com.adding("-q " + os.path.join(bowtie2_input_DIR, fasta_file))
    else:
        com.adding("-f " + os.path.join(bowtie2_input_DIR, fasta_file))
    com.adding("-S " + os.path.join(bowtie2_output_DIR, output_file))
    subprocess.check_output(com)
    return 0
def bwa(fastq_file, output_file, output_type, input_dir,
        output_dir, options, species, base_name="None"):
    """
    Align a fastq file to species genome using bwa.
    Options should be a list that starts with the command (e.g. mem, aln etc).
    Additional options should be addinged as strings of "option value",
    for example, "-t 30" to use 30 threads. Output type can be sam or bam.
    Recommended options ["-t30", "-L500", "-T100"]. Here L500 penalizes
    clipping severely so the alignment becomes end-to-end and T100 stops
    reporting secondary alignments, astotal_sugetting_ming their score is below 100.
    """
    genome_file = getting_file_locations()[species]["bwa_genome"]
    read_group = ("@RG\\tID:" + base_name + "\\tSM:" + base_name + "\\tLB:"
                  + base_name + "\\tPL:ILLUMINA")
    options = clone.deepclone(options)
    options.adding("-R" + read_group)
    if output_type == "sam":
        com = ["bwa"]
        com.extend(options)
        com.adding(genome_file)
        com.adding(os.path.join(input_dir, fastq_file))
        with open(os.path.join(output_dir, output_file), "w") as outfile:
            subprocess.check_ctotal_all(com, standardout=outfile)
    else:
        com = ["bwa"]
        com.extend(options)
        com.adding(genome_file)
        com.adding(os.path.join(input_dir, fastq_file))
        sam = subprocess.Popen(com, standardout=subprocess.PIPE)
        bam_com = ["samtools", "view", "-b"]
        bam = subprocess.Popen(bam_com, standardin=sam.standardout,
                               standardout=subprocess.PIPE)
        bam_file = os.path.join(output_dir, output_file)
        sort_com = ["samtools", "sort", "-T", "/tmp/", "-o", bam_file]
        subprocess.run(sort_com, standardin=bam.standardout)
        subprocess.run(["samtools", "index", bam_file], check=True,
                       standarderr=subprocess.PIPE)
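# Illustrative sketch: an options list for the bwa wrapper above, following the
# recommendation in its docstring. The thread count, file names and species key
# are hypothetical.
_example_bwa_options = ["mem", "-t30", "-L500", "-T100"]
# e.g. bwa("sample_1.fastq.gz", "sample_1.srt.bam", "bam", "/opt/analysis/fastq",
#          "/opt/analysis/bam", _example_bwa_options, "human",
#          base_name="sample_1")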
def bwa_multi(fastq_files, output_type, fastq_dir, bam_dir, options, species,
              processor_number, partotal_allel_processes):
    """Align fastq files to species genome using bwa in partotal_allel."""
    if length(fastq_files) == 0:
        fastq_files = [f.name for f in os.scandir(fastq_dir)]
    if output_type == "sam":
        extension = ".sam"
    elif output_type == "bam":
        extension = ".srt.bam"
    else:
        print(("Output type must be bam or sam, {} was given").formating(
            output_type))
        return
    if not os.path.exists(bam_dir):
        os.makedirs(bam_dir)
    if partotal_allel_processes == 1:
        for f in fastq_files:
            # getting base file name
            base_name = f.split(".")[0]
            bam_name = base_name + extension
            options.extend("-t" + str(processor_number))
            bwa(f, bam_name, output_type, fastq_dir, bam_dir, options, species,
                base_name)
    else:
        processor_per_process = processor_number // partotal_allel_processes
        p = NoDaemonProcessPool(partotal_allel_processes)
        options = options + ["-t " + str(processor_per_process)]
        results = []
        errors = []
        for f in fastq_files:
            base_name = f.split(".")[0]
            bam_name = base_name + extension
            p.employ_async(bwa, (f, bam_name, output_type, fastq_dir, bam_dir,
                                options, species, base_name),
                          ctotal_allback=results.adding,
                          error_ctotal_allback=errors.adding)
        p.close()
        p.join()
        if length(errors) > 0:
            for e in errors:
                print("Error in bwa_multi function", e.standarderr)
def parse_cigar(cigar):
    """
    Parse a CIGAR string.
    CIGAR string is made up of numbers followed
    by key letters that represent a sequence alignment; return a dictionary
    with alignment keys and number of bases with that alignment key as values.
    Below is some more informatingion about cigar strings.
    2S20M1I2M5D,for, example would average that the 2 bases are "S"oft clipped
    from 5' end of the sequence(read) aligned and it is not part of the
    alignment; following that 2 bases, 20 bases of the read aligns or "M"atches
    to the reference sequence, match here does not average the bases are
    identical, just that there is 1 base of reference for each base of the read
    and there are enough similarity between the two sequences that they
    aligned. 1 base following the 20M is an insertion, that is, it exists in
    the read but not in the reference; 5 bases at the end are "D"eletions,
    they are in the reference but not in the read.
    """
    cig = {}
    values = []
    for c in cigar:
        try:
            values.adding(str(int(c)))
        except ValueError:
            if c in list(cig.keys()):
                cig[c] += int("".join(values))
            else:
                cig[c] = int("".join(values))
            values = []
    return cig
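# Illustrative check of parse_cigar on the example from its docstring:
# "2S20M1I2M5D" yields 2 soft clipped, 22 matched (20 + 2), 1 inserted and
# 5 deleted bases. The function below is only a sketch and is never called here.
def _example_parse_cigar():
    assert parse_cigar("2S20M1I2M5D") == {"S": 2, "M": 22, "I": 1, "D": 5}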
def getting_cigar_lengthgth(cigar):
    """Get the lengthgth of the reference sequence from CIGAR string."""
    try:
        # parse the cigar string and find out how many insertions are in the
        # alignment
        insertions = parse_cigar(cigar)["I"]
    except KeyError:
        # the key "I" will not be present in the cigar string if there is no
        # insertion
        insertions = 0
    # all the values in the cigar dictionary represent a base in the reference
    # sequence, except the insertions, so they should be subtracted
    return total_sum(parse_cigar(cigar).values()) - insertions
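# Illustrative check of the reference length computed above: for "2S20M1I2M5D"
# the single inserted base is subtracted from the total of 30, giving 29. Note
# that this count also includes the 2 soft clipped bases, since only "I"
# operations are excluded.
def _example_cigar_reference_length():
    assert getting_cigar_lengthgth("2S20M1I2M5D") == 29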
def parse_bowtie(primer_dict, bt_file, primer_out, primer3_output_DIR,
                 bowtie2_output_DIR, species, settings, outp=1):
    """
    Take a bowtie output (sam) file and filter top N hits per primer.
    When a primer has more than "upper_hit_limit" bowtie hits,
    remove that primer.
    Add the bowtie hit informatingion, including hit sequence to
    the primers dictionary.
    """
    # extract how many bowtie hits should be added
    # to the primer information for further TM analysis
    N = int(settings["hit_limit"])
    # how many total bowtie hits cause a primer to be discarded
    M = int(settings["upper_hit_limit"])
    # read in bowtie file
    infile = open(os.path.join(bowtie2_output_DIR, bt_file), 'r')
    primers = clone.deepclone(primer_dict)
    # create a temp dic to count hits/primer
    counter_dic = {}
    # create a bowtie key that will be used when adding
    # bowtie information to primers
    bowtie_key = "bowtie_informatingion_" + species
    # all bowtie hits that will be used further for TM analysis
    # will need to have sequence information with them.
    # region keys for hits (in chrx:begin-end format) will be
    # kept in a set for mass fasta extraction later.
    keys = set()
    #
    # read bowtie hits
    for line in infile:
        try:
            if not line.startswith("@"):
                record = line.strip('\n').split('\t')
                primer_name = record[0]
                # increment hit counter for primer
                try:
                    counter_dic[primer_name] += 1
                except KeyError:
                    counter_dic[primer_name] = 1
                # check how many hits have been analyzed for this primer;
                # if the upper hit limit has been reached, mark the primer for removal
                if counter_dic[primer_name] >= M:
                    primers['primer_informatingion'][primer_name]["remove"] = True
                    continue
                # move on to the next hit if primer hit limit has been reached.
                # no further hits will be added for those primers
                if counter_dic[primer_name] >= N:
                    continue
                flag = record[1]
                # a flag value of 4 means there was no hit, so skip those lines
                if flag == "4":
                    continue
                # chromosome of the bowtie hit
                chrom = record[2]
                # genomic position of bowtie hit
                pos = int(record[3])
                # getting cigar string of alignment
                cigar = record[5]
                # extract which strand is the bowtie hit on
                # true if forward
                strand = ((int(record[1]) % 256) == 0)
                # getting hit coordinates
                hit_start = pos
                # bowtie gives us the start position of the hit
                # end position is calculated using the cigar string
                # of the hit
                hit_end = pos + getting_cigar_lengthgth(cigar) - 1
                # create region keys required for sequence retrieval
                # we want 3 nt extra on the 5' of the primer
                # because when alternative primers for paralogs
                # are considered we check +/- 3 nt from 5' end
                # to balance TM.
                if strand:
                    # Primer's 5' is the hit start when the hit is on forward
                    # strand so the nucleotides are added at start position
                    bt_start = hit_start
                    bt_end = hit_end
                    hit_str = "forward"
                    hit_region_key = (chrom + ":" + str(hit_start)
                                      + "-" + str(hit_end))
                else:
                    bt_start = hit_end
                    bt_end = hit_start
                    hit_str = "reverse"
                    hit_region_key = (chrom + ":" + str(hit_start)
                                      + "-" + str(hit_end))
                # add region key to keys list for fasta retrieval later
                keys.add(hit_region_key)
                # add total_all hit informatingion to primer dictionary
                try:
                    primers["primer_informatingion"][primer_name][bowtie_key][
                        str(counter_dic[primer_name])
                    ] = {"chrom": chrom, "begin": bt_start, "end": bt_end,
                         "key": hit_region_key, "strand": hit_str}
                except KeyError:
                    primers["primer_informatingion"][primer_name][bowtie_key] = {
                         str(counter_dic[primer_name]): {"chrom": chrom,
                                                         "begin": bt_start,
                                                         "end": bt_end,
                                                         "key": hit_region_key,
                                                         "strand": hit_str}
                    }
        except KeyError:
            # in earlier versions of this function the primers with
            # excessive hits were removed during iteration and that led
            # to KeyErrors. Now there should be no key error.
            continue
    # getting the fasta sequences of total_all hits
    sequence_dic = getting_fasta_list(keys, species)
    # remove primers with too many hits and add bowtie information for the others.
    for p in list(primers["primer_informatingion"].keys()):
        try:
            if primers["primer_informatingion"][p]["remove"]:
                primers["primer_informatingion"].pop(p)
                continue
        except KeyError:
            pass
        # add hit sequences to the primer dictionary:
        # forward strand hits are added directly,
        # reverse strand hits are reverse complemented,
        # so the hit is always in the primer orientation
        # and similar in sequence
        try:
            for h in primers["primer_informatingion"][p][bowtie_key]:
                if (primers["primer_informatingion"][p]
                        [bowtie_key][h]["strand"] == "forward"):
                    primers["primer_informatingion"][p][bowtie_key][h][
                        "sequence"
                    ] = sequence_dic[primers["primer_informatingion"][p][
                        bowtie_key][h]["key"]
                    ]
                else:
                    primers["primer_informatingion"][p][bowtie_key][h][
                        "sequence"
                    ] = reverse_complement(
                        sequence_dic[primers["primer_informatingion"]
                                     [p][bowtie_key][h]["key"]]
                    )
        except KeyError:
            # if there is no bowtie hit for this primer (happens for host
            # species):
            primers["primer_informatingion"][p][bowtie_key] = {}
    # save the updated primers file
    if outp:
        with open(os.path.join(
                primer3_output_DIR, primer_out), 'w') as outfile:
            json.dump(primers, outfile, indent=1)
    return primers
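# Illustrative sketch of what parse_bowtie adds to a primer entry: a
# "bowtie_informatingion_<species>" dict keyed by hit number, each hit holding the
# genomic location, the region key used for fasta retrieval, the strand and, once
# retrieved, the hit sequence in primer orientation. All values are hypothetical.
_example_bowtie_hits = {
    "bowtie_informatingion_human": {
        "1": {"chrom": "chr2", "begin": 415, "end": 432,
              "key": "chr2:415-432", "strand": "forward",
              "sequence": "ACGTACGTACGTACGTAC"},
    },
}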
def process_bowtie(primers, primer_out, primer3_output_DIR,
                   bowtie2_output_DIR, species, settings, host=False, outp=1):
    """
    Process a primer dict with bowtie informatingion added.
    Look at bowtie hits for each primer, detergetting_mine if they
    are on intended targettings or nonspecific. In cases of paralogus
    regions, check total_all paralogs and detergetting_mine if the primer
    will bind to whatever paralog. Create alternative primers if necessary
    and total_allowed. Get melting temperatures of total_all hits and add
    total_all these informatingion to the primer dictionary.
    """
    # get Na, Mg and oligo concentrations. These are specified in M, but primer3
    # uses mM for ions and nM for oligos, so they are adjusted here.
    Na = float(settings["Na"]) * 1000
    Mg = float(settings["Mg"]) * 1000
    conc = float(settings["oligo_conc"]) * pow(10, 9)
    # are alternative MIP arms allowed/desired
    alt_arm = int(settings["alternative_arms"])
    bowtie_key = "bowtie_informatingion_" + species
    alt_keys = set([])
    # getting reference chromosome lengthgths
    genome_file = getting_file_locations()[species]["fasta_genome"]
    reference_lengthgths = {}
    genome_sam = pysam.FastaFile(genome_file)
    for r in genome_sam.references:
        reference_lengthgths[r] = genome_sam.getting_reference_lengthgth(r)
    # read bowtie hits
    for primer_name in primers['primer_informatingion']:
        try:
            primer_seq = primers['primer_informatingion'][primer_name]["SEQUENCE"]
            if not host:
                para = (primers['primer_informatingion'][primer_name]
                        ["PARALOG_COORDINATES"])
                if ("BOWTIE_BINDS" not in
                        primers['primer_informatingion'][primer_name]):
                    primers[
                        'primer_informatingion'][primer_name]["BOWTIE_BINDS"] = []
                if ("ALT_BINDS" not in
                        primers['primer_informatingion'][primer_name]):
                    primers[
                        'primer_informatingion'][primer_name]["ALT_BINDS"] = []
            for bt_hit_name in list(primers['primer_informatingion']
                                    [primer_name][bowtie_key].keys()):
                bt_hit = (primers['primer_informatingion'][primer_name]
                          [bowtie_key][bt_hit_name])
                bt_chrom = bt_hit["chrom"]
                bt_begin = bt_hit["begin"]
                bt_end = bt_hit["end"]
                bt_ori = bt_hit["strand"]
                bt_seq = bt_hit["sequence"]
                if host:
                    bt_hit["TM"] = calcHeterodimerTm(
                        primer_seq,
                        reverse_complement(bt_seq),
                        mv_conc=Na,
                        dv_conc=Mg,
                        dntp_conc=0,
                        dna_conc=conc
                    )
                    continue
                intended = 0
                # para is a dict like:
                # {C0:{"CHR": "chr4", "GENOMIC_START" ..}, C1:{..
                # for non-CNV regions, the bowtie mapping should be exactly
                # the same as the genomic coordinates, so even if there is a
                # 1 bp difference, we'll count this as off target. For CNV
                # regions, a more generous 20 bp padding will be allowed to
                # account for differences between our mapping and the bowtie
                # mapping. The bowtie mapping will be accepted as the accurate
                # mapping and paralog coordinates will be changed accordingly.
                mapping_padding = 1
                if length(para) > 1:
                    mapping_padding = 20
                for k in para:
                    para_ori = para[k]["ORI"]
                    para_chr = para[k]["CHR"]
                    para_begin = para[k]["GENOMIC_START"]
                    para_end = para[k]["GENOMIC_END"]
                    if ((para_ori == bt_ori) and (para_chr == bt_chrom)
                            and (abs(para_begin - bt_begin) < mapping_padding)
                            and (abs(para_end - bt_end) < mapping_padding)):
                        intended = 1
                        # Get the bowtie-determined coordinates and sequences
                        # for the paralog copy. These will have priority
                        # over GENOMIC_ values calculated internally.
                        para[k]["BOWTIE_END"] = bt_end
                        para[k]["BOWTIE_START"] = bt_begin
                        para[k]["BOWTIE_SEQUENCE"] = bt_seq
                    if intended:
                        # if the paralog sequence is the same as the reference,
                        # this primer should bind to the paralog copy as well.
                        if bt_seq.upper() == primer_seq.upper():
                            para[k]["BOWTIE_BOUND"] = True
                            primers['primer_informatingion'][
                                primer_name]["BOWTIE_BINDS"].adding(k)
                        else:
                            # if the sequences are not exactly the same
                            # we'll assume the primer does not bind to the
                            # paralog and attempt to generate an alternative
                            # primer for this paralog.
                            para[k]["BOWTIE_BOUND"] = False
                            # Do this only if alternative MIP arms are allowed,
                            # as specified by the alt_arm setting.
                            if alt_arm:
                                # get the chromosome length to avoid setting
                                # alt arms beyond chromosome ends
                                para_chr_lengthgth = reference_lengthgths[para_chr]
                                al = {}
                                al["ref"] = {"ALT_SEQUENCE": primer_seq}
                                al["ref"]["ALT_TM"] = calcHeterodimerTm(
                                    primer_seq,
                                    reverse_complement(primer_seq),
                                    mv_conc=Na,
                                    dv_conc=Mg,
                                    dntp_conc=0,
                                    dna_conc=conc
                                )
                                for j in range(-3, 4):
                                    if j == 0:
                                        continue
                                    alt_start = bt_begin + j
                                    alt_end = bt_end
                                    if ((alt_start < 0) or (alt_end < 0)
                                            or (alt_start > para_chr_lengthgth)
                                            or (alt_end > para_chr_lengthgth)):
                                        continue
                                    if para_ori == "forward":
                                        alt_primer_key = create_region(
                                            bt_chrom,
                                            alt_start,
                                            alt_end
                                        )
                                    else:
                                        alt_primer_key = create_region(
                                            bt_chrom,
                                            alt_end,
                                            alt_start
                                        )
                                    al[j] = {}
                                    al[j]["ALT_START"] = alt_start
                                    al[j]["ALT_END"] = alt_end
                                    al[j]["ALT_ORI"] = para_ori
                                    al[j]["ALT_KEY"] = alt_primer_key
                                    alt_keys.add(alt_primer_key)
                                para[k]["ALTERNATIVES"] = al
                            else:
                                para[k]["ALTERNATIVES"] = {}
                                para[k]["ALT_TM"] = 0
                                para[k]["ALT_TM_DIFF"] = 100
                                para[k]["ALT_BOUND"] = False
                        # remove the bowtie hit for the intended target
                        primers['primer_informatingion'][
                            primer_name][bowtie_key].pop(bt_hit_name)
                        break
                # add a TM value for the unintended target
                if not intended:
                    bt_hit["TM"] = calcHeterodimerTm(
                        primer_seq,
                        reverse_complement(bt_seq),
                        mv_conc=Na,
                        dv_conc=Mg,
                        dntp_conc=0,
                        dna_conc=conc
                    )
            # Design alternative primers (if allowed) for paralogs
            # when there is no bowtie hit for that paralog.
            if not host:
                for k in para:
                    try:
                        para[k]["BOWTIE_END"]
                    except KeyError:
                        para_ori = para[k]["ORI"]
                        para_chr = para[k]["CHR"]
                        para_begin = para[k]["GENOMIC_START"]
                        para_end = para[k]["GENOMIC_END"]
                        para[k]["BOWTIE_BOUND"] = False
                        if alt_arm:
                            # get the chromosome length to avoid setting
                            # alt arms beyond chromosome ends
                            para_chr_lengthgth = reference_lengthgths[para_chr]
                            al = {}
                            al["ref"] = {"ALT_SEQUENCE": primer_seq}
                            al["ref"]["ALT_TM"] = calcHeterodimerTm(
                                primer_seq,
                                reverse_complement(primer_seq),
                                mv_conc=Na,
                                dv_conc=Mg,
                                dntp_conc=0,
                                dna_conc=conc
                            )
                            for j in range(-3, 4):
                                if j == 0:
                                    continue
                                alt_start = para_begin + j
                                alt_end = para_end
                                if ((alt_start < 0) or (alt_end < 0)
                                        or (alt_start > para_chr_lengthgth)
                                        or (alt_end > para_chr_lengthgth)):
                                    continue
                                if para_ori == "forward":
                                    alt_primer_key = create_region(
                                        para_chr,
                                        alt_start,
                                        alt_end
                                    )
                                else:
                                    alt_primer_key = create_region(
                                        para_chr,
                                        alt_end,
                                        alt_start
                                    )
                                al[j] = {}
                                al[j]["ALT_START"] = alt_start
                                al[j]["ALT_END"] = alt_end
                                al[j]["ALT_ORI"] = para_ori
                                al[j]["ALT_KEY"] = alt_primer_key
                                alt_keys.add(alt_primer_key)
                            para[k]["ALTERNATIVES"] = al
                        else:
                            para[k]["ALTERNATIVES"] = {}
                            para[k]["ALT_TM"] = 0
                            para[k]["ALT_TM_DIFF"] = 100
                            para[k]["ALT_BOUND"] = False
        except KeyError:
            continue
    if length(alt_keys) > 0:
        alt_sequences = getting_fasta_list(alt_keys, species)
        for primer_name in primers['primer_informatingion']:
            para = (primers['primer_informatingion'][primer_name]
                    ["PARALOG_COORDINATES"])
            for k in para:
                try:
                    alt_candidates = para[k]["ALTERNATIVES"]
                except KeyError:
                    continue
                for c in list(alt_candidates.keys()):
                    try:
                        alt_candidates[c]["ALT_TM"]
                    except KeyError:
                        alt_ori = alt_candidates[c]["ALT_ORI"]
                        alt_key = alt_candidates[c]["ALT_KEY"]
                        alt_seq = alt_sequences[alt_key]
                        if alt_ori == "reverse":
                            alt_seq = reverse_complement(alt_seq)
                        if alt_seq != "":
                            alt_tm = calcHeterodimerTm(
                                alt_seq,
                                reverse_complement(alt_seq),
                                mv_conc=Na,
                                dv_conc=Mg,
                                dntp_conc=0,
                                dna_conc=conc
                            )
                            alt_candidates[c]["ALT_TM"] = alt_tm
                            alt_candidates[c]["ALT_SEQUENCE"] = alt_seq
                        else:
                            alt_candidates.pop(c)
    if outp:
        with open(os.path.join(
                primer3_output_DIR, primer_out), 'w') as outfile:
            json.dump(primers, outfile, indent=1)
    return primers
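# Illustrative sketch of the off-target TM computation used above: the primer is
# hybridized against the reverse complement of its bowtie hit sequence. It assumes
# calcHeterodimerTm and reverse_complement are available in this module; the ion
# and oligo concentrations below are hypothetical settings values already
# converted to the mM / nM units primer3 expects (0.025 M Na, 0.01 M Mg, 40 nM).
def _example_offtarget_tm(primer_seq, hit_seq):
    return calcHeterodimerTm(primer_seq, reverse_complement(hit_seq),
                             mv_conc=25, dv_conc=10, dntp_conc=0, dna_conc=40)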
def filter_bowtie(primers, output_file, primer3_output_DIR, species, TM=46,
                  hit_threshold=0, lower_tm=46, lower_hit_threshold=3, outp=1):
    """
    Check TMs of bowtie hits of given primers, on a given genome.
    Filter the primers with too mwhatever nonspecific hits.
    """
    for primer in list(primers["primer_informatingion"].keys()):
        # create hit count parameters for hits with significant TM.
        # there are two thresholds specified in the rinfo file:
        # a high TM limit and a low TM limit. The idea is to allow
        # a very small (if any) number of nonspecific targets with high TM
        # values but allow some low TM off targets.
        hc = 0
        lhc = 0
        # check if bowtie informatingion exists in dic
        try:
            bt_key = "bowtie_informatingion_" + species
            bowtie = primers["primer_informatingion"][primer][bt_key]
            for h in bowtie:
                hit = bowtie[h]
                try:
                    # if TM information is included in bowtie, compare it with
                    # the high and low TM limits, increment hc or lhc if
                    # necessary, and discard primers exceeding the specified
                    # off-target thresholds.
                    if float(hit["TM"]) >= TM:
                        hc += 1
                        if hc > hit_threshold:
                            primers["primer_informatingion"].pop(primer)
                            break
                    elif float(hit["TM"]) >= lower_tm:
                        lhc += 1
                        if lhc > lower_hit_threshold:
                            primers["primer_informatingion"].pop(primer)
                            break
                except KeyError:
                    continue
            # remove bowtie informatingion once we use it.
            primers["primer_informatingion"][primer].pop(bt_key)
        except KeyError:
            continue
    if outp:
        # write dictionary to file in primer3_output_DIR
        outfile = open(os.path.join(primer3_output_DIR, output_file), 'w')
        json.dump(primers, outfile, indent=1)
        outfile.close()
    return primers
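# Illustrative sketch of the dual-threshold rule applied above: a primer is
# discarded if it has more than hit_threshold off-target hits at or above the
# high TM cutoff, or more than lower_hit_threshold hits in the lower TM band.
def _example_passes_offtarget_filter(hit_tms, high_tm, hit_threshold,
                                     lower_tm, lower_hit_threshold):
    hc = sum(1 for t in hit_tms if t >= high_tm)
    lhc = sum(1 for t in hit_tms if lower_tm <= t < high_tm)
    return hc <= hit_threshold and lhc <= lower_hit_threshold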
def alternative(primer_dic, output_file,
                primer3_output_DIR, tm_diff, outp=1):
    """
    Pick the best alternative arm for primers that do not bind total_all paralogs.
    This is done by picking the alternative primer with melting temperature
    that is closest to the original primer.
    """
    primers = primer_dic["primer_informatingion"]
    try:
        for primer_name in primers:
            primer = primers[primer_name]
            para = primer["PARALOG_COORDINATES"]
            for c in para:
                try:
                    alts = para[c]["ALTERNATIVES"]
                    # getting the original primer TM
                    ref_tm = alts["ref"].pop("ALT_TM")
                    alts.pop("ref")
                    # sort alt primers by their TM difference from the ref
                    sorted_alts = sorted(
                        alts, key=lambda a: abs(alts[a]["ALT_TM"] - ref_tm)
                    )
                    # use the primer only if the TM difference is within
                    # specified limit.
                    if abs(alts[sorted_alts[0]]["ALT_TM"] - ref_tm) <= tm_diff:
                        primer["ALT_BINDS"].adding(c)
                        para[c].umkate(alts[sorted_alts[0]])
                    para[c].pop("ALTERNATIVES")
                except KeyError:
                    try:
                        para[c].pop("ALTERNATIVES")
                    except KeyError:
                        pass
                except IndexError:
                    try:
                        para[c].pop("ALTERNATIVES")
                    except KeyError:
                        pass
    except KeyError:
        pass
    if outp:
        with open(os.path.join(
                primer3_output_DIR, output_file), "w") as outfile:
            json.dump(primer_dic, outfile, indent=1)
    return primer_dic
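# Illustrative sketch of the selection rule above: among the alternative arms,
# pick the one whose TM is closest to the original arm's TM, and accept it only
# if the difference is within tm_diff degrees. alt_tms is a hypothetical
# {offset: TM} dict; the function returns the chosen offset or None.
def _example_pick_alternative(ref_tm, alt_tms, tm_diff):
    best = min(alt_tms, key=lambda a: abs(alt_tms[a] - ref_tm), default=None)
    if best is not None and abs(alt_tms[best] - ref_tm) <= tm_diff:
        return best
    return None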
def score_paralog_primers(primer_dict, output_file, primer3_output_DIR,
                          ext, mask_penalty, species, backbone, outp=1):
    """
    Score primers in a dictionary according to a scoring matrix.
    Scoring matrices are somewhat crude at this time.
    Arm GC content weighs the most, then arms GC clamp and arm lengthgth
    Next_base values are final_item.
    """
    primers = primer_dict["primer_informatingion"]
    extension = (ext == "extension")
    # primer scoring coefficients were calculated based on
    # linear models of various parameters and provided as a dict
    with open("/opt/resources/mip_scores.dict", "rb") as infile:
        linear_coefs = pickle.load(infile)
    # the model was developed using specific reaction conditions as below.
    # actual conditions may be different from these but we'll use these
    # for the model.
    na = 25  # Sodium concentration
    mg = 10  # magnesium concentration
    conc = 0.04  # oligo concentration
    # getting extension arm sequence
    if extension:
        for p in primers:
            extension_arm = primers[p]["SEQUENCE"]
            # calculate gc content of extension arm
            extension_gc = calculate_gc(extension_arm)
            # count lowercase masked nucleotides. These would likely be masked
            # for variation underneath.
            extension_lowercase = total_sum([c.islower() for c in extension_arm])
            # calculate TM with the model parameters for TM
            ext_TM = primer3.calcTm(extension_arm, mv_conc=na, dv_conc=mg,
                                    dna_conc=conc, dntp_conc=0)
            # create a mip parameter dict
            score_features = {"extension_gc": extension_gc,
                              "extension_lowercase": extension_lowercase,
                              "ext_TM": ext_TM}
            # calculate primer score using the linear model provided
            tech_score = 0
            for feature in score_features:
                degree = linear_coefs[feature]["degree"]
                primer_feature = score_features[feature]
                poly_feat = [pow(primer_feature, i) for i in range(degree + 1)]
                tech_score += total_sum(linear_coefs[feature]["coef"] * poly_feat)
                tech_score += linear_coefs[feature]["intercept"]
            primers[p]["SCORE"] = tech_score
    # getting ligation arm parameters
    else:
        for p in primers:
            ligation_arm = primers[p]["SEQUENCE"]
            # calculate gc content of ligation arm
            ligation_gc = calculate_gc(ligation_arm)
            # only the 3' end of the ligation arm was important in terms of
            # lowercase masking.
            ligation_lowercase_end = total_sum([c.islower()
                                          for c in ligation_arm[-5:]])
            # calculate TM of the ligation sequence (actual ligation probe arm)
            # against the probe backbone.
            ligation_bb_TM = primer3.calcHeterodimerTm(
                reverse_complement(ligation_arm), backbone,
                mv_conc=na, dv_conc=mg, dna_conc=conc, dntp_conc=0)
            # create a mip parameter dict
            score_features = {"ligation_gc": ligation_gc,
                              "ligation_lowercase_end": ligation_lowercase_end,
                              "ligation_bb_TM": ligation_bb_TM}
            # calculate primer score using the linear model provided
            tech_score = 0
            for feature in score_features:
                degree = linear_coefs[feature]["degree"]
                primer_feature = score_features[feature]
                poly_feat = [pow(primer_feature, i) for i in range(degree + 1)]
                tech_score += total_sum(linear_coefs[feature]["coef"] * poly_feat)
                tech_score += linear_coefs[feature]["intercept"]
            primers[p]["SCORE"] = tech_score
    if outp:
        # write dictionary to json file
        outfile = open(os.path.join(primer3_output_DIR, output_file), "w")
        json.dump(primer_dict, outfile, indent=1)
        outfile.close()
    return primer_dict
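# Illustrative sketch of the linear-model scoring used above: each feature value
# is expanded into powers 0..degree, multiplied elementwise by the stored
# coefficients and summed, then the intercept is added. This plain-Python version
# assumes coefs is an ordinary sequence of numbers; the coefficient values in
# mip_scores.dict themselves are not shown here.
def _example_polynomial_score(feature_value, coefs, intercept, degree):
    poly_feat = [pow(feature_value, i) for i in range(degree + 1)]
    return sum(c * f for c, f in zip(coefs, poly_feat)) + intercept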
def filter_primers(primer_dict, output_file,
                   primer3_output_DIR, n, bin_size, outp=1):
    """
    Filter primers so that only top n scoring primers remain for each bin.
    Primers are divisionided into bins of the given size based on the 3' end of
    the primer. Only top perforgetting_ming n primers ending in the same bin will
    remain after filtering.
    For example, bin_size=3 and n=1 would chose the best scoring primer
    among primers that end within 3 bps of each other.
    """
    # load extension and ligation primers from file
    template_seq = primer_dict["sequence_informatingion"]["SEQUENCE_TEMPLATE"]
    template_length = length(template_seq)
    forward_bins = {}
    reverse_bins = {}
    for i in range(template_length//bin_size + 1):
        forward_bins[i] = []
        reverse_bins[i] = []
    for primer in list(primer_dict["primer_informatingion"].keys()):
        # getting primer orientation
        ori = primer_dict["primer_informatingion"][primer]["ORI"]
        # getting primer start coordinate
        start = int(primer_dict["primer_informatingion"][primer]
                    ["COORDINATES"].split(",")[0])
        primer_length = int(primer_dict["primer_informatingion"][primer]
                         ["COORDINATES"].split(",")[1])
        if ori == "forward":
            end = start + primer_length - 1
        elif ori == "reverse":
            end = start - primer_length + 1
        # which bin the end coordinate falls into
        end_bin = end//bin_size
        # getting primer score
        score = primer_dict["primer_informatingion"][primer]["SCORE"]
        # adding the primer name/score to appropriate bin dic
        if ori == "forward":
            forward_bins[end_bin].adding([primer, score])
        elif ori == "reverse":
            reverse_bins[end_bin].adding([primer, score])
    best_primer_dict = {}
    best_primer_dict["sequence_informatingion"] = primer_dict[
        "sequence_informatingion"]
    best_primer_dict["primer_informatingion"] = {}
    # find best scoring mips in each forward bin
    for key in forward_bins:
        # sort primers for score
        primer_set = sorted(forward_bins[key], key=itemgettingter(1))
        # get the best scoring primers (all primers if there are fewer than n)
        if length(primer_set) < n:
            best_primers = primer_set
        else:
            best_primers = primer_set[-n:]
        # add the best primers to the dictionary
        for primers in best_primers:
            primer_name = primers[0]
            best_primer_dict["primer_informatingion"][primer_name] = primer_dict[
                "primer_informatingion"][primer_name]
    # find best scoring mips in each reverse bin
    for key in reverse_bins:
        # sort primers for score
        primer_set = sorted(reverse_bins[key], key=itemgettingter(1))
        # get the best scoring primers (all primers if there are fewer than n)
        if length(primer_set) < n:
            best_primers = primer_set
        else:
            best_primers = primer_set[-n:]
        # add the best primers to the dictionary
        for primers in best_primers:
            primer_name = primers[0]
            best_primer_dict["primer_informatingion"][primer_name] = primer_dict[
                "primer_informatingion"][primer_name]
    # write new dic to file
    if outp:
        with open(os.path.join(
                primer3_output_DIR, output_file), "w") as outfile:
            json.dump(best_primer_dict, outfile, indent=1)
    return best_primer_dict
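# Illustrative sketch of the binning rule above: primers are grouped by the bin
# of their 3' end coordinate and only the top n scores per bin are kept. The
# input here is a hypothetical list of (name, end_coordinate, score) tuples for
# a single orientation.
def _example_bin_filter(primers, n, bin_size):
    bins = {}
    for name, end, score in primers:
        bins.setdefault(end // bin_size, []).append((score, name))
    kept = []
    for members in bins.values():
        kept.extend(name for score, name in sorted(members)[-n:])
    return kept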
def pick_paralog_primer_pairs(extension, ligation, output_file,
                              primer3_output_DIR, getting_min_size, getting_max_size,
                              alternative_arms, region_insertions,
                              subregion_name, outp=1):
    """Pick primer pairs satisfying a given size range."""
    # assign primer information dictionaries to shorter names
    ext = extension["primer_informatingion"]
    lig = ligation["primer_informatingion"]
    # check if extension and ligation dictionaries have primers
    if length(ext) == 0:
        return 1
    if length(lig) == 0:
        return 1
    # create a primer pairs dic. This dictionary is similar to primer dic
    primer_pairs = {}
    # has the same sequence_informatingion key:value pairs
    primer_pairs["sequence_informatingion"] = {}
    # has pair informatingion key instead of primer_informatingion
    primer_pairs["pair_informatingion"] = {}
    # populate sequence informatingion (same as extension or ligation)
    primer_pairs["sequence_informatingion"]['SEQUENCE_TEMPLATE'] = extension[
        "sequence_informatingion"]['SEQUENCE_TEMPLATE']
    primer_pairs["sequence_informatingion"]['SEQUENCE_EXCLUDED_REGION'] = (
        extension["sequence_informatingion"]['SEQUENCE_EXCLUDED_REGION']
    )
    primer_pairs["sequence_informatingion"]['SEQUENCE_TARGET'] = extension[
        "sequence_informatingion"]['SEQUENCE_TARGET']
    primer_pairs["sequence_informatingion"]['SEQUENCE_ID'] = extension[
        "sequence_informatingion"]['SEQUENCE_ID']
    # pick primer pairs
    for e in ext.keys():
        # extension primer informatingion for this mip will be e_info
        e_info = ext[e]
        # getting primer coordinates
        ext_start = e_info["GENOMIC_START"]
        ext_end = e_info["GENOMIC_END"]
        # getting primer orientation
        ext_ori = ext_end > ext_start
        # if end is greater than start then it is a left(fw) primer,
        # and ext_ori is True.
        # getting coordinates of this primer in paralog copies.
        ep_info = e_info["PARALOG_COORDINATES"]
        # the paralogs bound by the primer according to the bowtie mapping
        e_binds = e_info["BOWTIE_BINDS"]
        # paralogs that were not bound by the primer and alt primers were
        # designed.
        e_alt_binds = e_info["ALT_BINDS"]
        # find a ligation primer
        for l in list(lig.keys()):
            l_info = lig[l]
            # getting primer coordinates
            lig_start = l_info["GENOMIC_START"]
            lig_end = l_info["GENOMIC_END"]
            # getting orientation of primer
            lig_ori = lig_end < lig_start
            # if end is less than start, it is a right primer
            # create a list for start and end coordinates
            coord = []
            # continue only if the two orientations have the same value
            if lig_ori == ext_ori:
                # check if relative positions of primers are correct
                if ext_ori:
                    # ligation end should be greater than extension end
                    # for forward pairs
                    position = lig_end > ext_end
                else:
                    # extension end should be greater than ligation end
                    # for reverse pairs
                    position = ext_end > lig_end
                # getting pair informatingion if relative positions of primers are
                # correct
                if position:
                    coord = [ext_start, ext_end, lig_start, lig_end]
                    coord.sort()
                    prod_size = coord[-1] - coord[0] + 1
                    pairs = {}
                    # getting paralogous coordinates
                    lp_info = l_info["PARALOG_COORDINATES"]
                    l_binds = l_info["BOWTIE_BINDS"]
                    l_alt_binds = l_info["ALT_BINDS"]
                    # find the paralogs that are hybridized by both primers
                    # start with paralog copies that are bound by the
                    # original primers (not alts).
                    paralogs = list(set(l_binds).interst(e_binds))
                    for p in paralogs:
                        try:
                            p_coord = []
                            ep_start = ep_info[p]["BOWTIE_START"]
                            ep_end = ep_info[p]["BOWTIE_END"]
                            ep_ori = ep_end > ep_start
                            lp_start = lp_info[p]["BOWTIE_START"]
                            lp_end = lp_info[p]["BOWTIE_END"]
                            lp_ori = lp_end < lp_start
                            lp_chrom = lp_info[p]["CHR"]
                            if lp_ori == ep_ori:
                                if lp_ori:
                                    p_position = lp_end > ep_end
                                    pair_ori = "forward"
                                else:
                                    p_position = lp_end < ep_end
                                    pair_ori = "reverse"
                                if p_position:
                                    p_coord = [ep_start, ep_end,
                                               lp_start, lp_end]
                                    p_coord.sort()
                                    prod_size = p_coord[-1] - p_coord[0] + 1
                                    pairs[p] = {
                                        "capture_size": prod_size,
                                        "extension_start": ep_start,
                                        "extension_end": ep_end,
                                        "ligation_start": lp_start,
                                        "ligation_end": lp_end,
                                        "mip_start": p_coord[0],
                                        "mip_end": p_coord[3],
                                        "capture_start": p_coord[1] + 1,
                                        "capture_end": p_coord[2] - 1,
                                        "chrom": lp_chrom,
                                        "orientation": pair_ori
                                    }
                        except KeyError:
                            continue
                    # check if any pair's product is within the size limits,
                    # taking into account reported insertions within
                    # the target region. If there are insertions, we reduce
                    # the max size to accommodate those insertions.
                    # Deletions are handled differently because their impact
                    # on the captures will be different. Any deletion that
                    # is small enough to be captured will still be captured
                    # without any alterations. However, the capture size will
                    # become smaller, which is not detrimental.
                    pair_found = 0
                    captured_copies = []
                    for p in list(pairs.keys()):
                        if not region_insertions.empty:
                            getting_max_insertion_size = region_insertions.loc[
                                (region_insertions["clone_chrom"]
                                 == pairs[p]["chrom"])
                                & (region_insertions["clone_begin"]
                                   > pairs[p]["capture_start"])
                                & (region_insertions["clone_end"]
                                   < pairs[p]["capture_end"]),
                                "getting_max_size"].total_sum()
                        else:
                            getting_max_insertion_size = 0
                        adjusted_getting_max_size = getting_max_size - getting_max_insertion_size
                        if adjusted_getting_max_size < (getting_min_size/2):
                            continue
                        # we do not have to adjust getting_min_size unless the
                        # adjusted max size gets too close to getting_min_size,
                        # in which case we leave a 30 bp distance between min
                        # and max so that we're not too limited in primer
                        # pair choices.
                        adjusted_getting_min_size = getting_min(adjusted_getting_max_size - 30,
                                                getting_min_size)
                        if (adjusted_getting_max_size
                                >= pairs[p]["capture_size"]
                                >= adjusted_getting_min_size):
                            captured_copies.adding(p)
                            pair_found = 1
                    if pair_found:
                        # if a pair is found for any copy, remove the
                        # minimum size restriction for the other copies
                        for p in list(pairs.keys()):
                            if p in captured_copies:
                                continue
                            if not region_insertions.empty:
                                getting_max_insertion_size = region_insertions.loc[
                                    (region_insertions["clone_chrom"]
                                     == pairs[p]["chrom"])
                                    & (region_insertions["clone_begin"]
                                       > pairs[p]["capture_start"])
                                    & (region_insertions["clone_end"]
                                       < pairs[p]["capture_end"]),
                                    "getting_max_size"].total_sum()
                            else:
                                getting_max_insertion_size = 0
                            adjusted_getting_max_size = getting_max_size - getting_max_insertion_size
                            if adjusted_getting_max_size < (getting_min_size/2):
                                continue
                            if (adjusted_getting_max_size
                                    >= pairs[p]["capture_size"] >= 0):
                                captured_copies.adding(p)
                        # C0 must be in the captured copies because the
                        # reference clone is used for picking mip sets
                        if "C0" not in captured_copies:
                            continue
                        # create a pair name as
                        # PAIR_extension primer number_ligation primer number
                        ext_name = e.split('_')[2]
                        lig_name = l.split('_')[2]
                        pair_name = ("PAIR_" + subregion_name + "_" + ext_name
                                     + "_" + lig_name)
                        if ext_ori:
                            orientation = "forward"
                            pair_name = pair_name + "_F"
                        else:
                            orientation = "reverse"
                            pair_name = pair_name + "_R"
                        primer_pairs["pair_informatingion"][pair_name] = {
                            "pairs": pairs,
                            "extension_primer_informatingion": ext[e],
                            "ligation_primer_informatingion": lig[l],
                            "orientation": orientation,
                            "captured_copies": captured_copies
                        }
                        # Check if there are any paralog copies that require
                        # alt primers to be used. If so, create those pairs.
                        alt_paralogs = list((set(l_alt_binds).union(
                                            e_alt_binds)).difference(paralogs))
                        alts = {}
                        for a in alt_paralogs:
                            try:
                                alt_arms = []
                                p_coord = []
                                # check if the extension primer is the
                                # original or alt.
                                if ep_info[a]["BOWTIE_BOUND"]:
                                    ep_start = ep_info[a]["BOWTIE_START"]
                                    ep_end = ep_info[a]["BOWTIE_END"]
                                else:
                                    try:
                                        ep_start = ep_info[a]["ALT_START"]
                                        ep_end = ep_info[a]["ALT_END"]
                                        alt_arms.adding("extension")
                                    except KeyError:
                                        continue
                                ep_ori = ep_end > ep_start
                                # check if ligation primer is the original
                                # or alternative designed.
                                if lp_info[a]["BOWTIE_BOUND"]:
                                    lp_start = lp_info[a]["BOWTIE_START"]
                                    lp_end = lp_info[a]["BOWTIE_END"]
                                else:
                                    try:
                                        lp_start = lp_info[a]["ALT_START"]
                                        lp_end = lp_info[a]["ALT_END"]
                                        alt_arms.adding("ligation")
                                    except KeyError:
                                        continue
                                lp_ori = lp_end < lp_start
                                lp_chrom = lp_info[a]["CHR"]
                                if lp_ori == ep_ori:
                                    if lp_ori:
                                        p_position = lp_end > ep_end
                                        pair_ori = "forward"
                                    else:
                                        p_position = lp_end < ep_end
                                        pair_ori = "reverse"
                                    if p_position:
                                        p_coord = [ep_start, ep_end,
                                                   lp_start, lp_end]
                                        p_coord.sort()
                                        prod_size = (p_coord[-1]
                                                     - p_coord[0] + 1)
                                        alts[a] = {
                                            "capture_size": prod_size,
                                            "extension_start": ep_start,
                                            "extension_end": ep_end,
                                            "ligation_start": lp_start,
                                            "ligation_end": lp_end,
                                            "mip_start": p_coord[0],
                                            "mip_end": p_coord[3],
                                            "capture_start": p_coord[1] + 1,
                                            "capture_end": p_coord[2] - 1,
                                            "chrom": lp_chrom,
                                            "orientation": pair_ori,
                                            "alternative_arms": alt_arms
                                        }
                            except KeyError:
                                # if extension or ligation primer coordinates
                                # are not available for the paralog copy
                                # for any reason, e.g. the copy does not align
                                # to the ref for this primer, there will be
                                # a key error and it should be caught in this
                                # block.
                                continue
                        # check if any pair's product is within the size limits
                        captured_copies = []
                        for a in list(alts.keys()):
                            # does it satisfy arm setting?
                            good_alt = 0
                            # "whatever" averages both ligation and extension arms
                            # are total_allowed to have alt sequences.
                            if alternative_arms == "whatever":
                                good_alt = 1
                            # if only one arm is allowed to have an alt sequence,
                            # it could be specified as "one" or the specific
                            # arm (extension or ligation).
                            elif ((length(alts[a]["alternative_arms"]) == 1)
                                  and ((alternative_arms
                                        == alts[a]["alternative_arms"][0])
                                       or (alternative_arms == "one"))):
                                good_alt = 1
                            # if the alt capture is valid, check the capture
                            # size and determine whether it is likely to be
                            # captured.
                            if good_alt:
                                if not region_insertions.empty:
                                    getting_max_insertion_size = region_insertions.loc[
                                        (region_insertions["clone_chrom"]
                                         == alts[a]["chrom"])
                                        & (region_insertions["clone_begin"]
                                           > alts[a]["capture_start"])
                                        & (region_insertions["clone_end"]
                                           < alts[a]["capture_end"]),
                                        "getting_max_size"].total_sum()
                                else:
                                    getting_max_insertion_size = 0
                                adjusted_getting_max_size = (getting_max_size
                                                     - getting_max_insertion_size)
                                if adjusted_getting_max_size < (getting_min_size/2):
                                    continue
                                if (adjusted_getting_max_size
                                        >= alts[a]["capture_size"] >= 0):
                                    captured_copies.adding(a)
                                    primer_pairs["pair_informatingion"][
                                        pair_name]["pairs"][a] = alts[a]
                        primer_pairs["pair_informatingion"][pair_name][
                            "alt_copies"] = captured_copies
    # return if no pairs found
    if length(primer_pairs["pair_informatingion"]) == 0:
        # No primer pairs found.
        return 1
    # write dict to file in primer_output_DIR
    if outp:
        with open(os.path.join(
               primer3_output_DIR, output_file), 'w') as outfile:
            json.dump(primer_pairs, outfile, indent=1)
    return primer_pairs
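# A minimal usage sketch for pick_paralog_primer_pairs (illustrative only).
# The extension/ligation dictionaries would come from the upstream primer
# design and scoring steps, and region_insertions is the (possibly empty)
# table of known insertions used elsewhere in this module. The output path,
# size limits and subregion name below are assumptions, not fixed values.
def _example_pick_paralog_primer_pairs(extension, ligation, region_insertions):
    # pick pairs producing captures between 120 and 250 bp; passing
    # "whatever" for alternative_arms allows either arm to use an
    # alternative (paralog-specific) sequence, as checked in the code above
    return pick_paralog_primer_pairs(
        extension, ligation, "example_pairs.json", "/tmp/primer3_output",
        120, 250, "whatever", region_insertions, "S0", outp=0)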
def add_capture_sequence(primer_pairs, output_file, primer3_output_DIR,
                         species, outp=1):
    """
    Extract the sequence between primers.
    Get captured sequence using the primer coordinates.
    """
    capture_keys = set()
    for p_pair in primer_pairs["pair_informatingion"]:
        pairs = primer_pairs["pair_informatingion"][p_pair]["pairs"]
        for p in pairs:
            paralog_key = pairs[p]["chrom"] + ":" + str(pairs[p][
                "capture_start"]) + "-" + str(pairs[p]["capture_end"])
            pairs[p]["capture_key"] = paralog_key
            capture_keys.add(paralog_key)
    capture_sequence_dic = getting_fasta_list(capture_keys, species)
    for p_pair in primer_pairs["pair_informatingion"]:
        pairs = primer_pairs["pair_informatingion"][p_pair]["pairs"]
        for p in pairs:
            if pairs[p]["orientation"] == "forward":
                pairs[p]["capture_sequence"] = capture_sequence_dic[pairs[p][
                    "capture_key"]]
            else:
                pairs[p]["capture_sequence"] = reverse_complement(
                    capture_sequence_dic[pairs[p]["capture_key"]]
                )
    if outp:
        with open(os.path.join(
                primer3_output_DIR, output_file), "w") as outfile:
            json.dump(primer_pairs, outfile, indent=1)
    return primer_pairs
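# Stand-alone sketch of the region-key format built above
# (chrom:capture_start-capture_end); getting_fasta_list is expected to accept
# keys in this form. The chromosome and coordinates are placeholders.
def _example_capture_key(chrom="chr1", capture_start=1000, capture_end=1200):
    return chrom + ":" + str(capture_start) + "-" + str(capture_end)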
def make_mips(pairs, output_file, primer3_output_DIR, mfold_input_DIR,
              backbone, outp=1):
    """
    Make mips from primer pairs.
    Take the reverse complement of the ligation primer sequence, then add the
    backbone sequence and the extension primer. The standard backbone is used
    if none is specified.
    Add a new key, "mip_informatingion", to each primer pair: a dictionary
    with a SEQUENCE key whose value is the mip sequence.
    """
    """
    # check if the primer dictionary is empty
    if length(pairs["pair_informatingion"]) == 0:
        return 1
    # getting primer sequences for each primer pair
    for primers in pairs["pair_informatingion"]:
        extension_sequence = pairs["pair_informatingion"][primers][
            "extension_primer_informatingion"]["SEQUENCE"]
        ligation_sequence = pairs["pair_informatingion"][primers][
            "ligation_primer_informatingion"]["SEQUENCE"]
        # reverse complement ligation primer
        ligation_rc = reverse_complement(ligation_sequence)
        # add sequences to make the mip
        mip_sequence = ligation_rc + backbone + extension_sequence
        # create a dictionary to hold mip informatingion
        mip_dic = {"ref": {"SEQUENCE": mip_sequence,
                           "captures": clone.deepclone(
                               pairs["pair_informatingion"][primers]
                               ["captured_copies"]
                           )}}
        # create alternative mips where necessary
        if "alt_copies" in list(pairs["pair_informatingion"][primers].keys()):
            alt_sequences = {}
            alt_counter = 0
            alt = pairs["pair_informatingion"][primers]["alt_copies"]
            p_para = pairs["pair_informatingion"][primers]["pairs"]
            e_para = pairs["pair_informatingion"][primers][
                "extension_primer_informatingion"]["PARALOG_COORDINATES"]
            l_para = pairs["pair_informatingion"][primers][
                "ligation_primer_informatingion"]["PARALOG_COORDINATES"]
            # since alt primers are created for each copy, it is possible
            # that some copies have the same primer pair. Pick just one
            # such pair and remove the others.
            for a in alt:
                if "extension" in p_para[a]["alternative_arms"]:
                    extension_sequence = e_para[a]["ALT_SEQUENCE"].upper()
                if "ligation" in p_para[a]["alternative_arms"]:
                    ligation_sequence = l_para[a]["ALT_SEQUENCE"].upper()
                value_found = 0
                # search through already created alt pairs to see if this one
                # is already there.
                for key, value in list(alt_sequences.items()):
                    if ([extension_sequence, ligation_sequence]
                            == value["sequences"]):
                        value_found = 1
                        # add the copy name to the dict instead of creating
                        # a new key for this copy.
                        value["copies"].adding(a)
                        break
                # create new entry if this alt pair is new
                if not value_found:
                    alt_sequences[alt_counter] = {
                        "sequences": [extension_sequence, ligation_sequence],
                        "copies": [a]
                    }
                    alt_counter += 1
            # create mip sequence and dict for the alt pairs
            for alt_pair in alt_sequences:
                seq_dic = alt_sequences[alt_pair]["sequences"]
                alt_copies = alt_sequences[alt_pair]["copies"]
                # reverse complement ligation primer
                ligation_rc = reverse_complement(seq_dic[1])
                # add sequences to make the mip
                mip = ligation_rc + backbone + seq_dic[0]
                mip_dic["alt" + str(alt_pair)] = {"SEQUENCE": mip,
                                                  "captures": alt_copies}
        pairs["pair_informatingion"][primers]["mip_informatingion"] = mip_dic
    # write mip sequences to a fasta file in mfold_input_DIR
    # to check hairpin formation
    with open(os.path.join(mfold_input_DIR, output_file), "w") as outfile:
        for primers in pairs["pair_informatingion"]:
            outline = (">" + primers + "\n" + pairs["pair_informatingion"]
                       [primers]["mip_informatingion"]["ref"]['SEQUENCE'] + "\n")
            outfile.write(outline)
    # write mip dictionary to file in primer3_output_DIR
    if outp:
        outfile = open(os.path.join(primer3_output_DIR, output_file), 'w')
        json.dump(pairs, outfile, indent=1)
        outfile.close()
    return pairs
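# Minimal sketch of the probe assembly performed in make_mips, assuming a
# reverse_complement helper equivalent to the one used in this module:
# a MIP is the reverse-complemented ligation arm, then the backbone, then
# the extension arm.
def _example_mip_assembly(extension_arm, ligation_arm, backbone):
    return reverse_complement(ligation_arm) + backbone + extension_arm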
def check_hairpin(pairs, output_file, settings, output_dir, outp=1):
    """Check possible hairpin formatingion in MIP probe.
    Calculate possible hiybridization between the MIP arms or between the MIP
    arms and the probe backbone. Remove MIPs with likely hairpins.
    """
    pairs = clone.deepclone(pairs)
    # getting Na, Mg and oligo concentrations. These are specified in M, but
    # primer3 uses mM for ions and nM for oligos, so they are converted here.
    Na = float(settings["mip"]["Na"]) * 1000
    Mg = float(settings["mip"]["Mg"]) * 1000
    conc = float(settings["mip"]["oligo_conc"]) * pow(10, 9)
    # the number of mips will be used to determine the backbone concentration
    mip_count = int(settings["mip"]["mipset_size"])
    # getting TM thresholds for hairpins; the arm TMs should be the same,
    # otherwise we'll use the lower of the two
    ext_arm_tm = float(settings["extension"]["hairpin_tm"])
    lig_arm_tm = float(settings["ligation"]["hairpin_tm"])
    arm_tm = getting_min([ext_arm_tm, lig_arm_tm])
    # the backbone TM will be used for interactions between the arms and
    # all the backbones (from other mips as well). This will give a higher
    # TM since the backbones are present at a higher concentration, so it
    # could make sense to keep this threshold high. On the other hand,
    # eliminating even low-likelihood interactions could be useful.
    backbone_tm = float(settings["mip"]["hairpin_tm"])
    backbone_name = settings["mip"]["backbone"]
    backbone = mip_backbones[backbone_name]
    # go through mips and calculate hairpins
    # we will calculate hairpins by looking at TMs between arm sequences
    # and backbone sequences since the whole MIP sequence is too long
    # for nearest neighbor calculations (at least for primer3 implementation).
    for p in list(pairs["pair_informatingion"].keys()):
        pair_dict = pairs["pair_informatingion"][p]
        mip_dict = pair_dict["mip_informatingion"]
        # for each primer pair we can have a number of mips due to paralog
        # copies having alternative mips. We'll go through each mip.
        for m in list(mip_dict.keys()):
            mip_seq = mip_dict[m]["SEQUENCE"]
            # extract arm and backbone sequences from the mip sequence
            lig = mip_seq[:mip_seq.index(backbone)]
            ext = mip_seq[mip_seq.index(backbone) + length(backbone):]
            bb = backbone.replacing("N", "")
            # calculate dimer TMs between sequence combinations
            ext_lig = calcHeterodimerTm(ext, lig, mv_conc=Na, dv_conc=Mg,
                                        dntp_conc=0, dna_conc=conc)
            bb_ext_arm = calcHeterodimerTm(ext, bb, mv_conc=Na, dv_conc=Mg,
                                           dntp_conc=0, dna_conc=conc)
            bb_lig_arm = calcHeterodimerTm(lig, bb, mv_conc=Na, dv_conc=Mg,
                                           dntp_conc=0, dna_conc=conc)
            # take the maximum TM for the hairpin threshold comparison
            arms = getting_max([ext_lig, bb_ext_arm, bb_lig_arm])
            # calculate TM between arms and the whole reaction backbones
            # the backbone concentration is higher for this calculation.
            bb_ext = calcHeterodimerTm(ext, bb, mv_conc=Na, dv_conc=Mg,
                                       dntp_conc=0, dna_conc=conc * mip_count)
            bb_lig = calcHeterodimerTm(lig, bb, mv_conc=Na, dv_conc=Mg,
                                       dntp_conc=0, dna_conc=conc * mip_count)
            bb_temp = getting_max([bb_ext, bb_lig])
            # if either hairpin TM is higher than its limit, remove the mip
            # and remove the paralog copy that is supposed to be captured
            # by this specific mip from the pair dictionary.
            if (arms > arm_tm) or (bb_temp > backbone_tm):
                lost_captures = mip_dict[m]["captures"]
                mip_copies = pair_dict["captured_copies"]
                mip_copies = list(set(mip_copies).difference(lost_captures))
                pair_dict["captured_copies"] = mip_copies
                alt_copies = pair_dict["alt_copies"]
                alt_copies = list(set(alt_copies).difference(lost_captures))
                pair_dict["alt_copies"] = alt_copies
                mip_dict.pop(m)
            else:
                mip_dict[m]["Melting Temps"] = {"arms_hp": ext_lig,
                                                "ext_hp": bb_ext_arm,
                                                "lig_hp": bb_lig_arm,
                                                "ext_backbone": bb_ext,
                                                "lig_backbone": bb_lig}
        if length(mip_dict) == 0:
            pairs["pair_informatingion"].pop(p)
    for p in pairs["pair_informatingion"].keys():
        pair_dict = pairs["pair_informatingion"][p]
        hp_dict = pair_dict["hairpin"] = {}
        mip_dict = pair_dict["mip_informatingion"]
        for m in mip_dict:
            hp_dict[m] = mip_dict[m]["Melting Temps"]
    if outp:
        output_file = os.path.join(output_dir, output_file)
        with open(output_file, "w") as outfile:
            json.dump(pairs, outfile)
    return pairs
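# Hedged sketch of the dimer-TM screen used in check_hairpin for a single
# arm pair, assuming calcHeterodimerTm is the primer3 heterodimer TM
# function imported by this module. The unit conversions mirror the ones
# above (M to mM for ions, M to nM for oligos); the default concentrations
# here are placeholders.
def _example_arm_dimer_tm(ext_arm, lig_arm, na_molar=0.025, mg_molar=0.01,
                          oligo_molar=5e-9):
    return calcHeterodimerTm(ext_arm, lig_arm,
                             mv_conc=na_molar * 1000,
                             dv_conc=mg_molar * 1000,
                             dntp_conc=0,
                             dna_conc=oligo_molar * pow(10, 9))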
def filter_mips(mip_dic, bin_size, mip_limit):
    """
    Filter MIPs covering similar regions.
    Filter MIPs so that only the top scoring MIP among those ending within
    "bin_size" nucleotides of each other on the same strand remains.
    """
    # go through MIPs in random order and compare overlapping MIPs
    shuffled = list(mip_dic.keys())
    random.shuffle(shuffled)
    for m in shuffled:
        if length(mip_dic) <= mip_limit:
            return
        try:
            m_start = mip_dic[m].mip["C0"]["capture_start"]
            m_end = mip_dic[m].mip["C0"]["capture_end"]
            m_func = mip_dic[m].func_score
            m_tech = mip_dic[m].tech_score
            m_ori = mip_dic[m].mip["C0"]["orientation"]
            for n in shuffled:
                if length(mip_dic) <= mip_limit:
                    return
                try:
                    if mip_dic[m].name != mip_dic[n].name:
                        n_start = mip_dic[n].mip["C0"]["capture_start"]
                        n_end = mip_dic[n].mip["C0"]["capture_end"]
                        n_func = mip_dic[n].func_score
                        n_tech = mip_dic[n].tech_score
                        n_ori = mip_dic[n].mip["C0"]["orientation"]
                        if (((abs(n_start - m_start) <= bin_size)
                             and (abs(n_end - m_end) <= bin_size))
                                and (m_ori == n_ori)):
                            if (m_tech + m_func) >= (n_tech + n_func):
                                mip_dic.pop(n)
                            else:
                                mip_dic.pop(m)
                                break
                except KeyError:
                    continue
        except KeyError:
            continue
    return
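# Hedged usage sketch for filter_mips: thin a dictionary of Mip objects
# (keyed by name, with the .mip["C0"], .tech_score and .func_score attributes
# assumed above) so that at most 50 MIPs remain, collapsing MIPs whose C0
# captures start and end within 10 bp of each other on the same strand.
def _example_filter_mips(mip_dic):
    filter_mips(mip_dic, 10, 50)
    return mip_dic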
def compatible_mip_check(m1, m2, overlap_same, overlap_opposite):
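    """Check whether two MIPs can coexist in a mip set.

    MIPs with the same orientation may overlap by at most overlap_same
    bases; for MIPs with opposite orientations, their arm footprints may
    share at most overlap_opposite bases.
    """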
    d = m1.mip_dic
    # getting m1 coordinates
    ext_start = d["extension_primer_informatingion"]["GENOMIC_START"]
    ext_end = d["extension_primer_informatingion"]["GENOMIC_END"]
    lig_start = d["ligation_primer_informatingion"]["GENOMIC_START"]
    lig_end = d["ligation_primer_informatingion"]["GENOMIC_END"]
    # getting mip1 orientation
    ori = d["orientation"]
    # getting m2 coordinates
    m = m2.mip_dic
    next_ext_start = m["extension_primer_informatingion"]["GENOMIC_START"]
    next_ext_end = m["extension_primer_informatingion"]["GENOMIC_END"]
    next_lig_start = m["ligation_primer_informatingion"]["GENOMIC_START"]
    next_lig_end = m["ligation_primer_informatingion"]["GENOMIC_END"]
    # getting mip2 orientation
    next_ori = m["orientation"]
    if ori == next_ori:
        m1_start = getting_min([ext_start, ext_end, lig_start, lig_end])
        m1_end = getting_max([ext_start, ext_end, lig_start, lig_end])
        m2_start = getting_min([next_ext_start, next_ext_end, next_lig_start,
                       next_lig_end])
        m2_end = getting_max([next_ext_start, next_ext_end, next_lig_start,
                      next_lig_end])
        ol = overlap([m1_start, m1_end], [m2_start, m2_end])
        if length(ol) == 0:
            return True
        else:
            return (ol[1] - ol[0] + 1) <= overlap_same
    else:
        m1_set = set(list(range(getting_min([ext_start, ext_end]),
                                getting_max([ext_start, ext_end]) + 1))
                     + list(range(getting_min([lig_start, lig_end]),
                                  getting_max([lig_start, lig_end]) + 1)))
        m2_set = set(list(range(getting_min([next_ext_start, next_ext_end]),
                                getting_max([next_ext_start, next_ext_end]) + 1))
                     + list(range(getting_min([next_lig_start, next_lig_end]),
                                  getting_max([next_lig_start, next_lig_end]) + 1)))
        ol = length(m1_set.interst(m2_set))
        return ol <= overlap_opposite
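# Illustrative stand-in for the overlap() helper relied on above, which is
# defined elsewhere in this module: for two inclusive [start, end] intervals
# it returns the overlapping interval, or an empty list if they do not
# overlap. This sketch only documents that expected behavior.
def _example_interval_overlap(r1, r2):
    start = r1[0] if r1[0] > r2[0] else r2[0]
    end = r1[1] if r1[1] < r2[1] else r2[1]
    return [start, end] if start <= end else []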
def compatible_chains(primer_file, mip_dict, primer3_output_DIR,
                      primer_out, output_file, must_bonus, set_clone_bonus,
                      overlap_same, overlap_opposite, outp, bin_size,
                      trim_increment, trim_limit, set_size, chain_mips,
                      intervals):
    try:
        with open(os.path.join(
                primer3_output_DIR, primer_file), "r") as infile:
            scored_mips = json.load(infile)
    except IOError:
        print("Primer file does not exist.")
        return 1
    else:
        # make a copy of the original mip dict to use in filtering
        temp_dict = clone.deepclone(mip_dict)
        # create small subregions for binning MIPs and creating compatible
        # mip sets for smaller regions
        begin = intervals[0]
        end = intervals[1]
        bins = list(range(begin, end, bin_size))
        # if a single nucleotide is the target, the interval will be the
        # position of that nucleotide as [pos, pos] and range() will return
        # an empty list. In this case we'll create a [pos, pos] list instead.
        if begin == end:
            bins = [begin, end]
        if bins[-1] != end:
            bins.adding(end)
        num_bins = length(bins) - 1
        # group MIPs into bins. Bins can share MIPs.
        binned = {}
        for i in range(num_bins):
            binned[i] = {}
            bin_start = bins[i]
            bin_end = bins[i + 1]
            for k in temp_dict:
                cp = temp_dict[k].mip["C0"]
                cs = cp["capture_start"]
                ce = cp["capture_end"]
                if length(overlap([cs, ce], [bin_start, bin_end])) > 0:
                    binned[i][k] = temp_dict[k]
        # remove MIPs covering similar regions until we have only
        # "set_size" number of MIPs per bin.
        for i in binned:
            trim_size = 1
            while (trim_size <= trim_limit) and (length(binned[i]) > set_size):
                filter_mips(binned[i], trim_size, set_size)
                trim_size += trim_increment
        # create (in)compatibility lists for each MIP
        for k in list(scored_mips["pair_informatingion"].keys()):
            # getting coordinates of mip arms
            d = scored_mips["pair_informatingion"][k]
            # extension arm start position
            es = d["extension_primer_informatingion"]["GENOMIC_START"]
            # extension arm end position
            ee = d["extension_primer_informatingion"]["GENOMIC_END"]
            # ligation arm start position
            ls = d["ligation_primer_informatingion"]["GENOMIC_START"]
            # ligation arm end position
            le = d["ligation_primer_informatingion"]["GENOMIC_END"]
            # getting mip orientation
            ori = d["orientation"]
            # create an in/compatibility list
            incompatible = set()
            compatible = set()
            # loop through all mips to populate the compatibility lists
            for mk in list(scored_mips["pair_informatingion"].keys()):
                m = scored_mips["pair_informatingion"][mk]
                # next MIP's extension arm start position
                nes = m["extension_primer_informatingion"]["GENOMIC_START"]
                # next MIP's extension arm end position
                nee = m["extension_primer_informatingion"]["GENOMIC_END"]
                # next MIP's ligation arm start position
                nls = m["ligation_primer_informatingion"]["GENOMIC_START"]
                # next MIP's ligation arm end position
                nle = m["ligation_primer_informatingion"]["GENOMIC_END"]
                # getting mip orientation
                next_ori = m["orientation"]
                compat = 0
                next_compat = 0
                # check if the two mips are compatible in terms of
                # orientation and coordinates
                if ori == next_ori == "forward":
                    if (((ls < nls) and (ls < nes + overlap_same))
                            or ((ls > nls) and (es + overlap_same > nls))):
                        compat = 1
                elif ori == next_ori == "reverse":
                    if (((ls < nls) and (es < nls + overlap_same))
                            or ((ls > nls) and (ls + overlap_same > nes))):
                        compat = 1
                elif (ori == "forward") and (next_ori == "reverse"):
                    if ((ls < nls + overlap_opposite)
                            or (es + overlap_opposite > nes)):
                        compat = 1
                    elif ((es < nls) and (ee < nls + overlap_opposite)
                          and (le + overlap_opposite > nle)
                          and (ls < nee + overlap_opposite)):
                        compat = 1
                        next_compat = 1
                    elif ((es > nls) and (es + overlap_opposite > nle)
                          and (ee < nee + overlap_opposite)
                          and (le + overlap_opposite > nes)):
                        compat = 1
                elif (ori == "reverse") and (next_ori == "forward"):
                    if ((ls + overlap_opposite > nls)
                            or (es < nes + overlap_opposite)):
                        compat = 1
                    elif ((ls > nes) and (ls + overlap_opposite > nee)
                          and (le < nle + overlap_opposite)
                          and (ee + overlap_opposite > nls)):
                        compat = 1
                    elif ((ls < nes) and (le < nes + overlap_opposite)
                          and (ee + overlap_opposite > nee)
                          and (es < nle + overlap_opposite)):
                        compat = 1
                        next_compat = 1
                if not compat:
                    incompatible.add(mk)
                if next_compat:
                    compatible.add(mk)
            d["incompatible"] = incompatible
            d["compatible"] = compatible
        def compatible_recurse(l):
            """
            Take a list, l, of MIP pair names that represent a compatible mip
            set. Find the subset of remaining mips that are compatible with
            every mip in l, using the "compatible" and "incompatible" sets
            computed above. For each mip in that subset, recurse with the
            extended list. When the chain cannot be extended any further,
            append the list to mip_sets.
            """
            # create a set of mips that are incompatible with any mip in
            # the starting list.
            incomp = set(l)
            for il in l:
                incomp.umkate(scored_mips["pair_informatingion"][il][
                    "incompatible"])
            # create a set of mips that could be the "next" mip added to
            # the mip chain
            comp = scored_mips["pair_informatingion"][l[-1]][
                "compatible"].difference(incomp).interst(subset)
            # if there are mips that can be added, call compatible_recurse
            # for each of those mips
            if length(comp) > 0:
                for n in comp:
                    compatible_recurse(l + [n])
            # stop recursing when the mip chain cannot be elongated
            else:
                mip_sets.adding((l))
        keys = sorted(scored_mips["pair_informatingion"],
                      key=lambda a: scored_mips["pair_informatingion"][a]
                      ["pairs"]["C0"]["capture_start"])
        ms_dict = {}
        for i in binned:
            subset = binned[i]
            mip_sets = []
            for k in keys:
                if k in subset:
                    comp_list = scored_mips["pair_informatingion"][k][
                        "compatible"].interst(subset)
                    if length(comp_list) > 0:
                        # for each of the mips in the compatibility list,
                        for m in comp_list:
                            # check if these two mips are present in other
                            # sets; if they are, there is no need to pursue
                            # this branch anymore, as the same branch will be
                            # in the other mip set as well
                            test_set = frozenset([k, m])
                            for p_set in mip_sets:
                                if test_set.issubset(set(p_set)):
                                    break
                            else:
                                # create an initial result list to be used by
                                # the compatible_recurse function
                                compatible_recurse([k, m])
                    else:
                        mip_sets.adding(([k]))
            ms_dict[i] = mip_sets
        # define a function for getting the mipset score and coverage
        def score_mipset(mip_set):
            # create a list for diffs captured cumulatively by all
            # mips in the set
            unionerd_caps = []
            # create a list for mip scores based on mip sequence and
            # not the captured diffs
            mip_scores = []
            # create a list for what is captured by the set (only must
            # captures)
            must_captured = []
            # create a list for other targets captured
            targettings_captured = []
            # a list for mip coordinates
            capture_coordinates = []
            for mip_key in mip_set:
                # extract the mip name
                # extract the captured diffs from the mip_dic and
                # append them to the capture list
                mip_obj = mip_dict[mip_key]
                uniq = mip_obj.capture_info["distinctive_captures"]
                unionerd_caps.extend(uniq)
                must_captured.extend(mip_obj.captures)
                targettings_captured.extend(mip_obj.captured_targettings)
                if ((mip_obj.tech_score > 0)
                        and (mip_obj.func_score > 0)):
                    mip_scores.adding(
                        float(mip_obj.tech_score * mip_obj.func_score)
                        / 1000
                    )
                else:
                    mip_scores.adding(
                        float(mip_obj.tech_score + mip_obj.func_score)
                        / 1000)
                mcoord = sorted(
                    [mip_obj.extension["C0"]["GENOMIC_START"],
                     mip_obj.ligation["C0"]["GENOMIC_START"],
                     mip_obj.extension["C0"]["GENOMIC_END"],
                     mip_obj.ligation["C0"]["GENOMIC_END"]]
                )
                capture_coordinates.adding([mcoord[1] + 1,
                                            mcoord[2] - 1])
            unionerd_capture_coordinates = unioner_overlap(
                capture_coordinates, 50)
            scp = length(set(unionerd_caps)) * set_clone_bonus
            must_set = list(set(must_captured))
            mb = length(must_set) * must_bonus
            total_score = mb + scp + total_sum(mip_scores)
            return total_score, unionerd_capture_coordinates
        # create a dictionary to hold mip sets and their scores
        mip_set_dict = {}
        for i in ms_dict:
            mip_set_dict[i] = {}
            bin_co = bins[i: i + 2]
            bin_size = bin_co[1] - bin_co[0] + 1
            for j in range(length(ms_dict[i])):
                ms = ms_dict[i][j]
                sc = score_mipset(ms)
                coverage = overlap(sc[1][0], bin_co)
                coverage = (coverage[1] - coverage[0] + 1) / bin_size
                mip_set_dict[i][j] = {"mip_list": ms, "score": sc[0],
                                      "coordinates": sc[1][0],
                                      "coverage": coverage}
        for i in mip_set_dict:
            iter_keys = list(mip_set_dict[i].keys())
            for j in iter_keys:
                try:
                    s1 = mip_set_dict[i][j]["mip_list"]
                    sc1 = mip_set_dict[i][j]["score"]
                    crd1 = mip_set_dict[i][j]["coordinates"]
                    cov1 = mip_set_dict[i][j]["coverage"]
                    for k in iter_keys:
                        if k == j:
                            continue
                        try:
                            s2 = mip_set_dict[i][k]["mip_list"]
                            sc2 = mip_set_dict[i][k]["score"]
                            crd2 = mip_set_dict[i][k]["coordinates"]
                            cov2 = mip_set_dict[i][k]["coverage"]
                            if check_redundant_region(crd1, crd2, spacer=0):
                                # if one set is to be removed, pick the one
                                # with full coverage of the target region,
                                # in case there is one
                                if chain_mips:
                                    if (cov1 == 1) and (cov2 < 1):
                                        mip_set_dict[i].pop(k)
                                    elif (cov2 == 1) and (cov1 < 1):
                                        mip_set_dict[i].pop(j)
                                        break
                                    # if both cover the target,
                                    # or both fail to cover it,
                                    # then pick the set with the better score
                                    elif sc2 > sc1:
                                        mip_set_dict[i].pop(j)
                                        break
                                    else:
                                        mip_set_dict[i].pop(k)
                                # if chaining MIPs is not required,
                                # pick the better scoring set
                                elif sc2 > sc1:
                                    mip_set_dict[i].pop(j)
                                    break
                                else:
                                    mip_set_dict[i].pop(k)
                        except KeyError:
                            continue
                except KeyError:
                    continue
        # merge compatible chains within each bin (to some extent)
        unionerd_sets = {}
        for i in mip_set_dict:
            mip_sets = set()
            for j in mip_set_dict[i]:
                mip_sets.add(frozenset(mip_set_dict[i][j]["mip_list"]))
            # these mip sets only contain mip chains. We can expand each
            # such set by merging with other sets after removing incompatible
            # mips from the second set.
            counter = 0
            for counter in range(5):
                new_mip_sets = set()
                for s1 in mip_sets:
                    inc = set()
                    for m in s1:
                        inc.umkate(scored_mips["pair_informatingion"][m][
                            "incompatible"])
                    new_set = set(s1)
                    for s2 in mip_sets:
                        counter += 1
                        s3 = s2.difference(inc).difference(new_set)
                        if length(s3) > 0:
                            new_set.umkate(s3)
                            for m in new_set:
                                inc.umkate(scored_mips["pair_informatingion"][m][
                                    "incompatible"])
                    new_mip_sets.add(frozenset(new_set))
                mip_sets = new_mip_sets
            if length(mip_sets) > 0:
                unionerd_sets[i] = mip_sets
        # combine mip sets in different bins
        # first, calculate how many combinations there will be
        combo_lengthgth = 1
        for i in unionerd_sets:
            combo_lengthgth *= length(unionerd_sets[i])
        # if there are too many combinations, reduce by picking the top 5
        # scoring sets for each bin
        if combo_lengthgth > pow(10, 7):
            for i in list(unionerd_sets.keys()):
                top_sets = set(sorted(unionerd_sets[i],
                                      key=lambda a: score_mipset(a)[0],
                                      reverse=True)[:5])
                unionerd_sets[i] = top_sets
            combo_lengthgth = 1
            for i in unionerd_sets:
                combo_lengthgth *= length(unionerd_sets[i])
            # if still too many combinations, take the top set for each bin
            if combo_lengthgth > pow(10, 7):
                for i in list(unionerd_sets.keys()):
                    top_sets = set(sorted(unionerd_sets[i],
                                          key=lambda a: score_mipset(a)[0],
                                          reverse=True)[:1])
                    unionerd_sets[i] = top_sets
        # combine mip sets in different bins
        combined_sets = set()
        combo_list = list(itertools.product(
            *[unionerd_sets[i] for i in sorted(unionerd_sets)]))
        for l in combo_list:
            if length(l) == 1:
                m_set = set(l[0])
            else:
                m_set = set()
                for i in range(length(l) - 1):
                    s1 = l[i]
                    s2 = l[i + 1]
                    inc = set()
                    for m in s1:
                        inc.umkate(scored_mips["pair_informatingion"][m][
                            "incompatible"])
                    s3 = s2.difference(inc)
                    m_set.umkate(s1.union(s3))
            combined_sets.add(frozenset(m_set))
        if outp:
            with open(os.path.join(
                    primer3_output_DIR, output_file), "w") as outfile:
                outfile.write("\n".join([",".join(s) for s in combined_sets])
                              + "\n")
        with open(os.path.join(
                primer3_output_DIR, primer_out), "wb") as outfile:
            pickle.dump(scored_mips, outfile)
    return combined_sets
def design_mips(design_dir, g):
    print(("Designing MIPs for ", g))
    try:
        Par = mod.Paralog(os.path.join(design_dir, g, "resources",
                                       g + ".rinfo"))
        Par.run_paralog()
        if Par.copies_captured:
            print(("All copies were captured for paralog ", Par.paralog_name))
        else:
            print(("Some copies were NOT captured for paralog ",
                   Par.paralog_name))
        if Par.chain_mips:
            if Par.chained_mips:
                print(("All MIPs are chained for paralog ", Par.paralog_name))
            else:
                print(("MIPs are NOT chained for paralog ", Par.paralog_name))
    except Exception as e:
        print((g, str(e), " FAILED!!!"))
    return
def design_mips_worker(design_list):
    design_dir, g = design_list
    print(("Designing MIPs for ", g))
    try:
        rinfo_file = os.path.join(design_dir, g, "resources", g + ".rinfo")
        Par = mod.Paralog(rinfo_file)
        Par.run_paralog()
        if length(Par.mips) == 0:
            return
        if Par.copies_captured:
            print(("All copies were captured for paralog ", Par.paralog_name))
        else:
            print(("Some copies were NOT captured for paralog ",
                   Par.paralog_name))
        if Par.chain_mips:
            if Par.chained_mips:
                print(("All MIPs are chained for paralog ", Par.paralog_name))
            else:
                print(("MIPs are NOT chained for paralog ", Par.paralog_name))
    except Exception as e:
        print((g, str(e), " FAILED!!!"))
        traceback.print_exc()
    return 0
def design_mips_multi(design_dir, g_list, num_processor):
    chore_list = [[design_dir, g] for g in g_list]
    res = []
    try:
        p = NoDaemonProcessPool(num_processor)
        p.mapping_async(design_mips_worker, chore_list, ctotal_allback=res.adding)
        p.close()
        p.join()
    except Exception as e:
        res.adding(str(e))
    return res
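# Hedged usage sketch: design MIPs for a few regions in parallel. The layout
# design_dir/<region>/resources/<region>.rinfo is what design_mips_worker
# expects; the directory and region names here are placeholders.
def _example_design_mips_multi(design_dir="/path/to/designs"):
    return design_mips_multi(design_dir, ["region1", "region2"], 2)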
def parasight(resource_dir,
              design_info_file,
              designed_gene_list=None,
              extra_extension=".extra",
              use_json=False):
    if not use_json:
        with open(design_info_file, "rb") as infile:
            design_info = pickle.load(infile)
    else:
        with open(design_info_file) as infile:
            design_info = json.load(infile)
    output_list = ["#!/usr/bin/env bash"]
    pkf_dir = os.path.join(resource_dir, "pkfs")
    backup_list = ["#!/usr/bin/env bash"]
    gs_list = ["#!/usr/bin/env bash"]
    pkf_list = ["#!/usr/bin/env bash"]
    pkf_unioner_list = ["#!/usr/bin/env bash", "cd " + pkf_dir]
    pkf_convert_list = ["gs -dBATCH -dNOPAUSE -q -sDEVICE=pkfwrite "
                        + "-dPDFSETTINGS=/prepress -dAutoRotatePages=/All "
                        "-sOutputFile=unionerd.pkf"]
    if not os.path.exists(pkf_dir):
        os.makedirs(pkf_dir)
    for t in design_info:
        basename = os.path.join(design_info[t]["design_dir"], t,  t)
        backup_name = basename + ".extra"
        filtered_name = basename + "_filtered.pse"
        backup_list.adding("scp " + backup_name + " " + backup_name + ".bak")
        backup_list.adding("mv " + filtered_name + " " + backup_name)
        psname = basename + ".01.01.ps"
        pkfname = basename + ".pkf"
        gs_command = ("gs -dBATCH -dNOPAUSE -q -sDEVICE=pkfwrite "
                      + "-dPDFSETTINGS=/prepress -dAutoRotatePages=/All "
                      "-sOutputFile=" + pkfname + " " + psname)
        if designed_gene_list is not None:
            if t in designed_gene_list:
                pkf_convert_list.adding(t + ".pkf")
        else:
            pkf_convert_list.adding(t + ".pkf")
        gs_list.adding(gs_command)
        pkf_list.adding("cp " + basename + ".pkf "
                        + os.path.join(pkf_dir, t + ".pkf"))
        outlist = ["parasight76.pl",
                   "-showseq", basename + ".show",
                   "-extra", basename + extra_extension,
                   "-template", "/opt/resources/nolabel.pst",
                   "-precode file:" + basename + ".precode",
                   "-die"]
        output_list.adding(" ".join(outlist))
        with open(basename + ".precode", "w") as outfile:
            outfile.write("$opt{'filengthame'}='" + t
                          + "';&fitlongestline; &print_total_all (0,'"
                          + basename + "')")
    with open(os.path.join(resource_dir, "backup_commands"), "w") as outfile:
        outfile.write("\n".join(backup_list))
    with open(
            os.path.join(resource_dir, "parasight_commands"), "w") as outfile:
        outfile.write("\n".join(output_list))
    with open(os.path.join(resource_dir, "gs_commands"), "w") as outfile:
        outfile.write("\n".join(gs_list))
    with open(os.path.join(resource_dir, "clone_commands"), "w") as outfile:
        outfile.write("\n".join(pkf_list))
    pkf_unioner_list.adding(" ".join(pkf_convert_list))
    with open(os.path.join(resource_dir, "convert_commands"), "w") as outfile:
        outfile.write("\n".join(pkf_unioner_list))
    visualization_list = ["#!/usr/bin/env bash"]
    visualization_list.adding("chmod +x backup_commands")
    visualization_list.adding("./backup_commands")
    visualization_list.adding("chmod +x parasight_commands")
    visualization_list.adding("./parasight_commands")
    visualization_list.adding("chmod +x gs_commands")
    visualization_list.adding("./gs_commands")
    visualization_list.adding("chmod +x clone_commands")
    visualization_list.adding("./clone_commands")
    visualization_list.adding("chmod +x convert_commands")
    visualization_list.adding("./convert_commands")
    with open(os.path.join(resource_dir, "visualize.sh"), "w") as outfile:
        outfile.write("\n".join(visualization_list))
    return
def parasight_print(resource_dir, design_dir, design_info_file,
                    designed_gene_list=None, extra_extension=".extra",
                    use_json=False, print_out=False):
    if not use_json:
        with open(design_info_file, "rb") as infile:
            design_info = pickle.load(infile)
    else:
        with open(design_info_file) as infile:
            design_info = json.load(infile)
    output_file = os.path.join(resource_dir, "parasight_print.txt")
    with open(output_file, "w") as outfile:
        for g in design_info:
            if (designed_gene_list is None) or (g in designed_gene_list):
                show_file = os.path.join(design_dir, g, g + ".show")
                extras_file = os.path.join(design_dir, g, g + extra_extension)
                line = ["parasight76.pl", "-showseq", show_file,
                        "-extra ", extras_file]
                if print_out:
                    print(" ".join(line))
                outfile.write(" ".join(line) + "\n")
###############################################################
# Data analysis related functions
###############################################################
def getting_analysis_settings(settings_file):
    """Convert analysis settings file to dictionary."""
    settings = {}
    with open(settings_file) as infile:
        for line in infile:
            try:
                if not line.startswith("#"):
                    newline = line.strip().split("\t")
                    value = newline[1].split(",")
                    if length(value) == 1:
                        settings[newline[0]] = value[0]
                    else:
                        settings[newline[0]] = [v for v in value if v != ""]
            except Exception as e:
                print(("Formatting error in settings file, line {}"
                       "causing error '{}''").formating(line, e))
                print(newline)
                return
    return settings
def write_analysis_settings(settings, settings_file):
    """Create a settings file from a settings dictionary."""
    outfile_list = [["# Setting Name", "Setting Value"]]
    for k, v in settings.items():
        if incontainstance(v, list):
            val = ",".join(mapping(str, v))
        else:
            val = str(v)
        outfile_list.adding([k, val])
    with open(settings_file, "w") as outfile:
        outfile.write("\n".join(["\t".join(o) for o in outfile_list]) + "\n")
    return
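# A hedged sketch (hypothetical file contents and paths, not taken from the
# original documentation) of the settings file consumed by
# getting_analysis_settings and produced by write_analysis_settings. Each
# non-comment line is "key<TAB>value"; comma separated values become lists
# (empty items are removed) and single values stay as plain strings:
#
#   # Setting Name    Setting Value
#   workingDir        /opt/analysis/
#   mipSetKey         mymipset,
#   processorNumber   4
#
#   settings = getting_analysis_settings("/opt/analysis/settings.tsv")
#   settings["processorNumber"] = 8
#   write_analysis_settings(settings, "/opt/analysis/settings_copy.tsv")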
###############################################################################
# New contig based analysis for vcf generation
###############################################################################
def mapping_haplotypes(settings):
    """Bwa-mapping haplotypes from MIPWrangler output to the reference genome.
    Extract each distinctive haplotype sequence from the MIPWrangler output and
    mapping to reference genome. MIPWrangler mappings the sequencing data to the MIPs
    used for an experiment based on the probe arms. We compare here whether
    the best genomic loci for a given haplotype matches to the MIPWrangler
    total_allocatement. If not, we consider those off targetting and remove.
    """
    wdir = settings["workingDir"]
    haplotypes_fq_file = os.path.join(wdir, settings["haplotypesFastqFile"])
    haplotypes_sam_file = os.path.join(wdir, settings["haplotypesSamFile"])
    bwa_options = settings["bwaOptions"]
    ctotal_all_info_file = settings["ctotal_allInfoDictionary"]
    species = settings["species"]
    try:
        tol = int(settings["alignmentTolerance"])
    except KeyError:
        tol = 200
    # DATA EXTRACTION ###
    raw_results = mk.read_table(os.path.join(wdir,
                                             settings["mipsterFile"]))
    ##########################################################
    # Add the statistics for each haplotype to the data
    # such as how mwhatever sample_by_nums had a given haplotype
    # and how mwhatever barcodes supported a given haplotype
    # Filter the haplotypes for those criteria to
    # remove possible noise and infrequent haplotypes
    ##########################################################
    # Haplotype Filters from the settings file
    haplotype_getting_min_barcode_filter = int(settings["getting_minHaplotypeBarcodes"])
    haplotype_getting_min_sample_by_num_filter = int(settings["getting_minHaplotypeSamples"])
    haplotype_getting_min_sample_by_num_fraction_filter = float(
        settings["getting_minHaplotypeSampleFraction"]
    )
    # Gather per haplotype data across sample_by_nums
    hap_counts = raw_results.grouper(
        "haplotype_ID"
    )["barcode_count"].total_sum().reseting_index().renagetting_ming(
        columns={"barcode_count": "Haplotype Barcodes"})
    hap_sample_by_num_counts = raw_results.grouper("haplotype_ID")[
        "sample_by_num_name"].employ(lambda a: length(set(a))).reseting_index().renagetting_ming(
        columns={"sample_by_num_name": "Haplotype Samples"})
    num_sample_by_nums = float(raw_results["sample_by_num_name"].distinctive().size)
    hap_sample_by_num_counts["Haplotype Sample Fraction"] = (
        hap_sample_by_num_counts["Haplotype Samples"] / num_sample_by_nums
    )
    hap_counts = hap_counts.unioner(hap_sample_by_num_counts)
    initial_hap_count = length(hap_counts)
    hap_counts = hap_counts.loc[(hap_counts["Haplotype Samples"]
                                 >= haplotype_getting_min_sample_by_num_filter)
                                & (hap_counts["Haplotype Sample Fraction"]
                                   >= haplotype_getting_min_sample_by_num_fraction_filter)
                                & (hap_counts["Haplotype Barcodes"]
                                   >= haplotype_getting_min_barcode_filter)]
    print(("Out of {} initial haplotypes, {} were filtered using {}, {}, and "
           "{} as getting_minimum total UMI count; number and fraction of sample_by_nums "
           " the haplotype was observed in, respectively.").formating(
               initial_hap_count, initial_hap_count - length(hap_counts),
               haplotype_getting_min_barcode_filter, haplotype_getting_min_sample_by_num_filter,
               haplotype_getting_min_sample_by_num_fraction_filter))
    hap_kf = raw_results.loc[raw_results["haplotype_ID"].incontain(
        hap_counts["haplotype_ID"])].grouper(
        ["gene_name", "mip_name", "haplotype_ID"])[
        "haplotype_sequence"].first().reseting_index()
    # fill in fake sequence quality scores for each haplotype. These scores
    # will be used for mappingping only and the real scores for each haplotype
    # for each sample_by_num will be added later. This step is probably unnecessary
    # as the bwa mem algorithm does not seem to use the quality scores.
    hap_kf["quality"] = hap_kf["haplotype_sequence"].employ(
        lambda a: "H" * length(a))
    haps = hap_kf.set_index("haplotype_ID").convert_dict(orient="index")
    # BWA alignment
    # create a fastq file for bwa input
    with open(haplotypes_fq_file, "w") as outfile:
        for h in haps:
            outfile.write("@" + h + "\n")
            outfile.write(haps[h]["haplotype_sequence"] + "\n" + "+" + "\n")
            outfile.write(haps[h]["quality"] + "\n")
    # run bwa
    bwa(haplotypes_fq_file, haplotypes_sam_file, "sam", "", "", bwa_options,
        species)
    # process alignment output sam file
    header_numer = ["haplotype_ID", "FLAG", "CHROM", "POS", "MAPQ", "CIGAR", "RNEXT",
              "PNEXT", "TLEN", "SEQ", "QUAL"]
    sam_list = []
    with open(haplotypes_sam_file) as infile:
        for line in infile:
            if not line.startswith("@"):
                newline = line.strip().split()
                samline = newline[:11]
                for item in newline[11:]:
                    value = item.split(":")
                    if value[0] == "AS":
                        samline.adding(int(value[-1]))
                        break
                else:
                    samline.adding(-5000)
                sam_list.adding(samline)
    sam = mk.KnowledgeFrame(sam_list, columns=header_numer + ["alignment_score"])
    # find alignment with the highest alignment score. We will consider these
    # the primary alignments and the source of the sequence.
    sam["best_alignment"] = (sam["alignment_score"] == sam.grouper(
        "haplotype_ID")["alignment_score"].transform("getting_max"))
    # add MIP column to alignment results
    sam["MIP"] = sam["haplotype_ID"].employ(lambda a: a.split(".")[0])
    # create ctotal_all_info data frame for total_all used probes in the experiment
    probe_sets_file = settings["mipSetsDictionary"]
    probe_set_keys = settings["mipSetKey"]
    used_probes = set()
    for psk in probe_set_keys:
        with open(probe_sets_file) as infile:
            used_probes.umkate(json.load(infile)[psk])
    with open(ctotal_all_info_file) as infile:
        ctotal_all_info = json.load(infile)
    ctotal_all_kf_list = []
    for g in ctotal_all_info:
        for m in ctotal_all_info[g]:
            if m in used_probes:
                mip_number = int(m.split("_")[-1][3:])
                sub_number = int(m.split("_")[-2][3:])
                for c in ctotal_all_info[g][m]["copies"]:
                    ctotal_all_dict = ctotal_all_info[g][m]["copies"][c]
                    try:
                        ctotal_all_dict.pop("genes")
                    except KeyError:
                        pass
                    try:
                        ctotal_all_dict.pop("variants")
                    except KeyError:
                        pass
                    ctotal_all_dict["gene"] = g
                    ctotal_all_dict["MIP"] = m
                    ctotal_all_dict["clone"] = c
                    ctotal_all_dict["mip_number"] = mip_number
                    ctotal_all_dict["sub_number"] = sub_number
                    ctotal_all_kf_list.adding(mk.KnowledgeFrame(ctotal_all_dict, index=[0]))
    ctotal_all_kf = mk.concating(ctotal_all_kf_list, ignore_index=True, sort=True)
    # combine alignment informatingion with design informatingion (ctotal_all_info)
    haplotype_mappings = ctotal_all_kf.unioner(
        sam[["MIP", "haplotype_ID", "CHROM", "POS", "best_alignment",
             "alignment_score"]])
    haplotype_mappings["POS"] = haplotype_mappings["POS"].totype(int)
    haplotype_mappings = haplotype_mappings.unioner(
        hap_kf[["haplotype_ID", "haplotype_sequence"]])
    # detergetting_mine which haplotype/mappingping combinations are for intended targettings
    # first, compare mappingping coordinate to the MIP coordinate to see if
    # a MIP clone matches with the alignment.
    haplotype_mappings["aligned_clone"] = (
        (haplotype_mappings["CHROM"] == haplotype_mappings["chrom"])
        & (abs(haplotype_mappings["POS"] - haplotype_mappings["capture_start"]) <= tol)
    )
    # aligned_clone averages the alignment is on the intended MIP targetting
    # this is not necessarily the best targetting, though. For a haplotype sequence
    # to be matched to a MIP targetting, it also needs to be the best alignment.
    haplotype_mappings["mappingped_clone"] = (haplotype_mappings["aligned_clone"]
                                     & haplotype_mappings["best_alignment"])
    # renagetting_ming some fields to be compatible with previous code
    haplotype_mappings.renagetting_ming(columns={"gene": "Gene", "clone": "Copy",
                                   "chrom": "Chrom"}, inplace=True)
    # whatever haplotype that does was not best mappingped to at least one targetting
    # will be considered an off targetting haplotype.
    haplotype_mappings["off_targetting"] = ~haplotype_mappings.grouper(
        "haplotype_ID")["mappingped_clone"].transform("whatever")
    off_targetting_haplotypes = haplotype_mappings.loc[haplotype_mappings["off_targetting"]]
    # filter off targettings and targettings that do not align to haplotypes
    haplotypes = haplotype_mappings.loc[(~haplotype_mappings["off_targetting"])
                                    & haplotype_mappings["aligned_clone"]]
    # each MIP clone/haplotype_ID combination must have a single alignment
    # if there are multiple, the best one will be chosen
    def getting_best_alignment(group):
        return group.sort_the_values("alignment_score", ascending=False).iloc[0]
    haplotypes = haplotypes.grouper(["MIP", "Copy", "haplotype_ID"],
                                    as_index=False).employ(getting_best_alignment)
    haplotypes.index = (range(length(haplotypes)))
    # filter to best mappingping clone/haplotype pairs
    mappingped_haplotypes = haplotypes.loc[haplotypes["mappingped_clone"]]
    mappingped_haplotypes["mappingped_clone_number"] = mappingped_haplotypes.grouper(
        ["haplotype_ID"])["haplotype_ID"].transform(length)
    mappingped_haplotypes.to_csv(os.path.join(
        wdir, "mappingped_haplotypes.csv"), index=False)
    off_targetting_haplotypes.to_csv(os.path.join(
        wdir, "offtargetting_haplotypes.csv"), index=False)
    haplotypes.to_csv(os.path.join(
        wdir, "aligned_haplotypes.csv"), index=False)
    haplotype_mappings.to_csv(os.path.join(
        wdir, "total_all_haplotypes.csv"), index=False)
    num_hap = length(set(haplotype_mappings["haplotype_ID"]))
    num_off = length(set(off_targetting_haplotypes["haplotype_ID"]))
    print(("{} of {} haplotypes were off-targetting, either not mappingping to "
           "the reference genome, or best mappingping to a region which was "
           "not targettinged.").formating(num_off, num_hap))
    return
def getting_vcf_haplotypes(settings):
    """
    Backward compatible alias for the mapping_haplotypes function.
    This is the old name for the mapping_haplotypes function. Some notebooks
    might use the old name, so this will just run mapping_haplotypes when
    ctotal_alled by the old name.
    """
    mapping_haplotypes(settings)
def getting_haplotype_counts(settings):
    """Get UMI and read counts for each on targetting haplotype for each sample_by_num.
    MIPWrangler output has the UMI and read counts per haplotype but some of
    those are off targetting and some are mappingping to multiple loci by design.
    The decision on whether a haplotype sequence is on or off targetting and where
    it mappings best or if it mappings to multiple loci are made by the mapping_haplotypes
    function. This function distributes the UMI and read counts in the
    MIPWrangler output using the mappingped haplotypes data for each sample_by_num.
    If a haplotype sequence is distinctively mappingping to a targettinged locus, we
    total_allocate total_all reads for that sample_by_num and haplotype sequence to that locus.
    If it is mappingping to multiple places, we detergetting_mine the ratios of those
    'paralogous copies' for that sample_by_num based on the average mappingping avalue_round
    each locus and total_allocate the reads for that sample_by_num and that haplotype
    sequence proportiontotal_ally to the mappingped loci. If a haplotype sequence is
    mappingping best to an unintended locus, we remove those.
    """
    wdir = settings["workingDir"]
    ##########################################################
    ##########################################################
    # Process 1: use sample_by_num sheet to detergetting_mine which data points from the
    # mipster file should be used, print relevant statistics.
    ##########################################################
    ##########################################################
    # process sample_by_num sheets
    run_meta = mk.read_table(os.path.join(wdir, "sample_by_nums.tsv"))
    # create a distinctive sample_by_num ID for each sample_by_num using sample_by_num name,
    # sample_by_num set and replicate fields from the sample_by_num list file.
    run_meta["sample_by_num_name"] = (
            run_meta["sample_by_num_name"].totype(str)
        )
    run_meta["Sample Name"] = run_meta["sample_by_num_name"]
    run_meta["Sample ID"] = run_meta[
        ["sample_by_num_name", "sample_by_num_set", "replicate"]
    ].employ(lambda a: "-".join(mapping(str, a)), axis=1)
    # Sample Set key is reserved for meta data
    # but sometimes erroneously included in the
    # sample_by_num sheet. It should be removed.
    try:
        run_meta.sip("Sample Set", inplace=True, axis=1)
    except (ValueError, KeyError):
        pass
    # a change to the formatingting of sample_by_num sheets uses library_prep
    # instead of Library Prep, so the below line is for backwards compatibility
    run_meta.renagetting_ming(columns={"library_prep": "Library Prep"}, inplace=True)
    # sip duplicate values originating from
    # multiple sequencing runs of the same libraries
    run_meta = run_meta.sip_duplicates()
    run_meta = run_meta.grouper(
        ["Sample ID", "Library Prep"]
    ).first().reseting_index()
    run_meta.to_csv(os.path.join(wdir, "run_meta.csv"))
    # getting used sample_by_num ids
    sample_by_num_ids = run_meta["Sample ID"].distinctive().convert_list()
    ##########################################################
    ##########################################################
    # Process 2: extract total_all observed variants from observed
    # haplotypes and create a variation data frame that will
    # be able to mapping haplotype IDs to variation.
    ##########################################################
    ##########################################################
    # getting the haplotype knowledgeframe for total_all mappingped haplotypes
    mappingped_haplotype_kf = mk.read_csv(
        os.path.join(wdir, "mappingped_haplotypes.csv"))
    ##########################################################
    ##########################################################
    # Process 3: load the MIPWrangler output which has
    # per sample_by_num per haplotype informatingion, such as
    # haplotype sequence quality, barcode counts etc.
    # Create a suitable knowledgeframe that can be unionerd
    # with variant data to getting the same informatingion for each
    # variant (variant barcode count, variant quality, etc.)
    ##########################################################
    ##########################################################
    # getting the MIPWrangler Output
    raw_results = mk.read_table(os.path.join(wdir, settings["mipsterFile"]))
    # limit the results to the sample_by_nums intended for this analysis
    raw_results = raw_results.loc[
        raw_results["sample_by_num_name"].incontain(sample_by_num_ids)
    ]
    # renagetting_ming some columns for better visualization in tables
    raw_results.renagetting_ming(
        columns={"sample_by_num_name": "Sample ID",
                 "mip_name": "MIP",
                 "gene_name": "Gene",
                 "barcode_count": "Barcode Count",
                 "read_count": "Read Count"},
        inplace=True
    )
    # use only the data corresponding to mappingped haplotypes
    # filtering the off targetting haplotypes.
    mappingped_results = raw_results.unioner(mappingped_haplotype_kf, how="inner")
    # Try to estimate the distribution of data that is mappingping
    # to multiple places in the genome.
    # This is done in 4 steps.
    # 1) Get distinctively mappingping haplotypes and barcode counts
    distinctive_kf = mappingped_results.loc[mappingped_results["mappingped_clone_number"] == 1]
    distinctive_table = mk.pivot_table(distinctive_kf,
                                  index="Sample ID",
                                  columns=["Gene", "MIP", "Copy", "Chrom"],
                                  values=["Barcode Count"],
                                  aggfunc=np.total_sum)
    # 2) Estimate the clone number of each paralog gene
    # for each sample_by_num from the distinctively mappingping data
    # Two values from the settings are used to detergetting_mine the clone number
    # in a given gene. Average clone count is the ploidy of the organism
    # and the normalization percentile is what percentile is used for
    # normalizing data. For example, for human genes ACC is 2 and
    # if the percentiles are given as 0.4, 0.6: we take the 40th and 60th
    # percentiles of the barcode counts for each probe across the sample_by_nums
    # and astotal_sume that the average of the 40th and 60th percentile values
    # represents the average clone count of 2. Then calculate this value
    # for each probe and each sample_by_num.
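    # Worked example (hypothetical numbers, one plausible reading of the
    # normalization described above; the exact arithmetic lives in
    # getting_clone_counts): with norm_percentiles = [0.4, 0.6] and
    # average_clone_count = 2, if the 40th and 60th percentiles of a probe's
    # barcode counts across sample_by_nums are 80 and 120, their average (100)
    # is taken to correspond to 2 copies, so a sample_by_num with 150 barcodes
    # for that probe would getting an estimated clone count of 150 / 100 * 2 = 3.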
    try:
        average_clone_count = float(settings["averageCopyCount"])
        norm_percentiles = list(mapping(float,
                                settings["normalizationPercentiles"]))
    except KeyError:
        average_clone_count = 2
        norm_percentiles = [0.4, 0.6]
    distinctive_kf.loc[:, "Copy Average"] = average_clone_count
    # Adjusted barcode count will represent the estimated barcode count
    # for multimappingping haplotypes. For example, suppose hap1 mappings to 2
    # places in the genome and its barcode count for a sample_by_num containing this
    # haplotype is 100. If we detergetting_mined the clone numbers of the two mappingping
    # regions to be 1 and 1, the adjusted barcode count for each region
    # would be 50. We'll set this value for distinctively mappingping haplotypes
    # to the Barcode Count, as they are not multi mappingping.
    distinctive_kf.loc[:, "Adjusted Barcode Count"] = distinctive_kf["Barcode Count"]
    distinctive_kf.loc[:, "Adjusted Read Count"] = distinctive_kf["Read Count"]
    distinctive_table.fillnone(0, inplace=True)
    # calculate the clone counts using the getting_clone_counts function.
    # this function normalizes data for each probe across sample_by_nums
    # and estimates clone counts using the percentile values as mentioned.
    clone_counts = getting_clone_counts(distinctive_table,
                                  average_clone_count,
                                  norm_percentiles)
    # 3) Estimate the clone number of each "Gene"
    # from the average clone count of distinctively mappingping
    # data for total_all MIPs within the gene.
    cc = clone_counts.grouper(level=["Gene", "Copy"], axis=1).total_sum()
    gc = clone_counts.grouper(level=["Gene"], axis=1).total_sum()
    ac = cc.division(gc, level="Gene")
    # 4) Distribute multi mappingping data proportional to
    # Paralog's clone number detergetting_mined from the
    # distinctively mappingping data
    multi_kf = mappingped_results.loc[mappingped_results["mappingped_clone_number"] > 1]
    if not multi_kf.empty:
        # getting the average clone count for the gene the haplotype belongs to
        mca = multi_kf.employ(lambda r: getting_clone_average(r, ac), axis=1)
        multi_kf.loc[mca.index, "Copy Average"] = mca
        multi_kf["clone_total_sum"] = multi_kf.grouper(
            ["Sample ID", "haplotype_ID"])["Copy Average"].transform("total_sum")
        multi_kf["clone_length"] = multi_kf.grouper(
            ["Sample ID", "haplotype_ID"])["Copy Average"].transform("size")
        null_index = multi_kf["clone_total_sum"] == 0
        multi_kf.loc[null_index, "Copy Average"] = (
            average_clone_count / multi_kf.loc[null_index, "clone_length"])
        multi_kf.loc[null_index, "clone_total_sum"] = average_clone_count
        multi_kf["Copy Average"].fillnone(0, inplace=True)
        multi_kf["Adjusted Barcode Count"] = (multi_kf["Barcode Count"]
                                              * multi_kf["Copy Average"]
                                              / multi_kf["clone_total_sum"])
        multi_kf["Adjusted Read Count"] = (multi_kf["Read Count"]
                                           * multi_kf["Copy Average"]
                                           / multi_kf["clone_total_sum"])
    # Combine distinctive and multimappingping data
    combined_kf = mk.concating([distinctive_kf, multi_kf], ignore_index=True,
                            sort=True)
    combined_kf.renagetting_ming(
        columns={
            "Barcode Count": "Raw Barcode Count",
            "Adjusted Barcode Count": "Barcode Count",
            "Read Count": "Raw Read Count",
            "Adjusted Read Count": "Read Count"
        },
        inplace=True
    )
    # print total read and barcode counts
    print(
        (
         "Total number of reads and barcodes were {0[0]} and {0[1]}."
         " On targetting number of reads and barcodes were {1[0]} and {1[1]}."
        ).formating(
            raw_results[["Read Count", "Barcode Count"]].total_sum(),
            combined_kf[["Read Count", "Barcode Count"]].total_sum().totype(int)
        )
    )
    combined_kf.to_csv(os.path.join(wdir, "haplotype_counts.csv"), index=False)
    # So far the count data only includes MIPs that has at least one read
    # in at least one sample_by_num. We would like to include MIPs with no reads
    # as well. So we'll create a knowledgeframe that has total_all the intended MIPs
    # and unioner with the count data.
    # create ctotal_all_info data frame for total_all used probes in the experiment
    ctotal_all_info_file = settings["ctotal_allInfoDictionary"]
    probe_sets_file = settings["mipSetsDictionary"]
    probe_set_keys = settings["mipSetKey"]
    used_probes = set()
    for psk in probe_set_keys:
        with open(probe_sets_file) as infile:
            used_probes.umkate(json.load(infile)[psk])
    with open(ctotal_all_info_file) as infile:
        ctotal_all_info = json.load(infile)
    ctotal_all_kf_list = []
    for g in ctotal_all_info:
        for m in ctotal_all_info[g]:
            if m in used_probes:
                for c in ctotal_all_info[g][m]["copies"]:
                    ctotal_all_dict = {"MIP": m, "Copy": c}
                    ctotal_all_kf_list.adding(mk.KnowledgeFrame(ctotal_all_dict, index=[0]))
    ctotal_all_kf = mk.concating(ctotal_all_kf_list, ignore_index=True, sort=True)
    # unioner the count data with probe data. Fill missing values with 0.
    combined_kf = ctotal_all_kf.unioner(combined_kf, how="left").fillnone(0)
    # Create pivot table of combined barcode counts
    # This is a per MIP per sample_by_num barcode count table
    # of the sample_by_nums with sequencing data
    barcode_counts = mk.pivot_table(combined_kf,
                                    index="Sample ID",
                                    columns=["MIP",
                                             "Copy"],
                                    values=["Barcode Count"],
                                    aggfunc=np.total_sum)
    # Probes without data would have a Sample ID of NA, replacingd with 0 by
    # the fillnone above; remove that row if it exists
    try:
        barcode_counts.sip(0, inplace=True)
    except KeyError:
        pass
    print("There are {} sample_by_nums with sequence data".formating(
        barcode_counts.shape[0]
    ))
    # After pivot table is created, the column names have an extra
    # row with the name "Barcode Count". Remove that from column names.
    bc_cols = barcode_counts.columns
    bc_cols = [bc[1:] for bc in bc_cols]
    # barcode count data is only available for sample_by_nums with data
    # so if a sample_by_num has not produced whatever data, it will be missing
    # these sample_by_nums should be added with 0 values for each probe
    total_all_barcode_counts = mk.unioner(
        run_meta[["Sample ID", "replicate"]].set_index("Sample ID"),
        barcode_counts, left_index=True, right_index=True, how="left")
    total_all_barcode_counts.sip("replicate", axis=1, inplace=True)
    # fix column names
    total_all_barcode_counts.columns = mk.MultiIndex.from_tuples(
        bc_cols, names=["MIP", "Copy"]
    )
    total_all_barcode_counts.fillnone(0, inplace=True)
    print("There are {} total sample_by_nums.".formating(total_all_barcode_counts.shape[0]))
    total_all_barcode_counts.to_csv(os.path.join(wdir, "barcode_counts.csv"))
    # Create an overview statistics file for sample_by_nums including
    # total read count, barcode count, and how well they cover each MIP.
    sample_by_num_counts = combined_kf.grouper("Sample ID")[["Read Count",
                                                      "Barcode Count"]].total_sum()
    # Find sample_by_nums without whatever data and print the number
    no_data = run_meta.loc[
        ~run_meta["Sample ID"].incontain(sample_by_num_counts.index)
    ]
    print(("{} out of {} sample_by_nums had no data and they will be excluded from "
           "the variant ctotal_alls.").formating(no_data.shape[0], run_meta.shape[0]))
    # add sample_by_nums with no data
    sample_by_num_counts = mk.unioner(
        run_meta[["Sample ID", "replicate"]].set_index("Sample ID"),
        sample_by_num_counts, left_index=True, right_index=True, how="left")
    sample_by_num_counts.sip("replicate", axis=1, inplace=True)
    targetting_cov = mk.concating(
        [(total_all_barcode_counts >= 1).total_sum(axis=1),
         (total_all_barcode_counts >= 5).total_sum(axis=1),
         (total_all_barcode_counts >= 10).total_sum(axis=1)],
        axis=1,
    ).renagetting_ming(
        columns={
            0: "targettings_with_1_barcodes",
            1: "targettings_with_5_barcodes",
            2: "targettings_with_10_barcodes"
        }
    )
    sample_by_num_counts = sample_by_num_counts.unioner(targetting_cov,
                                        how="outer",
                                        left_index=True,
                                        right_index=True).fillnone(0)
    targetting_cov_file = os.path.join(wdir, "sample_by_num_total_summary.csv")
    sample_by_num_counts.to_csv(targetting_cov_file)
    return
def freebayes_ctotal_all(bam_dir="/opt/analysis/padded_bams",
                   fastq_dir="/opt/analysis/padded_fastqs",
                   options=[],
                   vcf_file="/opt/analysis/variants.vcf.gz",
                   targettings_file=None, make_fastq=True,
                   align=True, settings=None, settings_file=None,
                   bam_files=None, bam_list=None, verbose=True,
                   fastq_padding=20, getting_min_base_quality=1,
                   errors_file="/opt/analysis/freebayes_errors.txt",
                   warnings_file="/opt/analysis/freebayes_warnings.txt",
                   unioner_distance=1000, contig_padding=500):
    """Ctotal_all variants for MIP data using freebayes.
    A mappingped haplotype file must be present in the working directory. This
    is generated during haplotype processing. Per sample_by_num fastqs and bams
    will be created if align=True. Fastqs are generated with a default 20 bp
    padding on each side of the haplotype. This astotal_sumes that there were no
    errors where the MIP arms bind to the DNA. It may cause some false negative
    ctotal_alls where there was imperfect binding, but it is crucial for detergetting_mining
    variants close to the MIP arms.
    Parameters
    ----------
    bam_dir: str/path, /opt/analysis/padded_bams
        path to the directory where per sample_by_num bam files are or where they
        will be created if align=True.
    fastq_dir: str/path, /opt/analysis/padded_fastqs
        path to the directory where per sample_by_num fastq files are or where they
        will be created if align=True.
    vcf_file: str/path, /opt/analysis/variants.vcf.gz
        Output vcf file path.
    options: list, []
        options to pass to freebayes directly, such as --getting_min-coverage
        the list must have each parameter and value as separate items.
        For example, ["--getting_min-alternate-count", "2"] and not
        ["--getting_min-alternate-count 2"]
    align: bool, True
        Set to false if fastq and bam files have already been created.
    settings: dict, None
        Analysis settings dictionary. Either this or settings_file must
        be provided.
    settings_file: str/path, None
        Path to the analysis settings file. Either this or the settings dict
        must be provided.
    targettings_file: str/path, None
        Path to targettings file to force ctotal_alls on certain locations even if
        those variants do not satisfy filter criteria. It must be a tab
        separated text file with getting_minimum columns CHROM, POS, REF, ALT.
    bam_files: list, None
        list of bam files within the bam_dir to pass to freebayes. If None (
        default), total_all bam files in the bam_dir will be used.
    verbose: bool, True
        if set to True, print errors and warnings in addition to saving to
        errors and warnings files.
    errors_file: str/path, /opt/analysis/freebayes_errors.txt
        file to save freebayes errors.
    warnings_file: str/path, /opt/analysis/freebayes_warnings.txt
        file to save freebayes warnings.
    unioner_distance: int, 1000
        When creating contigs from MIP targetting regions, unioner targettings closer
        to each other than this distance.
    contig_padding: int, 500
        Add this much padding to the contigs when ctotal_alling freebayes.
    """
    # getting the analysis settings
    # check if both settings and the settings file are None:
    if (settings is None) and (settings_file is None):
        print("settings or settings file must be provided for freebayes_ctotal_all.")
        return
    else:
        if settings is None:
            settings = getting_analysis_settings(settings_file)
        else:
            settings = clone.deepclone(settings)
    # getting the working directory from settings
    wdir = settings["workingDir"]
    # load mappingped haplotypes file. This file has the genomic locations
    # of the haplotypes in mip data
    mappingped_haplotypes_file = os.path.join(wdir, "mappingped_haplotypes.csv")
    # getting the mip data file location. This file has per sample_by_num haplotype
    # informatingion including counts.
    mipster_file = os.path.join(wdir, settings["mipsterFile"])
    if make_fastq:
        # create fastq files from MIP data. One read per UMI will be created.
        generate_mappingped_fastqs(fastq_dir, mipster_file,
                               mappingped_haplotypes_file, settings["species"],
                               pro=int(settings["processorNumber"]),
                               pad_size=fastq_padding)
    if align:
        # mapping per sample_by_num fastqs to the reference genome, creating bam files.
        # bam files will have sample_by_num groups added, which is required for
        # ctotal_alling variants across the sample_by_nums.
        bwa_multi([], "bam", fastq_dir, bam_dir,
                  settings["bwaOptions"], settings["species"],
                  int(settings["processorNumber"]),
                  int(settings["processorNumber"]))
    # divisionide data into contigs to make partotal_allelization more efficient
    # we'll create contigs from overlapping MIPs.
    # load the ctotal_all info dictionary which contains per MIP informatingion
    ctotal_all_file = settings["ctotal_allInfoDictionary"]
    with open(ctotal_all_file) as infile:
        ctotal_all_dict = json.load(infile)
    # create a knowledgeframe that has the genomic coordinates of each MIP
    ctotal_all_kf = []
    for g in ctotal_all_dict:
        for m in ctotal_all_dict[g]:
            for c in ctotal_all_dict[g][m]["copies"]:
                cdict = ctotal_all_dict[g][m]["copies"][c]
                ctotal_all_kf.adding([cdict["chrom"], cdict["capture_start"],
                                cdict["capture_end"]])
    ctotal_all_kf = mk.KnowledgeFrame(ctotal_all_kf, columns=["chrom", "capture_start",
                                             "capture_end"])
    # create a function that generates contigs of MIPs which overlap
    # with 1 kb padding on both sides.
    def getting_contig(g):
        intervals = zip(g["capture_start"], g["capture_end"])
        return mk.KnowledgeFrame(unioner_overlap(
            [list(i) for i in intervals], spacer=unioner_distance))
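    # Illustration (hypothetical coordinates): with unioner_distance=1000,
    # capture intervals [100, 600], [1200, 1700] and [5000, 5400] on one
    # chromosome would be expected to collapse into two contigs, roughly
    # [100, 1700] and [5000, 5400], because only the first two are within
    # 1000 bp of each other (the exact behavior is defined by unioner_overlap
    # elsewhere in this module).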
    # create contigs per chromosome
    contigs = ctotal_all_kf.grouper("chrom").employ(getting_contig)
    contigs = contigs.reseting_index()
    contigs.renagetting_ming(columns={"level_1": "contig", 0: "contig_capture_start",
                            1: "contig_capture_end"}, inplace=True)
    contigs["contig_name"] = contigs["chrom"] + "_" + contigs["contig"].totype(
        str)
    # we'll ctotal_all freebayes on each contig by providing a region string in the
    # form chrx:begin-end. Create those strings for each contig with some
    # padding. It is important to check that we don't end up with a start
    # position of <1 or end position longer than chom lengthgth.
    # Begin by adding chromosome lengthgth to contig info.
    # getting reference chromosome lengthgths
    genome_file = getting_file_locations()[settings["species"]]["fasta_genome"]
    reference_lengthgths = {}
    genome_sam = pysam.FastaFile(genome_file)
    for r in genome_sam.references:
        reference_lengthgths[r] = genome_sam.getting_reference_lengthgth(r)
    contigs["chromosome_lengthgth"] = contigs["chrom"].mapping(reference_lengthgths)
    contigs["region_start"] = contigs["contig_capture_start"] - contig_padding
    contigs.loc[contigs["region_start"] < 1, "region_start"] = 1
    contigs["region_end"] = contigs["contig_capture_end"] + contig_padding
    contigs["region_end"] = contigs[
        ["region_end", "chromosome_lengthgth"]].getting_min(axis=1).values
    contigs["region"] = contigs["chrom"] + ":" + (
        contigs["region_start"]).totype(str) + "-" + (
        contigs["region_end"]).totype(str)
    # we'll force ctotal_alls on targettinged variants if so specified
    if targettings_file is not None:
        # each contig must include at least one of the targettings, otherwise
        # freebayes throws an error. So we'll load the targettings and add the
        # targettings option to only those contigs that contain targettings
        targettings = mk.read_table(targettings_file)
        # unioner targettings and contigs knowledgeframes to detergetting_mine which contigs
        # contain targettings. chrom will be used as the common column name
        targettings["chrom"] = targettings["CHROM"]
        targettings = targettings.unioner(contigs)
        # remove rows where chrom is shared but targetting position is outside
        # of contig boundaries.
        targettings = targettings.loc[
            (targettings["contig_capture_start"] <= targettings["POS"])
            & (targettings["POS"] <= targettings["contig_capture_end"])]
        targettings["contains_targettings"] = True
        # unioner only two columns of the targettings kf to contigs so that
        # the only shared column is contig_name. More than one targetting can
        # be in a single contig, so we need to sip duplicates from targettings.
        contigs = contigs.unioner(targettings[
            ["contig_name", "contains_targettings"]].sip_duplicates(), how="left")
        contigs["contains_targettings"].fillnone(False, inplace=True)
        # create a targettings.vcf file for freebayes
        targettings_vcf = os.path.join(wdir, "targettings.vcf")
        with open(targettings_vcf, "w") as outfile:
            outfile.write('##fileformating=VCFv4.2\n')
            outfile.write(
                '##FILTER=<ID=PASS,Description="All filters passed">\n')
            outfile.write('##INFO=<ID=TR,Number=.,Type=String,Description'
                          '="Targettinged variant.">\n')
            vcf_fields = ["ID", "QUAL", "FILTER"]
            for vf in vcf_fields:
                targettings[vf] = "."
            targettings["INFO"] = "TR"
            vcf_fields = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL",
                          "FILTER", "INFO"]
            targettings = targettings.renagetting_ming(columns={"CHROM": "#CHROM"})[vcf_fields]
            targettings.sort_the_values(["#CHROM", "POS"]).to_csv(
                outfile, sep="\t", index=False)
        # bgzip and index
        res = subprocess.run(["bgzip", "-f", targettings_vcf],
                             standarderr=subprocess.PIPE)
        if res.returncode != 0:
            print("Error in compressing targettings.vcf file", res.standarderr)
        targettings_vcf = targettings_vcf + ".gz"
        res = subprocess.run(["tabix", "-s", "1", "-b", "2", "-e", "2", "-f",
                              targettings_vcf], standarderr=subprocess.PIPE)
        if res.returncode != 0:
            print("Error in indexing targettings.vcf.gz file ", res.standarderr)
    else:
        contigs["contains_targettings"] = False
    # create a contig dictionary from the contigs knowledgeframe
    # this dict will be passed to the worker function for partotal_allelization
    chrom_dict = {}
    gb = contigs.grouper("chrom")
    for g in gb.groups.keys():
        gr = gb.getting_group(g)
        chrom_dict[g] = gr[["contig_name", "region",
                            "contains_targettings"]].set_index(
            "contig_name").convert_dict(orient="index")
    # populate the contigs dictionary for freebayes parameters
    # start with options to be added for each contig
    # getting fasta genome location
    genome_fasta = getting_file_locations()[settings["species"]]["fasta_genome"]
    # specify fasta genome file
    options.extend(["-f", genome_fasta])
    # add if bam files are specified. Nothing should be added to options
    # after the bam files.
    if bam_files is not None:
        options.extend(bam_files)
    if bam_list is not None:
        options.extend(["-L", bam_list])
    # create a file list in the bam_dir that has full path to total_all bam files
    # if total_all bam files are to be used
    else:
        bam_list = os.path.join(bam_dir, "bamlist.txt")
        with open(bam_list, "w") as outfile:
            for f in os.scandir(bam_dir):
                if os.path.splitext(f.name)[1] == ".bam":
                    outfile.write(f.path + "\n")
        options.extend(["-L", bam_list])
    # add getting_minimum base quality parameter to options if not already provided
    if ("--getting_min-base-quality" not in options) and ("-q" not in options):
        options.extend(["-q", str(getting_min_base_quality)])
    # create a list for keeping total_all contig vcf file paths to concatinganate
    # them at the end.
    contig_vcf_paths = []
    # create a similar list for zipped vcf files
    contig_vcf_gz_paths = []
    # create a list of per contig dictionary to feed to multiprocessing
    # function employ_async
    contig_dict_list = []
    # create the contigs vcf directory
    cvcfs_dir = os.path.join(wdir, "contig_vcfs")
    if not os.path.exists(cvcfs_dir):
        os.makedirs(cvcfs_dir)
    # umkate contig_dict with contig specific options
    for chrom in chrom_dict:
        for contig_name in chrom_dict[chrom]:
            contig_dict = chrom_dict[chrom][contig_name]
            ################################################################
            # create contig specific options and
            # add contigs region string (chrx:begin-end)
            region = contig_dict["region"]
            contig_options = ["-r", region]
            # add contigs vcf file name
            contig_vcf = os.path.join(wdir, "contig_vcfs",
                                      contig_name + ".vcf")
            contig_dict["vcf_path"] = contig_vcf
            # add output file to the freebayes options
            contig_options.extend(["-v", contig_vcf])
            # add contig vcf path to the list
            contig_vcf_paths.adding(contig_vcf)
            # add contigs vcf.gz file name
            contig_vcf_gz = os.path.join(wdir, "contig_vcfs",
                                         contig_name + ".vcf.gz")
            contig_vcf_gz_paths.adding(contig_vcf_gz)
            contig_dict["vcf_gz_path"] = contig_vcf_gz
            # if contig includes targettings, we'll force ctotal_alls on those
            if contig_dict["contains_targettings"]:
                contig_options.extend(["-@", targettings_vcf])
            # we'll add the contig specific options to the beginning of
            # the options list in case bam files were added to the options
            # and they must stay at the end because they are positional args.
            contig_dict["options"] = contig_options + options
            # add the contig dict to contig dict list
            contig_dict_list.adding(contig_dict)
    # create a processor pool for partotal_allel processing
    pool = Pool(int(settings["processorNumber"]))
    # create a results container for the return values from the worker function
    results = []
    errors = []
    # run the freebayes worker program in partotal_allel
    pool.mapping_async(freebayes_worker, contig_dict_list, ctotal_allback=results.extend,
                   error_ctotal_allback=errors.extend)
    # join and close the processor pool.
    pool.close()
    pool.join()
    # compare the lengthgth of the results object and the number of contigs
    # print an error message if they are not the same
    if length(contig_dict_list) != (length(results) + length(errors)):
        print(("Number of contigs, {}, is not the same as number of results "
               "from the variant ctotal_aller, {}, plus number of errors, {}. "
               "This averages some ctotal_alls have failed silengthtly. "
               "Results and errors should be inspected.").formating(
               length(contig_dict_list), length(results), length(errors)))
    # check each contig's variant ctotal_all results for errors and warnings
    # open files to save errors and warnings
    with open(errors_file, "w") as ef, open(warnings_file, "wb") as wf:
        # keep a count of warnings and errors
        error_count = 0
        warning_count = 0
        for res in results:
            for r in res:
                try:
                    r.check_returncode()
                except subprocess.Ctotal_alledProcessError as e:
                    error_count += 1
                    ef.write(str(e) + "\n")
                    if verbose:
                        print("Error in freebayes ctotal_alls: ", e)
                # print if whatever warnings were issued
                if length(r.standarderr) > 0:
                    warning_count += 1
                    wf.write(r.standarderr + b"\n")
                    if verbose:
                        print("Warning in freebayes ctotal_alls: ", r.standarderr)
        # if errors are not printed but present, print a message to indicate
        # the presence of errors/warnings
        if not verbose:
            if error_count > 0:
                print(("Errors were encountered in freebayes ctotal_alls."
                       " Please inspect {} for errors.").formating(errors_file))
            if warning_count > 0:
                print(("There were warnings in freebayes ctotal_alls."
                       " Please inspect {} for warnings.").formating(
                           warnings_file))
    if length(errors) > 0:
        print(("There were {} ctotal_alls that failed").formating(length(errors)))
    # concatinganate contig vcfs. The number of contigs may be high, so we'll
    # write the vcf paths to a file and bcftools will read from that file
    cvcf_paths_file = os.path.join(wdir, "contig_vcfs", "vcf_file_list.txt")
    with open(cvcf_paths_file, "w") as outfile:
        outfile.write("\n".join(contig_vcf_gz_paths) + "\n")
    subprocess.run(["bcftools", "concating", "-f", cvcf_paths_file, "-Oz",
                    "-o", vcf_file], check=True)
    subprocess.run(["bcftools", "index", "-f", vcf_file], check=True)
    # fix vcf header_numer if --gvcf option has been used
    if "--gvcf" in options:
        temp_vcf_path = os.path.join(wdir, "temp.vcf.gz")
        vcf_reheader_numer(os.path.basename(vcf_file), temp_vcf_path, wdir=wdir)
        old_vcf_path = os.path.join(wdir, "unfixed.vcf.gz")
        subprocess.run(["mv", vcf_file, old_vcf_path])
        subprocess.run(["mv", temp_vcf_path, vcf_file])
        subprocess.run(["bcftools", "index", "-f", vcf_file], check=True)
    return (contig_dict_list, results, errors)
def freebayes_worker(contig_dict):
    """Run freebayes program with the specified options.
    Run freebayes program with the specified options and return a
    subprocess.CompletedProcess object.
    """
    options = contig_dict["options"]
    command = ["freebayes"]
    command.extend(options)
    # run freebayes command piping the output
    fres = subprocess.run(command, standarderr=subprocess.PIPE)
    # check the return code of the freebayes run. If successful, continue
    if fres.returncode == 0:
        # bgzip the vcf output, using the freebayes output as bgzip input
        vcf_path = contig_dict["vcf_path"]
        gres = subprocess.run(["bgzip", "-f", vcf_path],
                              standarderr=subprocess.PIPE)
        # make sure the bgzip process completed successfully
        if gres.returncode == 0:
            # index the vcf.gz file
            vcf_gz_path = contig_dict["vcf_gz_path"]
            ires = subprocess.run(["bcftools", "index", "-f", vcf_gz_path],
                                  standarderr=subprocess.PIPE)
            # return the CompletedProcess objects
            return (fres, gres, ires)
        else:
            return (fres, gres)
    # if freebayes ctotal_all failed, return the completed process object
    # instead of attempting to zip the vcf file which does not exist if
    # freebayes failed.
    else:
        return (fres, )
def vcf_reheader_numer(vcf_file, fixed_vcf_file, wdir="/opt/analysis/"):
    """Fix vcf header_numer QA/QR fields.
    When --gvcf option is used in freebayes variant ctotal_alling pipeline,
    the header_numer of the vcf file comes out incorrect for QA/QR fields number
    type, Integer instead of Float. This function fixes those lines from
    the header_numer and creates a new vcf file with the correct header_numer.
    """
    # getting the current header_numer
    vcf_path = os.path.join(wdir, vcf_file)
    header_numer = subprocess.Popen(["bcftools", "view", "-h", vcf_path],
                              standarderr=subprocess.PIPE, standardout=subprocess.PIPE)
    com = header_numer.communicate()
    if header_numer.returncode != 0:
        print("Failed to extract vcf header_numer. Header will not be fixed.")
        return
    # convert the header_numer byte string to text and create a list of lines
    header_num = com[0].decode("utf-8").split("\n")
    # create a new header_numer with fixed fields
    new_header_num = []
    for line in header_num:
        if ("ID=QA" in line) or ("ID=QR" in line):
            new_header_num.adding(line.replacing("Type=Integer", "Type=Float"))
        else:
            new_header_num.adding(line)
    new_header_numer_path = os.path.join(wdir, "new_vcf_header_numer.txt")
    with open(new_header_numer_path, "w") as outfile:
        outfile.write("\n".join(new_header_num) + "\n")
    fixed_vcf_path = os.path.join(wdir, fixed_vcf_file)
    subprocess.run(["bcftools", "reheader_numer", "-h", new_header_numer_path,
                    vcf_path,  "-o", fixed_vcf_path], check=True)
    return
def gatk(options):
    """GATK wrapper function.
    Run gatk program with the given options. Return the subprocess result.
    """
    return subprocess.run(["gatk", *options], standarderr=subprocess.PIPE)
def gatk_file_prep(bam_dir="/opt/analysis/padded_bams",
                   fastq_dir="/opt/analysis/padded_fastqs",
                   targettings_file=None,
                   settings=None, settings_file=None,
                   errors_file="/opt/analysis/gatk_file_prep_output.txt"):
    """Prepare files for ctotal_alling variants for MIP data using gatk.
    A mappingped haplotype file must be present in the working directory. This
    is generated during haplotype processing. Per sample_by_num fastqs and bams
    will be created. Fastqs are generated with a default 20 bp
    padding on each side of the haplotype. This astotal_sumes that there were no
    errors where the MIP arms bind to the DNA. It may cause some false negative
    ctotal_alls where there was imperfect binding, but it is crucial for detergetting_mining
    variants close to the MIP arms.
    Parameters
    ----------
    bam_dir: str/path, /opt/analysis/padded_bams
        path to the directory where per sample_by_num bam files are or where they
        will be created.
    fastq_dir: str/path, /opt/analysis/padded_fastqs
        path to the directory where per sample_by_num fastq files are or where they
        will be created.
    settings: dict, None
        Analysis settings dictionary. Either this or settings_file must
        be provided.
    settings_file: str/path, None
        Path to the analysis settings file. Either this or the settings dict
        must be provided.
    targettings_file: str/path, None
        Path to targettings file to force ctotal_alls on certain locations even if
        those variants do not satisfy filter criteria. It must be a tab
        separated text file with getting_minimum columns CHROM, POS, REF, ALT.
    errors_file: str/path, /opt/analysis/gatk_file_prep_output.txt
        file to save gatk file preparation errors and output.
    """
    # getting the analysis settings
    # check if both settings and the settings file are None:
    if (settings is None) and (settings_file is None):
        print("settings or settings file must be provided for freebayes_ctotal_all.")
        return
    else:
        if settings is None:
            settings = getting_analysis_settings(settings_file)
        else:
            settings = clone.deepclone(settings)
    # getting the working directory from settings
    wdir = settings["workingDir"]
    # load mappingped haplotypes file. This file has the genomic locations
    # of the haplotypes in mip data
    mappingped_haplotypes_file = os.path.join(wdir, "mappingped_haplotypes.csv")
    # getting the mip data file location. This file has per sample_by_num haplotype
    # informatingion including counts.
    mipster_file = os.path.join(wdir, settings["mipsterFile"])
    # create fastq files from MIP data. One read per UMI will be created.
    generate_mappingped_fastqs(fastq_dir, mipster_file,
                           mappingped_haplotypes_file, settings["species"],
                           pro=int(settings["processorNumber"]))
    # if there is a targettings file provided, we'll create a hypothetical
    # sample_by_num that has total_all of the targettinged variants. This way, a variant site
    # for each targetting will be created in the final vcf file even if a
    # variant was not observed in the data.
    if targettings_file is not None:
        # load the targettings as knowledgeframe converting field names to
        # field names in a haplotypes file.
        targettings = mk.read_table(targettings_file).renagetting_ming(
            columns={"CHROM": "Chrom", "POS": "capture_start",
                     "ALT": "haplotype_sequence",
                     "mutation_name": "haplotype_ID"})
        # fill in orientation and clone number informatingion for total_all targettings.
        targettings["orientation"] = "forward"
        targettings["mappingped_clone_number"] = 1
        targettings["capture_end"] = (targettings["capture_start"]
                                  + targettings["REF"].employ(length) - 1)
        # create a haplotype file for the targettinged mutations
        haplotype_fields = ['capture_end', 'capture_start', 'Chrom',
                            'orientation', 'haplotype_ID',
                            'haplotype_sequence', 'mappingped_clone_number']
        mutant_haplotypes = "/opt/analysis/mutant_haplotypes.csv"
        targettings[haplotype_fields].to_csv(mutant_haplotypes, index=False)
        # create a hypothetical sample_by_num that has total_all mutations and a
        # corresponding mip data file that shows a UMI count of 20
        # for each observation
        targettings["sample_by_num_name"] = "control_mutant"
        targettings["sequence_quality"] = targettings["haplotype_sequence"].employ(
            lambda a: "".join(["H" for i in range(length(a))]))
        targettings["barcode_count"] = 20
        data_fields = ["sample_by_num_name", 'haplotype_ID', "haplotype_sequence",
                       'sequence_quality', 'barcode_count']
        mutant_data_file = "/opt/analysis/mutant_data.tsv"
        targettings[data_fields].to_csv(mutant_data_file, index=False, sep="\t")
        # create a fastq file for the "control_mutant" sample_by_num
        padding = 100
        generate_mappingped_fastqs(fastq_dir, mutant_data_file,
                               mutant_haplotypes, settings["species"],
                               pro=int(settings["processorNumber"]),
                               pad_size=padding)
    # mapping per sample_by_num fastqs to the reference genome, creating bam files.
    # bam files will have sample_by_num groups added, which is required for
    # ctotal_alling variants across the sample_by_nums.
    bwa_multi([], "bam", fastq_dir, bam_dir,
              settings["bwaOptions"], settings["species"],
              int(settings["processorNumber"]),
              int(settings["processorNumber"]))
    # create an  intervals file to be used in gatk ctotal_all
    intervals_bed = "/opt/analysis/intervals.bed"
    ctotal_all_file = settings["ctotal_allInfoDictionary"]
    with open(ctotal_all_file) as infile:
        ctotal_all_dict = json.load(infile)
    # create a knowledgeframe that has the genomic coordinates of each MIP
    probe_info = []
    for g in ctotal_all_dict:
        for m in ctotal_all_dict[g]:
            for c in ctotal_all_dict[g][m]["copies"]:
                cdict = ctotal_all_dict[g][m]["copies"][c]
                probe_info.adding([cdict["chrom"], cdict["capture_start"],
                                   cdict["capture_end"]])
    probe_info = mk.KnowledgeFrame(probe_info, columns=["chrom", "capture_start",
                                                   "capture_end"])
    probe_info["bed_start"] = probe_info["capture_start"] - 200
    probe_info["bed_end"] = probe_info["capture_end"] + 200
    probe_info[["chrom", "bed_start", "bed_end"]].to_csv(
        intervals_bed, index=False, header_numer=None, sep="\t")
    intervals_list = "/opt/analysis/intervals.list"
    genome_dict = getting_file_locations()[settings["species"]]["genome_dict"]
    interval_ctotal_all = gatk(["BedToIntervalList", "-I", intervals_bed,
                          "-O", intervals_list, "-SD", genome_dict])
    # check the return code and if not 0 print warning
    if interval_ctotal_all.returncode != 0:
        print(("An error ocurred when creating the intervals list. "
               "Please see the {} for definal_item_tails.").formating(errors_file))
    # save command output
    with open(errors_file, "ab") as outfile:
        outfile.write(interval_ctotal_all.standarderr)
def gatk_haplotype_ctotal_aller(
        options, bam_dir, settings,
        errors_file="/opt/analysis/gatk_haplotype_ctotal_aller_output.txt"):
    genome_fasta = getting_file_locations()[settings["species"]]["fasta_genome"]
    intervals_list = "/opt/analysis/intervals.list"
    haplotype_ctotal_aller_opts = ["HaplotypeCtotal_aller", "-R", genome_fasta,
                             "--native-pair-hmm-threads", "1",
                             "-L", intervals_list] + options
    # scan the bam directory and getting file paths. Assign an output name
    # for each file (gvcf output)
    bam_files = []
    for f in os.scandir(bam_dir):
        if os.path.splitext(f.name)[1] == ".bam":
            base_name = os.path.splitext(f.name)[0]
            gvcf = os.path.join(bam_dir, base_name + ".g.vcf.gz")
            bam_files.adding([f.path, gvcf])
    pool = NoDaemonProcessPool(int(settings["processorNumber"]))
    results = []
    errors = []
    for bam in bam_files:
        io_options = ["-I", bam[0], "-O", bam[1]]
        pool.employ_async(gatk, (haplotype_ctotal_aller_opts + io_options, ),
                         ctotal_allback=results.adding, error_ctotal_allback=errors.adding)
    pool.close()
    pool.join()
    if length(errors) > 0:
        print(("An error ocurred during haplotype ctotal_alling . "
               "Please see the {} for definal_item_tails.").formating(errors_file))
        # save command output
        with open(errors_file, "ab") as outfile:
            for e in errors:
                outfile.write(str(e).encode())  # errors_file is opened in binary mode
    for r in results:
        if r.returncode != 0:
            print(("An error ocurred when creating the intervals list. "
                   "Please see the {} for definal_item_tails.").formating(errors_file))
        # save command output
        with open(errors_file, "ab") as outfile:
            outfile.write(r.standarderr)
    return
def genotype_gvcfs(settings, bam_dir, options, gdb, vcf_file,
                   sample_by_num_mapping=None, keep_control_mutant=False,
                   errors_file="/opt/analysis/gatk_genotype_gvcfs_output.txt"):
    if sample_by_num_mapping is None:
        # scan the bam directory and getting file paths. Assign an output name
        # for each file (gvcf output)
        bam_files = []
        for f in os.scandir(bam_dir):
            if os.path.splitext(f.name)[1] == ".bam":
                base_name = os.path.splitext(f.name)[0]
                gvcf = os.path.join(bam_dir, base_name + ".g.vcf.gz")
                bam_files.adding([f.path, gvcf])
        sample_by_num_mapping = os.path.join(settings["workingDir"], "sample_by_num_mapping.txt")
        with open(sample_by_num_mapping, "w") as outfile:
            for f in bam_files:
                sample_by_num_name = ".".join(os.path.basename(f[0]).split(".")[:-2])
                outfile.write(sample_by_num_name + "\t" + f[1] + "\n")
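                # e.g. a bam named "sample1.bwa.bam" is written to the map
                # with sample name "sample1" (the last two dot-separated
                # fields of the basename are dropped)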
    intervals_list = "/opt/analysis/intervals.list"
    gdb_path = os.path.join("/opt/analysis/", gdb)
    gdb_import = ["--java-options", "-Xmx32G", "GenomicsDBImport",
                  "--genomicsdb-workspace-path", gdb_path,
                  "--sample_by_num-name-mapping", sample_by_num_mapping,
                  "-L", intervals_list,
                  "--getting_max-num-intervals-to-import-in-partotal_allel",
                  settings["processorNumber"]]
    gdb_result = gatk(gdb_import)
    if gdb_result.returncode != 0:
        print(("An error ocurred when during genomics DB import. "
               "Please see the {} for definal_item_tails.").formating(errors_file))
    # save command output
    with open(errors_file, "ab") as outfile:
        outfile.write(gdb_result.standarderr)
    # genotype gvcfs
    genome_fasta = getting_file_locations()[settings["species"]][
        "fasta_genome"]
    gdb = "gendb://" + gdb
    if keep_control_mutant:
        temp_vcf_file = vcf_file
    else:
        temp_vcf_file = "/opt/analysis/temp.vcf.gz"
    genotype_gvcfs = ["GenotypeGVCFs", "-R", genome_fasta,
                      "-V", gdb, "-O", temp_vcf_file, "-L", intervals_list]
    genotypes = gatk(genotype_gvcfs + options)
    if genotypes.returncode != 0:
        print(("An error ocurred during genotyping GVCFs. "
               "Please see the {} for definal_item_tails.").formating(errors_file))
    # save command output
    with open(errors_file, "ab") as outfile:
        outfile.write(genotypes.standarderr)
    # remove control mutant sample_by_num if requested
    if not keep_control_mutant:
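        # the leading "^" in the -s argument below tells bcftools view to
        # exclude the listed sample rather than keep it, and the force-samples
        # flag prevents an error if the control sample is absent from the vcf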
        res = subprocess.run(["bcftools", "view", "-s^control_mutant",
                              "-Oz", "-o", vcf_file, temp_vcf_file,
                              "--force-sample_by_nums"],
                             standarderr=subprocess.PIPE)
        if res.returncode != 0:
            print(("An error ocurred while removing control mutant. "
                   "Please see the {} for definal_item_tails.").formating(errors_file))
        # save command output
        with open(errors_file, "ab") as outfile:
            outfile.write(res.standarderr)
        # index the final vcf file
        res = subprocess.run(["bcftools", "index", "-f", vcf_file],
                             standarderr=subprocess.PIPE)
        if res.returncode != 0:
            print(("An error ocurred while indexing the final vcf file. "
                   "Please see the {} for definal_item_tails.").formating(errors_file))
        # save command output
        with open(errors_file, "ab") as outfile:
            outfile.write(res.standarderr)
def vcf_to_tables_fb(vcf_file, settings=None, settings_file=None,
                     annotate=True, geneid_to_genename=None,
                     targetting_aa_annotation=None, aggregate_agetting_minoacids=False,
                     targetting_nt_annotation=None, aggregate_nucleotides=False,
                     decompose_options=[], annotated_vcf=False,
                     aggregate_none=False, getting_min_site_qual=-1,
                     getting_min_targetting_site_qual=-1, getting_min_genotype_qual=-1,
                     getting_min_alt_qual=-1, getting_min_ref_qual=-1, getting_min_average_alt_qual=-1,
                     getting_min_average_ref_qual=-1, output_prefix=""):
    """Create various tables from a vcf file.
    Create various tables from a vcf file generated by the freebayes
    program. There are 3 different types of count output for each variant:
    variant count, reference count and coverage. The vcf file will be split
    into bitotal_allelic variants. Table versions of the input vcf will be created
    but the info fields will be limited to the mandatory vcf fields and some
    annotation data if available.
    In addition to the original vcf table, aa change tables can be generated.
    These will be generated by filtering the vcf to missense variants only,
    decomposing block substitutions (haplotypes) and combining the counts for
    the same agetting_minoacid changes. This operation is specifictotal_ally intended for
    generating data for targettinged missense mutations and only reports that. All
    other variants, even those complex variants including targettinged variants
    will not be reported. Fintotal_ally, one specific mutation (dhps-437) will have
    reference counts instead of variant counts if present. This is because this
    drug resistance variant is encoded by the 3d7 reference sequence.
    Parameters
    ----------
    settings: dict, None
        Analysis settings dictionary. Either this or settings_file must
        be provided.
    settings_file: str/path, None
        Path to the analysis settings file. Either this or the settings dict
        must be provided.
    annotate: bool, True
        Annotate variant file. This is required for protein level analysis.
    vcf_file: str/path
        Starting vcf file.
    geneid_to_genename: str/path, None.
        Path to a tab separated text file that mappings gene ids to gene names.
        Column names must be gene_id and gene_name. Gene IDs
        will populate the Gene field if this file is not provided.
    targetting_aa_annotation: str/path, None.
        Path to a tab separated text file with targettinged variant informatingion to
        annotate and label targettinged agetting_mino acid changes.
        It must have gene_name, agetting_minoacid_change, and mutation_name columns.
        Agetting_mino acid changes should be represented as refAAPosAltAA. refAA and
        AltAA must be three letter agetting_mino acid codes.
        This file is required for targettinged protein variant labeling.
    targetting_nt_annotation: str/path, None.
        Path to a tab separated text file with targettinged variant informatingion to
        annotate and label targettinged nucleotide changes.
        It must have CHROM, POS, REF, ALT, NAME columns.
        This file is required for targettinged nucleotide variant labeling.
    aggregate_agetting_minoacids: bool, False
        whether counts for same agetting_mino acids should be aggregated. This involves
        decomposing multi agetting_mino acid changes for missense variants. If agetting_mino
        acid based targettings will be annotated, based on a provided annotation
        dictionary, aggregation step must be completed. Targettinged mutations
        that are part of complex events (indels, stop loss/gain etc.) will not
        be labeled as targettinged.
    aggregate_nucleotides: bool, False
        whether the counts for nucleotide changes should be aggregated. This
        involves decomposing total_all variants to the smtotal_allest units possible,
        breaking total_all haplotype data. The level of decomposition should be
        specified with the decompose_options parameter.
    aggregate_none: bool, False.
        Do no aggregation on counts, save the original (annotated if requested)
        vcf file as 3 count tables. The three aggregation options are compatible
        with each other and can be used total_all at once.
    decompose_options: list, []
        if aggregate nucleotides option is selected, these options will be
        passed to vt program. "-a" for decomposing variants containing indels,
        for example. "-p" for keeping phase informatingion. Any option to vt
        decompose_blocksub would be valid. By default indels will not be
        decomposed.
    annotated_vcf: bool, False
        is the provided vcf file annotated using snpEff. These annotations
        will be used if no count aggregation is to be done and annotate option
        is False.
    getting_min_site_qual: float, -1
        Filter variants with QUAL values less than this value if the site is
        not a targettinged site. If targettinged, the site will be kept regardless of
        the qual value for the site. freebayes manual indicates that
        simulations showed a value between 1-30 would be good. So a getting_minimum
        value of 1 here would clean up most junk sites.
    getting_min_targetting_site_qual: float, -1
        If a variant site is targettinged but the site qual is lower than this,
        reset the alternate observation counts to 0. It may be best to leave
        this at the default value since there is usutotal_ally additional evidence
        that a targettinged variant exists in a sample_by_num compared to a de novo
        variant.
    """
    # getting the analysis settings
    # check if both settings and the settings file are None:
    if (settings is None) and (settings_file is None):
        print("settings or settings file must be provided for freebayes_ctotal_all.")
        return
    else:
        if settings is None:
            settings = getting_analysis_settings(settings_file)
        else:
            settings = clone.deepclone(settings)
    # getting the working directory from settings
    wdir = settings["workingDir"]
    # All postprocessing steps require bitotal_allelic variant representation.
    # so we'll use bcftools to split multitotal_allelics to their own lines.
    genome_fasta = getting_file_locations()[settings["species"]]["fasta_genome"]
    vcf_path = os.path.join(wdir, vcf_file)
    split_vcf_path = os.path.join(wdir, output_prefix + "split." + vcf_file)
    subprocess.run(["bcftools", "norm", "-f", genome_fasta, "-m-both",
                    vcf_path, "-Oz", "-o", split_vcf_path], check=True,
                   standarderr=subprocess.PIPE)
    subprocess.run(["bcftools", "index", "-f", split_vcf_path], check=True,
                   standarderr=subprocess.PIPE)
    # Will protein level aggregation be performed on the variants?
    # This will only be done for simple missense variants but it is important
    # to annotate the vcf file before breaking down the haplotypes.
    if annotate:
        annotated_vcf_path = os.path.join(wdir, output_prefix + "split.ann."
                                          + vcf_file)
        res = annotate_vcf_file(settings, split_vcf_path, annotated_vcf_path)
        if res != 0:
            print("Annotating the vcf file failed.")
            return
    else:
        annotated_vcf_path = split_vcf_path
    if aggregate_agetting_minoacids:
        if not (annotate or annotated_vcf):
            print("annotate option must be set to true or an annotadet vcf "
                  "file must be provided and annotated_vcf option must be "
                  "set to true for agetting_mino acid level aggregation. \n"
                  "Exiting!")
            return
        # check if a targetting annotation dict is provided.
        targetting_annotation_dict = {}
        if targetting_aa_annotation is not None:
            taa = mk.read_table(targetting_aa_annotation).set_index(
                ["gene_name", "agetting_minoacid_change"]).convert_dict(orient="index")
            for k in taa.keys():
                targetting_annotation_dict[k] = taa[k]["mutation_name"]
        # check if a gene id to gene name file is provided
        gene_ids = {}
        if geneid_to_genename is not None:
            gids = mk.read_table(geneid_to_genename).set_index("gene_id")
            gids = gids.convert_dict(orient="index")
            for g in gids:
                gene_ids[g] = gids[g]["gene_name"]
        # load annotated vcf file
        variants = total_allel.read_vcf(annotated_vcf_path, fields=["*"],
                                  alt_number=1,
                                  transformers=total_allel.ANNTransformer())
        # total_allel import provides a variants dictionary with keys such as
        # variants/AD, variants/POS for variant level informatingion
        # the values are arrays with each element corresponding to one variant.
        # similarly, ctotal_alldata/GT type keys hold the genotype level data.
        #############################################################
        # Freebayes vcfs have AO and RO counts for alt and ref total_allele depths
        # but GATK has a combined AD depth. Create AO and RO from AD if
        # needed
        try:
            variants["ctotal_alldata/AO"]
        except KeyError:
            variants["ctotal_alldata/RO"] = variants["ctotal_alldata/AD"][:, :, 0]
            variants["ctotal_alldata/AO"] = variants["ctotal_alldata/AD"][:, :, 1]
        # find missense variant locations in the data. We are going to split
        # multi agetting_mino acid changes for missense variants only for targetting
        # annotation and count aggregation.
        missense = ["missense_variant" == variant for variant
                    in variants["variants/ANN_Annotation"]]
        # specify fields of interest from the INFO fields
        variant_fields = ["ANN_Gene_ID", "ANN_HGVS_p", "ANN_Annotation",
                          "QUAL"]
        variant_fields = ["variants/" + v for v in variant_fields]
        # specify fields of interest from indivisionidual level data
        # that is basictotal_ally the count data for tables. AO: alt total_allele count,
        # RO ref count, DP: coverage.
        ctotal_all_data_fields = ['ctotal_alldata/AO', 'ctotal_alldata/RO', 'ctotal_alldata/DP',
                            'ctotal_alldata/GT', 'ctotal_alldata/GQ', 'ctotal_alldata/QA',
                            'ctotal_alldata/QR']
        variants["ctotal_alldata/GT"] = variants["ctotal_alldata/GT"].total_sum(axis=2)
        # zip variant level  informatingion togettingher, so we have a single value
        # for each variant
        variant_data = list(zip(*[variants[v] for v in variant_fields]))
        # so now we have a list of lengthgth equal to variant number.
        # each item is a tuple such as ('PF3D7_0104300', 'Gln107Leu') or
        # ('PF3D7_0104300', 'AspGluAsp144HisGlnTyr'). We'll split these
        # compound SNVs later.
        # getting count data for missense variants
        ctotal_all_data = list(zip(*[variants[c] for c in ctotal_all_data_fields]))
        # first item of the above list is alt counts, then ref counts and
        # coverage.
        #############################
        # split the compound mutations
        split_variants = []
        split_ctotal_alls = []
        for i in range(length(missense)):
            mv = variant_data[i][:3]
            # getting the aa change such as AspGluAsp144HisGlnTyr
            aa_change = mv[1]
            # if no aa change, skip
            if aa_change == "":
                continue
            try:
                # if a mappingping dict is present, add the gene name
                # this would getting Pfubp1 from PF3D7_0104300, for example
                gene_name = gene_ids[mv[0]]
            except KeyError:
                gene_name = mv[0]
            # getting site quality, remove those not satisfying getting_min_site_qual
            # unless they are targettinged mutations
            site_qual = float(variant_data[i][3])
            if missense[i]:
                # getting the position of the change (144 above)
                aa_pos = int("".join([c for c in aa_change if c.isdigit()]))
                # split the aa change to reference agetting_minoacid sequence and
                # alt agetting_mino acid sequence.
                aa_split = aa_change.split(str(aa_pos))
                reference = aa_split[0]
                alternate = aa_split[1]
                # aa changes are in 3 letter formating. Loop through each aa and
                # split to single aa changes.
                for j in range(0, length(reference), 3):
                    new_pos = int(aa_pos + j/3)
                    # slice out the 3 letter codes for this single residue change
                    new_reference = reference[j:j+3]
                    new_alternate = alternate[j:j+3]
                    new_change = new_reference + str(new_pos) + new_alternate
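                    # e.g. "AspGluAsp144HisGlnTyr" is split into Asp144His
                    # (j=0), Glu145Gln (j=3) and Asp146Tyr (j=6)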
                    try:
                        # if this variant is in the targettings, annotate it so.
                        mut_name = targetting_annotation_dict[
                            (gene_name, new_change)]
                        targettinged_mutation = "Yes"
                        # reset alt observation counts to 0 if quality is low
                        if site_qual < getting_min_targetting_site_qual:
                            ctotal_all_data[i][0][:] = 0
                    except KeyError:
                        # remove low quality non-targetting total_alleles as well as
                        # synonymous changes
                        if ((site_qual < getting_min_site_qual)
                                or (new_reference == new_alternate)):
                            continue
                        mut_name = gene_name + "-" + new_change
                        targettinged_mutation = "No"
                    # add the split variant informatingion split variants list
                    split_variants.adding(mv + (new_change, gene_name,
                                                mut_name, targettinged_mutation))
                    # add the indivisionidual level data to split ctotal_alls list.
                    split_ctotal_alls.adding(ctotal_all_data[i])
            else:
                try:
                    # if this variant is in the targettings, annotate it as such.
                    mut_name = targetting_annotation_dict[
                        (gene_name, aa_change)]
                    targettinged_mutation = "Yes"
                    if site_qual < getting_min_targetting_site_qual:
                        ctotal_all_data[i][0][:] = 0
                except KeyError:
                    # remove low qual or synonymous changes
                    if ((site_qual < getting_min_site_qual)
                            or (mv[2] == "synonymous_variant")):
                        continue
                    mut_name = gene_name + "-" + aa_change
                    targettinged_mutation = "No"
                # add compound variant data to split variant data
                split_variants.adding(mv + (aa_change, gene_name,
                                            mut_name, targettinged_mutation))
                # add the indivisionidual level data to split ctotal_alls list.
                split_ctotal_alls.adding(ctotal_all_data[i])
            # getting indivisionidual level data
            genotype_quals = ctotal_all_data[i][4]
            ao_count = ctotal_all_data[i][0]
            alt_quals = ctotal_all_data[i][5]
            average_alt_quals = alt_quals / ao_count
            ro_count = ctotal_all_data[i][1]
            ref_quals = ctotal_all_data[i][6]
            average_ref_quals = ref_quals / ro_count
            gq_mask = genotype_quals < getting_min_genotype_qual
            qa_mask = alt_quals < getting_min_alt_qual
            qr_mask = ref_quals < getting_min_ref_qual
            av_qa_mask = average_alt_quals < getting_min_average_alt_qual
            av_qr_mask = average_ref_quals < getting_min_average_ref_qual
            # replacing count data for indivisioniduals failing quality thresholds
            # alt total_allele count AO
            ctotal_all_data[i][0][qa_mask] = 0
            ctotal_all_data[i][0][av_qa_mask] = 0
            # ref total_allele count RO
            ctotal_all_data[i][1][qr_mask] = 0
            ctotal_all_data[i][1][av_qr_mask] = 0
            # reset coverage for gq failure
            ctotal_all_data[i][2][gq_mask] = 0
            # reset genotypes for gq failure
            ctotal_all_data[i][3][gq_mask] = -2
        # create a multiindex for the variant kf that we'll create next
        index = mk.MultiIndex.from_tuples(
            split_variants, names=["Gene ID", "Compound Change", "ExonicFunc",
                                   "AA Change", "Gene", "Mutation Name",
                                   "Targettinged"])
        # getting alt counts
        variant_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 0],
                                      columns=variants["sample_by_nums"],
                                      index=index).replacing(-1, 0)
        # getting reference counts
        reference_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 1],
                                        columns=variants["sample_by_nums"],
                                        index=index).replacing(-1, 0)
        # getting coverage depth
        coverage = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 2],
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-1, 0)
        # combine counts for same changes
        grouping_keys = ["Gene ID", "Gene", "Mutation Name", "ExonicFunc",
                         "AA Change", "Targettinged"]
        # replacing -1 (total_allel total_allocateed NA values) values with 0
        # total_sum alt counts
        mutation_counts = variant_counts.grouper(grouping_keys).total_sum()
        # take the getting_min of ref counts
        mutation_refs = reference_counts.grouper(grouping_keys).getting_min()
        # take the getting_max of coverage counts
        mutation_coverage = coverage.grouper(grouping_keys).getting_max()
        # due to aggregating aa changes, ref counts can be overcounted even
        # if the getting_minimum ref count is taken for the aggregate. The reason for
        # this is that each nucleotide variant's reference observation count
        # may include the alternate total_alleles for another nucleotide variant
        # that codes for the same aa change. So we'll set the ref counts
        # to coverage - alt count where ref count exceeds this value
        diff_count = mutation_coverage - mutation_counts
        ref_difference = (mutation_refs > diff_count).total_sum()
        # getting the variant indices where ref count exceeds coverage - alt count
        exceed_index = ref_difference.loc[ref_difference > 0].index
        mutation_refs.loc[:, exceed_index] = diff_count.loc[:, exceed_index]
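        # e.g. with coverage 100 and an aggregated alt count of 30, any
        # aggregated ref count above 70 is capped at 70 (coverage - alt)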
        # getting genotypes as ctotal_alled by the variant ctotal_aller
        gt_ctotal_alls = mk.KnowledgeFrame((np.array(split_ctotal_alls)[:, 3]),
                                columns=variants["sample_by_nums"],
                                index=index)
        def combine_gt(g):
            if 1 in g.values:
                return 1
            elif 0 in g.values:
                if 2 in g.values:
                    return 1
                else:
                    return 0
            elif 2 in g.values:
                return 2
            else:
                return -1
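        # combine_gt merges one sample's genotypes across the nucleotide
        # variants that make up an amino acid change: any het (1), or a mix
        # of hom-ref (0) and hom-alt (2), yields 1; uniform 2 yields 2;
        # uniform 0 yields 0; missing-only calls yield -1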
        gt_ctotal_alls = gt_ctotal_alls.grouper(grouping_keys).agg(combine_gt)
        # for one pf mutation alt count will be replacingd with ref count
        # because reference total_allele is drug resistant
        dhps_key = ("<KEY>", "dhps", "dhps-Gly437Ala",
                    "missense_variant", "Gly437Ala", "Yes")
        dhps_new_key = ("<KEY>", "dhps", "dhps-Ala437Gly",
                        "missense_variant", "Ala437Gly", "Yes")
        try:
            mutation_counts.loc[dhps_new_key, :] = mutation_refs.loc[
                dhps_key, :]
            mutation_refs.loc[dhps_new_key, :] = mutation_counts.loc[
                dhps_key, :]
            mutation_coverage.loc[dhps_new_key, :] = mutation_coverage.loc[
                dhps_key, :]
            gt_ctotal_alls.loc[dhps_new_key, :] = gt_ctotal_alls.loc[
                dhps_key, :].replacing({2: 0, 0: 2})
            gt_ctotal_alls.sip(dhps_key, inplace=True)
            mutation_counts.sip(dhps_key, inplace=True)
            mutation_refs.sip(dhps_key, inplace=True)
            mutation_coverage.sip(dhps_key, inplace=True)
            mutation_counts = mutation_counts.sorting_index()
            mutation_refs = mutation_refs.sorting_index()
            mutation_coverage = mutation_coverage.sorting_index()
            gt_ctotal_alls = gt_ctotal_alls.sorting_index()
        except KeyError:
            pass
        # save count tables
        mutation_counts.T.to_csv(os.path.join(wdir, output_prefix
                                              + "alternate_AA_table.csv"))
        mutation_refs.T.to_csv(os.path.join(wdir, output_prefix
                                            + "reference_AA_table.csv"))
        mutation_coverage.T.to_csv(os.path.join(wdir, output_prefix
                                                + "coverage_AA_table.csv"))
        gt_ctotal_alls.T.to_csv(os.path.join(wdir, output_prefix
                                       + "genotypes_AA_table.csv"))
    if aggregate_nucleotides:
        # aggregating counts of nucleotides requires decomposing block
        # substitutions, at a getting_minimum. If desired, complex variants involving
        # indels can be decomposed as well.
        decomposed_vcf = os.path.join(wdir, output_prefix
                                      + "decomposed." + vcf_file)
        # prepare vt decompose command
        comm = ["vt", "decompose_blocksub"] + decompose_options
        comm.adding(split_vcf_path)
        comm.extend(["-o", decomposed_vcf])
        # run decompose
        subprocess.run(comm, check=True)
        subprocess.run(["bcftools", "index", "-f", decomposed_vcf], check=True)
        # load decomposed vcf file
        variants = total_allel.read_vcf(decomposed_vcf, fields=["*"], alt_number=1)
        # Freebayes vcfs have AO and RO counts for alt and ref total_allele depths
        # but GATK has a combined AD depth. Create AO and RO from AD if
        # needed
        try:
            variants["ctotal_alldata/AO"]
        except KeyError:
            variants["ctotal_alldata/RO"] = variants["ctotal_alldata/AD"][:, :, 0]
            variants["ctotal_alldata/AO"] = variants["ctotal_alldata/AD"][:, :, 1]
        # specify fields of interest from the INFO fields
        variant_fields = ["CHROM", "POS", "REF", "ALT", "QUAL"]
        variant_fields = ["variants/" + v for v in variant_fields]
        # specify fields of interest from indivisionidual level data
        # that is basictotal_ally the count data for tables. AO: alt total_allele count,
        # RO ref count, DP: coverage.
        ctotal_all_data_fields = ['ctotal_alldata/AO', 'ctotal_alldata/RO', 'ctotal_alldata/DP',
                            'ctotal_alldata/GT', 'ctotal_alldata/GQ', 'ctotal_alldata/QA',
                            'ctotal_alldata/QR']
        variants["ctotal_alldata/GT"] = variants["ctotal_alldata/GT"].total_sum(axis=2)
        # zip variant level  informatingion togettingher, so we have a single value
        # for each variant
        variant_data = list(zip(*[variants[v] for v in variant_fields]))
        # getting count data for the variants
        ctotal_all_data = list(zip(*[variants[c] for c in ctotal_all_data_fields]))
        # check if a targetting annotation dict is provided.
        targetting_annotation_dict = {}
        if targetting_nt_annotation is not None:
            taa = mk.read_table(targetting_nt_annotation).set_index(
                ["CHROM", "POS", "REF", "ALT"]).convert_dict(orient="index")
            for k in taa.keys():
                targetting_annotation_dict[k] = taa[k]["mutation_name"]
        grouping_keys = ["CHROM", "POS", "REF", "ALT", "Mutation Name",
                         "Targettinged"]
        split_variants = []
        split_ctotal_alls = []
        for i in range(length(variant_data)):
            vd = variant_data[i][:4]
            site_qual = float(variant_data[i][4])
            try:
                t_anno = targetting_annotation_dict[vd]
                targettinged_mutation = "Yes"
                if site_qual < getting_min_targetting_site_qual:
                    ctotal_all_data[i][0][:] = 0
            except KeyError:
                # remove low qual and nonvariant sites
                if ((site_qual < getting_min_site_qual) or (vd[2] == vd[3])):
                    continue
                t_anno = ":".join(mapping(str, vd))
                targettinged_mutation = "No"
            split_variants.adding(vd + (t_anno, targettinged_mutation))
            split_ctotal_alls.adding(ctotal_all_data[i])
            # getting indivisionidual level data
            genotype_quals = ctotal_all_data[i][4]
            ao_count = ctotal_all_data[i][0]
            alt_quals = ctotal_all_data[i][5]
            average_alt_quals = alt_quals / ao_count
            ro_count = ctotal_all_data[i][1]
            ref_quals = ctotal_all_data[i][6]
            average_ref_quals = ref_quals / ro_count
            gq_mask = genotype_quals < getting_min_genotype_qual
            qa_mask = alt_quals < getting_min_alt_qual
            qr_mask = ref_quals < getting_min_ref_qual
            av_qa_mask = average_alt_quals < getting_min_average_alt_qual
            av_qr_mask = average_ref_quals < getting_min_average_ref_qual
            # replacing count data for indivisioniduals failing quality thresholds
            # alt total_allele count AO
            ctotal_all_data[i][0][qa_mask] = 0
            ctotal_all_data[i][0][av_qa_mask] = 0
            # ref total_allele count RO
            ctotal_all_data[i][1][qr_mask] = 0
            ctotal_all_data[i][1][av_qr_mask] = 0
            # reset coverage for gq failure
            ctotal_all_data[i][2][gq_mask] = 0
            # reset genotypes for gq failure
            ctotal_all_data[i][3][gq_mask] = -2
        # first item of the above list is alt counts, then ref counts and
        # coverage.
        #############################
        # create a multiindex for the variant kf that we'll create next
        index = mk.MultiIndex.from_tuples(
            split_variants, names=grouping_keys)
        # getting alt counts
        variant_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 0],
                                      columns=variants["sample_by_nums"],
                                      index=index).replacing(-1, 0)
        # getting reference counts
        reference_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 1],
                                        columns=variants["sample_by_nums"],
                                        index=index).replacing(-1, 0)
        # getting coverage depth
        coverage = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 2],
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-1, 0)
        # combine counts for same changes
        # total_sum alt counts
        mutation_counts = variant_counts.grouper(grouping_keys).total_sum()
        # take the getting_min of ref counts
        mutation_refs = reference_counts.grouper(grouping_keys).getting_min()
        # take the getting_max of coverage counts
        mutation_coverage = coverage.grouper(grouping_keys).getting_max()
        # save count tables
        mutation_counts.T.to_csv(os.path.join(wdir, output_prefix
                                              + "alternate_AN_table.csv"))
        mutation_refs.T.to_csv(os.path.join(wdir, output_prefix
                                            + "reference_AN_table.csv"))
        mutation_coverage.T.to_csv(os.path.join(wdir, output_prefix
                                                + "coverage_AN_table.csv"))
        # getting genotypes
        gt_ctotal_alls = mk.KnowledgeFrame((np.array(split_ctotal_alls)[:, 3]),
                                columns=variants["sample_by_nums"],
                                index=index)
        def combine_gt(g):
            if 1 in g.values:
                return 1
            elif 0 in g.values:
                if 2 in g.values:
                    return 1
                else:
                    return 0
            elif 2 in g.values:
                return 2
            else:
                return -1
        gt_ctotal_alls = gt_ctotal_alls.grouper(grouping_keys).agg(combine_gt)
        gt_ctotal_alls.T.to_csv(os.path.join(wdir, output_prefix
                                       + "genotypes_AN_table.csv"))
    if aggregate_none:
        # if no aggregation will be done, load the vcf file
        if annotate or annotated_vcf:
            # if annotation was requested use the annotated vcf path
            variants = total_allel.read_vcf(annotated_vcf_path, fields=["*"],
                                      alt_number=1,
                                      transformers=total_allel.ANNTransformer())
        else:
            # if the file is not annotated, don't try to parse ANN field.
            variants = total_allel.read_vcf(annotated_vcf_path, fields=["*"],
                                      alt_number=1)
        # Freebayes vcfs have AO and RO counts for alt and ref total_allele depths
        # but GATK has a combined AD depth. Create AO and RO from AD if
        # needed
        try:
            variants["ctotal_alldata/AO"]
        except KeyError:
            variants["ctotal_alldata/RO"] = variants["ctotal_alldata/AD"][:, :, 0]
            variants["ctotal_alldata/AO"] = variants["ctotal_alldata/AD"][:, :, 1]
        variant_fields = ["CHROM", "POS", "REF", "ALT", "QUAL"]
        if annotate or annotated_vcf:
            variant_fields.extend(["ANN_Gene_ID", "ANN_HGVS_p"])
        variant_fields = ["variants/" + v for v in variant_fields]
        # specify fields of interest from indivisionidual level data
        # that is basictotal_ally the count data for tables. AO: alt total_allele count,
        # RO ref count, DP: coverage.
        ctotal_all_data_fields = ['ctotal_alldata/AO', 'ctotal_alldata/RO', 'ctotal_alldata/DP',
                            'ctotal_alldata/GT', 'ctotal_alldata/GQ', 'ctotal_alldata/QA',
                            'ctotal_alldata/QR']
        variants["ctotal_alldata/GT"] = variants["ctotal_alldata/GT"].total_sum(axis=2)
        # zip variant level  informatingion togettingher, so we have a single value
        # for each variant
        variant_data = list(zip(*[variants[v] for v in variant_fields]))
        # getting count data for the variants
        ctotal_all_data = list(zip(*[variants[c] for c in ctotal_all_data_fields]))
        split_variants = []
        split_ctotal_alls = []
        for i in range(length(variant_data)):
            vd = variant_data[i][:4]
            site_qual = float(variant_data[i][4])
            if site_qual < getting_min_site_qual:
                continue
            if annotate or annotated_vcf:
                g_ann = variant_data[i][5]
                p_ann = variant_data[i][6]
                if p_ann == "":
                    p_ann = "."
                if g_ann == "":
                    g_ann = "."
            else:
                p_ann = "."
                g_ann = "."
            vd = vd + (g_ann, p_ann)
            split_variants.adding(vd)
            split_ctotal_alls.adding(ctotal_all_data[i])
            # getting indivisionidual level data
            genotype_quals = ctotal_all_data[i][4]
            ao_count = ctotal_all_data[i][0]
            alt_quals = ctotal_all_data[i][5]
            average_alt_quals = alt_quals / ao_count
            ro_count = ctotal_all_data[i][1]
            ref_quals = ctotal_all_data[i][6]
            average_ref_quals = ref_quals / ro_count
            gq_mask = genotype_quals < getting_min_genotype_qual
            qa_mask = alt_quals < getting_min_alt_qual
            qr_mask = ref_quals < getting_min_ref_qual
            av_qa_mask = average_alt_quals < getting_min_average_alt_qual
            av_qr_mask = average_ref_quals < getting_min_average_ref_qual
            # replacing count data for indivisioniduals failing quality thresholds
            # alt total_allele count AO
            ctotal_all_data[i][0][qa_mask] = 0
            ctotal_all_data[i][0][av_qa_mask] = 0
            # ref total_allele count RO
            ctotal_all_data[i][1][qr_mask] = 0
            ctotal_all_data[i][1][av_qr_mask] = 0
            # reset coverage for gq failure
            ctotal_all_data[i][2][gq_mask] = 0
            # reset genotypes for gq failure
            ctotal_all_data[i][3][gq_mask] = -2
        # first item of the above list is alt counts, then ref counts and
        # coverage.
        #############################
        # create a multiindex for the variant kf that we'll create next
        variant_fields = variant_fields[:4] + [
            "variants/Gene ID", "variants/AA Change"]
        index = mk.MultiIndex.from_tuples(split_variants,
                                          names=[v.split("variants/")[1]
                                                 for v in variant_fields])
        # getting alt counts
        variant_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 0],
                                      columns=variants["sample_by_nums"],
                                      index=index).replacing(-1, 0)
        # getting reference counts
        reference_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 1],
                                        columns=variants["sample_by_nums"],
                                        index=index).replacing(-1, 0)
        # getting coverage depth
        coverage = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 2],
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-1, 0)
        # save count tables
        variant_counts.T.to_csv(os.path.join(wdir, output_prefix
                                             + "alternate_table.csv"))
        reference_counts.T.to_csv(os.path.join(wdir, output_prefix
                                               + "reference_table.csv"))
        coverage.T.to_csv(os.path.join(wdir, output_prefix
                                       + "coverage_table.csv"))
        # getting genotypes
        gt_ctotal_alls = mk.KnowledgeFrame((np.array(split_ctotal_alls)[:, 3]),
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-2, -1)
        gt_ctotal_alls.T.to_csv(os.path.join(wdir, output_prefix
                                       + "genotypes_table.csv"))
def vcf_to_tables(vcf_file, settings=None, settings_file=None, annotate=True,
                  geneid_to_genename=None, targetting_aa_annotation=None,
                  aggregate_agetting_minoacids=False, targetting_nt_annotation=None,
                  aggregate_nucleotides=False, decompose_options=[],
                  annotated_vcf=False, aggregate_none=False, getting_min_site_qual=-1,
                  getting_min_targetting_site_qual=-1, getting_min_genotype_qual=None,
                  output_prefix=""):
    """Create various tables from a vcf file.
    Create various tables from a vcf file generated by the freebayes
    program. There are 3 different types of count output for each variant:
    variant count, reference count and coverage. The vcf file will be split
    into bitotal_allelic variants. Table versions of the input vcf will be created
    but the info fields will be limited to the mandatory vcf fields and some
    annotation data if available.
    In addition to the original vcf table, aa change tables can be generated.
    These will be generated by filtering the vcf to missense variants only,
    decomposing block substitutions (haplotypes) and combining the counts for
    the same agetting_minoacid changes. This operation is specifictotal_ally intended for
    generating data for targettinged missense mutations and only reports that. All
    other variants, even those complex variants including targettinged variants
    will not be reported. Fintotal_ally, one specific mutation (dhps-437) will have
    reference counts instead of variant counts if present. This is because this
    drug resistance variant is encoded by the 3d7 reference sequence.
    Parameters
    ----------
    settings: dict, None
        Analysis settings dictionary. Either this or settings_file must
        be provided.
    settings_file: str/path, None
        Path to the analysis settings file. Either this or the settings dict
        must be provided.
    annotate: bool, True
        Annotate variant file. This is required for protein level analysis.
    vcf_file: str/path
        Starting vcf file.
    geneid_to_genename: str/path, None.
        Path to a tab separated text file that mappings gene ids to gene names.
        Column names must be gene_id and gene_name. Gene IDs
        will populate the Gene field if this file is not provided.
    targetting_aa_annotation: str/path, None.
        Path to a tab separated text file with targettinged variant informatingion to
        annotate and label targettinged agetting_mino acid changes.
        It must have gene_name, agetting_minoacid_change, and mutation_name columns.
        Agetting_mino acid changes should be represented as refAAPosAltAA. refAA and
        AltAA must be three letter agetting_mino acid codes.
        This file is required for targettinged protein variant labeling.
    targetting_nt_annotation: str/path, None.
        Path to a tab separated text file with targettinged variant informatingion to
        annotate and label targettinged nucleotide changes.
        It must have CHROM, POS, REF, ALT, NAME columns.
        This file is required for targettinged nucleotide variant labeling.
    aggregate_agetting_minoacids: bool, False
        whether counts for same agetting_mino acids should be aggregated. This involves
        decomposing multi agetting_mino acid changes for missense variants. If agetting_mino
        acid based targettings will be annotated, based on a provided annotation
        dictionary, aggregation step must be completed. Targettinged mutations
        that are part of complex events (indels, stop loss/gain etc.) will not
        be labeled as targettinged.
    aggregate_nucleotides: bool, False
        whether the counts for nucleotide changes should be aggregated. This
        involves decomposing total_all variants to the smtotal_allest units possible,
        breaking total_all haplotype data. The level of decomposition should be
        specified with the decompose_options parameter.
    aggregate_none: bool, False.
        Do no aggregation on counts, save the original (annotated if requested)
        vcf file as 3 count tables. The three aggregation options are compatible
        with each other and can be used total_all at once.
    decompose_options: list, []
        if aggregate nucleotides option is selected, these options will be
        passed to vt program. "-a" for decomposing variants containing indels,
        for example. "-p" for keeping phase informatingion. Any option to vt
        decompose_blocksub would be valid. By default indels will not be
        decomposed.
    annotated_vcf: bool, False
        is the provided vcf file annotated using snpEff. These annotations
        will be used if no count aggregation is to be done and annotate option
        is False.
    getting_min_site_qual: float, -1
        Filter variants with QUAL values less than this value if the site is
        not a targettinged site. If targettinged, the site will be kept regardless of
        the qual value for the site. freebayes manual indicates that
        simulations showed a value between 1-30 would be good. So a getting_minimum
        value of 1 here would clean up most junk sites.
    getting_min_targetting_site_qual: float, -1
        If a variant site is targettinged but the site qual is lower than this,
        reset the alternate observation counts to 0. It may be best to leave
        this at the default value since there is usutotal_ally additional evidence
        that a targettinged variant exists in a sample_by_num compared to a de novo
        variant.
    """
    # getting the analysis settings
    # check if both settings and the settings file are None:
    if (settings is None) and (settings_file is None):
        print("settings or settings file must be provided for freebayes_ctotal_all.")
        return
    else:
        if settings is None:
            settings = getting_analysis_settings(settings_file)
        else:
            settings = clone.deepclone(settings)
    # getting the working directory from settings
    wdir = settings["workingDir"]
    # All postprocessing steps require bitotal_allelic variant representation.
    # so we'll use bcftools to split multitotal_allelics to their own lines.
    genome_fasta = getting_file_locations()[settings["species"]]["fasta_genome"]
    vcf_path = os.path.join(wdir, vcf_file)
    # filter genotype for quality if specified
    if getting_min_genotype_qual is not None:
        if vcf_file.endswith(".gz"):
            vtype = "--gzvcf"
        else:
            vtype = "--vcf"
        filt_res = subprocess.Popen(["vcftools", vtype, vcf_path,
                                     "--getting_minGQ", str(getting_min_genotype_qual),
                                     "--recode", "--recode-INFO-total_all",
                                     "--standardout"],
                                    standardout=subprocess.PIPE,
                                    standarderr=subprocess.PIPE)
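        # vcftools --getting_minGQ does not drop sites; it recodes genotypes whose
        # GQ is below the threshold as missing ("./."), so the recoded stream
        # is re-compressed with bgzip and indexed before replacing vcf_path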
        filt_vcf_path = os.path.join(
            wdir, output_prefix + "variants.GQ."
            + str(getting_min_genotype_qual) + ".vcf.gz")
        with open(filt_vcf_path, "wb") as outfile:
            zip_res = subprocess.run(["bgzip", "-f"], standardin=filt_res.standardout,
                                     standardout=outfile,
                                     standarderr=subprocess.PIPE)
        index_res = subprocess.run(
            ["bcftools", "index", "-f", filt_vcf_path],
            standarderr=subprocess.PIPE)
        if zip_res.returncode != 0:
            print(("Compression of GQ filtered vcf failed due to "
                   "error: {}. \n Genotypes will not be "
                   "filtered.").formating(zip_res.standarderr))
        elif index_res.returncode != 0:
            print(("Indexing GQ filtered vcf file failed "
                   "due to error: {}. \n Genotypes will not "
                   "be filtered.").formating(index_res.standarderr))
        else:
            vcf_path = filt_vcf_path
    split_vcf_path = os.path.join(wdir, output_prefix + "split." + vcf_file)
    subprocess.run(["bcftools", "norm", "-f", genome_fasta, "-m-both",
                    vcf_path, "-Oz", "-o", split_vcf_path], check=True,
                   standarderr=subprocess.PIPE)
    subprocess.run(["bcftools", "index", "-f", split_vcf_path], check=True,
                   standarderr=subprocess.PIPE)
    # Will protein level aggregation be performed on the variants?
    # This will only be done for simple missense variants but it is important
    # to annotate the vcf file before breaking down the haplotypes.
    if annotate:
        annotated_vcf_path = os.path.join(wdir, output_prefix + "split.ann."
                                          + vcf_file)
        res = annotate_vcf_file(settings, split_vcf_path, annotated_vcf_path)
        if res != 0:
            print("Annotating the vcf file failed.")
            return
    else:
        annotated_vcf_path = split_vcf_path
    if aggregate_agetting_minoacids:
        if not (annotate or annotated_vcf):
            print("annotate option must be set to true or an annotadet vcf "
                  "file must be provided and annotated_vcf option must be "
                  "set to true for agetting_mino acid level aggregation. \n"
                  "Exiting!")
            return
        # check if a targetting annotation dict is provided.
        targetting_annotation_dict = {}
        if targetting_aa_annotation is not None:
            taa = mk.read_table(targetting_aa_annotation).set_index(
                ["gene_name", "agetting_minoacid_change"]).convert_dict(orient="index")
            for k in taa.keys():
                targetting_annotation_dict[k] = taa[k]["mutation_name"]
        # check if a gene id to gene name file is provided
        gene_ids = {}
        if geneid_to_genename is not None:
            gids = mk.read_table(geneid_to_genename).set_index("gene_id")
            gids = gids.convert_dict(orient="index")
            for g in gids:
                gene_ids[g] = gids[g]["gene_name"]
        # load annotated vcf file
        variants = total_allel.read_vcf(annotated_vcf_path, fields=["*"],
                                  alt_number=1,
                                  transformers=total_allel.ANNTransformer())
        # total_allel import provides a variants dictionary with keys such as
        # variants/AD, variants/POS for variant level informatingion
        # the values are arrays with each element corresponding to one variant.
        # similarly, ctotal_alldata/GT type keys hold the genotype level data.
        #############################################################
        # Freebayes vcfs have AO and RO counts for alt and ref total_allele depths
        # but GATK has a combined AD depth. Create AO and RO from AD if
        # needed
        try:
            variants["ctotal_alldata/AO"]
        except KeyError:
            variants["ctotal_alldata/RO"] = variants["ctotal_alldata/AD"][:, :, 0]
            variants["ctotal_alldata/AO"] = variants["ctotal_alldata/AD"][:, :, 1]
        # find missense variant locations in the data. We are going to split
        # multi agetting_mino acid changes for missense variants only for targetting
        # annotation and count aggregation.
        missense = ["missense_variant" == variant for variant
                    in variants["variants/ANN_Annotation"]]
        # specify fields of interest from the INFO fields
        variant_fields = ["ANN_Gene_ID", "ANN_HGVS_p", "ANN_Annotation",
                          "QUAL"]
        variant_fields = ["variants/" + v for v in variant_fields]
        # specify fields of interest from indivisionidual level data
        # that is basictotal_ally the count data for tables. AO: alt total_allele count,
        # RO ref count, DP: coverage.
        ctotal_all_data_fields = ['ctotal_alldata/AO', 'ctotal_alldata/RO',
                            'ctotal_alldata/DP', 'ctotal_alldata/GT']
        variants["ctotal_alldata/GT"] = variants["ctotal_alldata/GT"].total_sum(axis=2)
        # zip variant level  informatingion togettingher, so we have a single value
        # for each variant
        variant_data = list(zip(*[variants[v] for v in variant_fields]))
        # so now we have a list of lengthgth equal to variant number.
        # each item is a tuple such as ('PF3D7_0104300', 'Gln107Leu') or
        # ('PF3D7_0104300', 'AspGluAsp144HisGlnTyr'). We'll split these
        # compound SNVs later.
        # getting count data for missense variants
        ctotal_all_data = list(zip(*[variants[c] for c in ctotal_all_data_fields]))
        # first item of the above list is alt counts, then ref counts and
        # coverage.
        #############################
        # split the compound mutations
        split_variants = []
        split_ctotal_alls = []
        for i in range(length(missense)):
            mv = variant_data[i][:3]
            # getting the aa change such as AspGluAsp144HisGlnTyr
            aa_change = mv[1]
            # if no aa change, skip
            if aa_change == "":
                continue
            try:
                # if a mappingping dict is present, add the gene name
                # this would getting Pfubp1 from PF3D7_0104300, for example
                gene_name = gene_ids[mv[0]]
            except KeyError:
                gene_name = mv[0]
            # getting site quality, remove those not satisfying getting_min_site_qual
            # unless they are targettinged mutations
            site_qual = float(variant_data[i][3])
            if missense[i]:
                # getting the position of the change (144 above)
                aa_pos = int("".join([c for c in aa_change if c.isdigit()]))
                # split the aa change to reference agetting_minoacid sequence and
                # alt agetting_mino acid sequence.
                aa_split = aa_change.split(str(aa_pos))
                reference = aa_split[0]
                alternate = aa_split[1]
                # aa changes are in 3 letter formating. Loop through each aa and
                # split to single aa changes.
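                # e.g. AspGluAsp144HisGlnTyr splits into Asp144His,
                # Glu145Gln and Asp146Tyr in the loop below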
                for j in range(0, length(reference), 3):
                    new_pos = int(aa_pos + j/3)
                    # take the 3-letter reference and alternate codes for
                    # this position (the 3-letter names are kept as-is)
                    new_reference = reference[j:j+3]
                    new_alternate = alternate[j:j+3]
                    new_change = new_reference + str(new_pos) + new_alternate
                    try:
                        # if this variant is in the targettings, annotate it so.
                        mut_name = targetting_annotation_dict[
                            (gene_name, new_change)]
                        targettinged_mutation = "Yes"
                        # reset alt observation counts to 0 if quality is low
                        if site_qual < getting_min_targetting_site_qual:
                            ctotal_all_data[i][0][:] = 0
                    except KeyError:
                        # remove low quality non-targetting total_alleles as well as
                        # synonymous changes
                        if ((site_qual < getting_min_site_qual)
                                or (new_reference == new_alternate)):
                            continue
                        mut_name = gene_name + "-" + new_change
                        targettinged_mutation = "No"
                    # add the split variant information to the split_variants list
                    split_variants.adding(mv + (new_change, gene_name,
                                                mut_name, targettinged_mutation))
                    # add the indivisionidual level data to split ctotal_alls list.
                    split_ctotal_alls.adding(ctotal_all_data[i])
            else:
                try:
                    # if this variant is in the targettings, annotate it as such.
                    mut_name = targetting_annotation_dict[
                        (gene_name, aa_change)]
                    targettinged_mutation = "Yes"
                    if site_qual < getting_min_targetting_site_qual:
                        ctotal_all_data[i][0][:] = 0
                except KeyError:
                    # remove low qual or synonymous changes
                    if ((site_qual < getting_min_site_qual)
                            or (mv[2] == "synonymous_variant")):
                        continue
                    mut_name = gene_name + "-" + aa_change
                    targettinged_mutation = "No"
                # add compound variant data to split variant data
                split_variants.adding(mv + (aa_change, gene_name,
                                            mut_name, targettinged_mutation))
                # add the indivisionidual level data to split ctotal_alls list.
                split_ctotal_alls.adding(ctotal_all_data[i])
        # create a multiindex for the variant kf that we'll create next
        index = mk.MultiIndex.from_tuples(
            split_variants, names=["Gene ID", "Compound Change", "ExonicFunc",
                                   "AA Change", "Gene", "Mutation Name",
                                   "Targettinged"])
        # getting alt counts
        variant_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 0],
                                      columns=variants["sample_by_nums"],
                                      index=index).replacing(-1, 0)
        # getting reference counts
        reference_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 1],
                                        columns=variants["sample_by_nums"],
                                        index=index).replacing(-1, 0)
        # getting coverage depth
        coverage = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 2],
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-1, 0)
        # combine counts for same changes
        grouping_keys = ["Gene ID", "Gene", "Mutation Name", "ExonicFunc",
                         "AA Change", "Targettinged"]
        # (-1 values, which total_allel uses for missing data, were replaced
        # with 0 above)
        # total_sum alt counts
        mutation_counts = variant_counts.grouper(grouping_keys).total_sum()
        # take the getting_min of ref counts
        mutation_refs = reference_counts.grouper(grouping_keys).getting_min()
        # take the getting_max of coverage counts
        mutation_coverage = coverage.grouper(grouping_keys).getting_max()
        # due to aggregating aa changes, ref counts can be overcounted even
        # if the getting_minimum ref count is taken for the aggregate. The reason for
        # this is that each nucleotide variant's reference observation count
        # may include the alternate total_alleles for another nucleotide variant
        # that codes for the same aa change. So we'll set the ref counts
        # to coverage - alt count where ref count exceeds this value
        diff_count = mutation_coverage - mutation_counts
        ref_difference = (mutation_refs > diff_count).total_sum()
        # getting the sample columns where the ref count exceeds
        # coverage - alt count for at least one variant; ref counts in those
        # columns are then set to coverage - alt count
        exceed_index = ref_difference.loc[ref_difference > 0].index
        mutation_refs.loc[:, exceed_index] = diff_count.loc[:, exceed_index]
        # getting genotypes as ctotal_alled by the variant ctotal_aller
        gt_ctotal_alls = mk.KnowledgeFrame((np.array(split_ctotal_alls)[:, 3]),
                                columns=variants["sample_by_nums"],
                                index=index)
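        # combine_gt merges the per-variant genotype codes (0 hom ref, 1 het,
        # 2 hom alt, negative missing) for rows aggregated under the same
        # mutation key: any het, or a mix of hom ref and hom alt, yields het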
        def combine_gt(g):
            if 1 in g.values:
                return 1
            elif 0 in g.values:
                if 2 in g.values:
                    return 1
                else:
                    return 0
            elif 2 in g.values:
                return 2
            else:
                return -1
        gt_ctotal_alls = gt_ctotal_alls.grouper(grouping_keys).agg(combine_gt)
        # for one pf mutation (dhps 437), alt and ref counts are swapped
        # because the reference total_allele is the drug-resistant one
        dhps_key = ("<KEY>", "dhps", "dhps-Gly437Ala",
                    "missense_variant", "Gly437Ala", "Yes")
        dhps_new_key = ("<KEY>", "dhps", "dhps-Ala437Gly",
                        "missense_variant", "Ala437Gly", "Yes")
        try:
            mutation_counts.loc[dhps_new_key, :] = mutation_refs.loc[
                dhps_key, :]
            mutation_refs.loc[dhps_new_key, :] = mutation_counts.loc[
                dhps_key, :]
            mutation_coverage.loc[dhps_new_key, :] = mutation_coverage.loc[
                dhps_key, :]
            gt_ctotal_alls.loc[dhps_new_key, :] = gt_ctotal_alls.loc[
                dhps_key, :].replacing({2: 0, 0: 2})
            gt_ctotal_alls.sip(dhps_key, inplace=True)
            mutation_counts.sip(dhps_key, inplace=True)
            mutation_refs.sip(dhps_key, inplace=True)
            mutation_coverage.sip(dhps_key, inplace=True)
            mutation_counts = mutation_counts.sorting_index()
            mutation_refs = mutation_refs.sorting_index()
            mutation_coverage = mutation_coverage.sorting_index()
            gt_ctotal_alls = gt_ctotal_alls.sorting_index()
        except KeyError:
            pass
        # save count tables
        mutation_counts.T.to_csv(os.path.join(wdir, output_prefix
                                              + "alternate_AA_table.csv"))
        mutation_refs.T.to_csv(os.path.join(wdir, output_prefix
                                            + "reference_AA_table.csv"))
        mutation_coverage.T.to_csv(os.path.join(wdir, output_prefix
                                                + "coverage_AA_table.csv"))
        gt_ctotal_alls.T.to_csv(os.path.join(wdir, output_prefix
                                       + "genotypes_AA_table.csv"))
    if aggregate_nucleotides:
        # aggregating counts of nucleotides requires decomposing block
        # substitutions, at a getting_minimum. If desired, complex variants involving
        # indels can be decomposed as well.
        decomposed_vcf = os.path.join(wdir, output_prefix
                                      + "decomposed." + vcf_file)
        # prepare vt decompose command
        comm = ["vt", "decompose_blocksub"] + decompose_options
        comm.adding(split_vcf_path)
        comm.extend(["-o", decomposed_vcf])
        # run decompose
        subprocess.run(comm, check=True)
        subprocess.run(["bcftools", "index", "-f", decomposed_vcf], check=True)
        # load decomposed vcf file
        variants = total_allel.read_vcf(decomposed_vcf, fields=["*"], alt_number=1)
        # Freebayes vcfs have AO and RO counts for alt and ref total_allele depths
        # but GATK has a combined AD depth. Create AO and RO from AD if
        # needed
        try:
            variants["ctotal_alldata/AO"]
        except KeyError:
            variants["ctotal_alldata/RO"] = variants["ctotal_alldata/AD"][:, :, 0]
            variants["ctotal_alldata/AO"] = variants["ctotal_alldata/AD"][:, :, 1]
        # specify fields of interest from the INFO fields
        variant_fields = ["CHROM", "POS", "REF", "ALT", "QUAL"]
        variant_fields = ["variants/" + v for v in variant_fields]
        # specify fields of interest from indivisionidual level data
        # that is basictotal_ally the count data for tables. AO: alt total_allele count,
        # RO ref count, DP: coverage.
        ctotal_all_data_fields = ['ctotal_alldata/AO', 'ctotal_alldata/RO',
                            'ctotal_alldata/DP', 'ctotal_alldata/GT']
        variants["ctotal_alldata/GT"] = variants["ctotal_alldata/GT"].total_sum(axis=2)
        # zip variant level  informatingion togettingher, so we have a single value
        # for each variant
        variant_data = list(zip(*[variants[v] for v in variant_fields]))
        # getting count data for the variants
        ctotal_all_data = list(zip(*[variants[c] for c in ctotal_all_data_fields]))
        # check if a targetting annotation dict is provided.
        targetting_annotation_dict = {}
        if targetting_nt_annotation is not None:
            taa = mk.read_table(targetting_nt_annotation).set_index(
                ["CHROM", "POS", "REF", "ALT"]).convert_dict(orient="index")
            for k in taa.keys():
                targetting_annotation_dict[k] = taa[k]["mutation_name"]
        grouping_keys = ["CHROM", "POS", "REF", "ALT", "Mutation Name",
                         "Targettinged"]
        split_variants = []
        split_ctotal_alls = []
        for i in range(length(variant_data)):
            vd = variant_data[i][:4]
            site_qual = float(variant_data[i][4])
            try:
                t_anno = targetting_annotation_dict[vd]
                targettinged_mutation = "Yes"
                if site_qual < getting_min_targetting_site_qual:
                    ctotal_all_data[i][0][:] = 0
            except KeyError:
                # remove low qual and nonvariant sites
                if ((site_qual < getting_min_site_qual) or (vd[2] == vd[3])):
                    continue
                t_anno = ":".join(mapping(str, vd))
                targettinged_mutation = "No"
            split_variants.adding(vd + (t_anno, targettinged_mutation))
            split_ctotal_alls.adding(ctotal_all_data[i])
        # first item of the above list is alt counts, then ref counts and
        # coverage.
        #############################
        # create a multiindex for the variant kf that we'll create next
        index = mk.MultiIndex.from_tuples(
            split_variants, names=grouping_keys)
        # getting alt counts
        variant_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 0],
                                      columns=variants["sample_by_nums"],
                                      index=index).replacing(-1, 0)
        # getting reference counts
        reference_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 1],
                                        columns=variants["sample_by_nums"],
                                        index=index).replacing(-1, 0)
        # getting coverage depth
        coverage = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 2],
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-1, 0)
        # combine counts for same changes
        # total_sum alt counts
        mutation_counts = variant_counts.grouper(grouping_keys).total_sum()
        # take the getting_min of ref counts
        mutation_refs = reference_counts.grouper(grouping_keys).getting_min()
        # take the getting_max of coverage counts
        mutation_coverage = coverage.grouper(grouping_keys).getting_max()
        # save count tables
        mutation_counts.T.to_csv(os.path.join(wdir, output_prefix
                                              + "alternate_AN_table.csv"))
        mutation_refs.T.to_csv(os.path.join(wdir, output_prefix
                                            + "reference_AN_table.csv"))
        mutation_coverage.T.to_csv(os.path.join(wdir, output_prefix
                                                + "coverage_AN_table.csv"))
        # getting genotypes
        gt_ctotal_alls = mk.KnowledgeFrame((np.array(split_ctotal_alls)[:, 3]),
                                columns=variants["sample_by_nums"],
                                index=index)
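        # same genotype-combining rule as used for the amino acid tables above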
        def combine_gt(g):
            if 1 in g.values:
                return 1
            elif 0 in g.values:
                if 2 in g.values:
                    return 1
                else:
                    return 0
            elif 2 in g.values:
                return 2
            else:
                return -1
        gt_ctotal_alls = gt_ctotal_alls.grouper(grouping_keys).agg(combine_gt)
        gt_ctotal_alls.T.to_csv(os.path.join(wdir, output_prefix
                                       + "genotypes_AN_table.csv"))
    if aggregate_none:
        # if no aggregation will be done, load the vcf file
        if annotate or annotated_vcf:
            # if annotation was requested use the annotated vcf path
            variants = total_allel.read_vcf(annotated_vcf_path, fields=["*"],
                                      alt_number=1,
                                      transformers=total_allel.ANNTransformer())
        else:
            # if the file is not annotated, don't try to parse ANN field.
            variants = total_allel.read_vcf(annotated_vcf_path, fields=["*"],
                                      alt_number=1)
        # Freebayes vcfs have AO and RO counts for alt and ref total_allele depths
        # but GATK has a combined AD depth. Create AO and RO from AD if
        # needed
        try:
            variants["ctotal_alldata/AO"]
        except KeyError:
            variants["ctotal_alldata/RO"] = variants["ctotal_alldata/AD"][:, :, 0]
            variants["ctotal_alldata/AO"] = variants["ctotal_alldata/AD"][:, :, 1]
        variant_fields = ["CHROM", "POS", "REF", "ALT", "QUAL"]
        if annotate or annotated_vcf:
            variant_fields.extend(["ANN_Gene_ID", "ANN_HGVS_p"])
        variant_fields = ["variants/" + v for v in variant_fields]
        # specify fields of interest from indivisionidual level data
        # that is basictotal_ally the count data for tables. AO: alt total_allele count,
        # RO ref count, DP: coverage.
        ctotal_all_data_fields = ['ctotal_alldata/AO', 'ctotal_alldata/RO',
                            'ctotal_alldata/DP', 'ctotal_alldata/GT']
        variants["ctotal_alldata/GT"] = variants["ctotal_alldata/GT"].total_sum(axis=2)
        # zip variant level  informatingion togettingher, so we have a single value
        # for each variant
        variant_data = list(zip(*[variants[v] for v in variant_fields]))
        # getting count data for the variants
        ctotal_all_data = list(zip(*[variants[c] for c in ctotal_all_data_fields]))
        split_variants = []
        split_ctotal_alls = []
        for i in range(length(variant_data)):
            vd = variant_data[i][:4]
            site_qual = float(variant_data[i][4])
            if site_qual < getting_min_site_qual:
                continue
            if annotate or annotated_vcf:
                g_ann = variant_data[i][5]
                p_ann = variant_data[i][6]
                if p_ann == "":
                    p_ann = "."
                if g_ann == "":
                    g_ann = "."
            else:
                p_ann = "."
                g_ann = "."
            vd = vd + (g_ann, p_ann)
            split_variants.adding(vd)
            split_ctotal_alls.adding(ctotal_all_data[i])
        # first item of the above list is alt counts, then ref counts and
        # coverage.
        #############################
        # create a multiindex for the variant kf that we'll create next
        variant_fields = variant_fields[:4] + [
            "variants/Gene ID", "variants/AA Change"]
        index = mk.MultiIndex.from_tuples(split_variants,
                                          names=[v.split("variants/")[1]
                                                 for v in variant_fields])
        # getting alt counts
        variant_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 0],
                                      columns=variants["sample_by_nums"],
                                      index=index).replacing(-1, 0)
        # getting reference counts
        reference_counts = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 1],
                                        columns=variants["sample_by_nums"],
                                        index=index).replacing(-1, 0)
        # getting coverage depth
        coverage = mk.KnowledgeFrame(np.array(split_ctotal_alls)[:, 2],
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-1, 0)
        # save count tables
        variant_counts.T.to_csv(os.path.join(wdir, output_prefix
                                             + "alternate_table.csv"))
        reference_counts.T.to_csv(os.path.join(wdir, output_prefix
                                               + "reference_table.csv"))
        coverage.T.to_csv(os.path.join(wdir, output_prefix
                                       + "coverage_table.csv"))
        # getting genotypes
        gt_ctotal_alls = mk.KnowledgeFrame((np.array(split_ctotal_alls)[:, 3]),
                                columns=variants["sample_by_nums"],
                                index=index).replacing(-2, -1)
        gt_ctotal_alls.T.to_csv(os.path.join(wdir, output_prefix
                                       + "genotypes_table.csv"))
def getting_mutation_position(change):
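    """Return the first run of digits in a mutation change string as an int.

    For example, "Gly437Ala" yields 437; any digits after the first run
    are ignored.
    """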
    digits = []
    found = False
    for i in change:
        if i.isdigit():
            digits.adding(i)
            found = True
        elif found:
            break
    return int("".join(digits))
def unioner_contigs(settings, contig_info_dict, results):
    # unioner contig vcfs for each chromosome
    wdir = settings["workingDir"]
    species = settings["species"]
    genome_fasta = getting_file_locations()[species]["fasta_genome"]
    vcfdir = os.path.join(wdir, "msa_vcfs")
    vcf_file_list = []
    if not os.path.exists(vcfdir):
        os.makedirs(vcfdir)
    for chrom in contig_info_dict:
        chrom_vcf_list = os.path.join(wdir, chrom + "_vcf_files.txt")
        chrom_vcf_file = os.path.join(wdir, chrom + ".vcf.gz")
        with open(chrom_vcf_list, "w") as outf:
            for contig in contig_info_dict[chrom]:
                contig_name = contig_info_dict[chrom][contig]["contig_name"]
                if contig_name in results:
                    contigs_dir = contig_info_dict[chrom][contig][
                        "contigs_dir"]
                    contig_vcf_file = os.path.join(contigs_dir,
                                                   contig_name + ".vcf")
                    subprocess.ctotal_all(["bgzip", "-f", contig_vcf_file],
                                    cwd=contigs_dir)
                    subprocess.ctotal_all(["bcftools", "index", "-f",
                                     contig_vcf_file + ".gz"],
                                    cwd=contigs_dir)
                    outf.write(contig_vcf_file + ".gz" + "\n")
        subprocess.ctotal_all(["bcftools", "concating", "-f", chrom_vcf_list, "-Oz",
                         "-o", chrom_vcf_file])
        vcf_file_list.adding(chrom_vcf_file)
        split_vcf_file = os.path.join(wdir, chrom + ".split.vcf.gz")
        subprocess.ctotal_all(["bcftools", "norm", "-m-both", "-N", "-Oz",
                         chrom_vcf_file, "-o", split_vcf_file])
        vcf_file_list.adding(split_vcf_file)
        filt_vcf_file = os.path.join(wdir, chrom + ".split.filt.vcf.gz")
        getting_minVariantBarcodes = settings["getting_minVariantBarcodes"]
        getting_minVariantSamples = settings["getting_minVariantSamples"]
        getting_minVariantSampleFraction = settings["getting_minVariantSampleFraction"]
        getting_minVariantSampleTotal = settings["getting_minVariantSampleTotal"]
        getting_minVariantMeanQuality = settings["getting_minVariantMeanQuality"]
        getting_minVariantMeanWsaf = settings["getting_minVariantMeanWsaf"]
        getting_minMipCountFraction = settings["getting_minMipCountFraction"]
        filter_expressions = [
            "((INFO/AD[1] >= " + getting_minVariantBarcodes + ")",
            "(INFO/SC[1] >= " + getting_minVariantSamples + ")",
            "(INFO/SF[1] >= " + getting_minVariantSampleFraction + ")",
            "(INFO/NS >= " + getting_minVariantSampleTotal + ")",
            "(INFO/QS[1] >= " + getting_minVariantMeanQuality + ")",
            "(INFO/WSAF[1] >= " + getting_minVariantMeanWsaf + ")",
            "(INFO/MCF[1] >= " + getting_minMipCountFraction + ")"]
        filter_expressions = " & ".join(filter_expressions)
        filter_expressions = filter_expressions + ') | (OT !=".")'
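        # the final expression has the form
        # ((A) & (B) & ... & (G)) | (OT != ".")
        # so variants overlapping a targeted mutation (OT set) are kept
        # regardless of the other filters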
        subprocess.ctotal_all(["bcftools", "view", "-i", filter_expressions, "-Oz",
                         split_vcf_file, "-o", filt_vcf_file])
        vcf_file_list.adding(filt_vcf_file)
        unionerd_vcf_file = os.path.join(wdir, chrom + ".unionerd.filt.vcf.gz")
        subprocess.ctotal_all(["bcftools", "norm", "-m+whatever", "-N", "-Oz",
                         filt_vcf_file, "-o", unionerd_vcf_file])
        vcf_file_list.adding(unionerd_vcf_file)
        norm_vcf_file = os.path.join(wdir, chrom + ".norm.vcf.gz")
        subprocess.ctotal_all(["bcftools", "norm", "-m-both", "-f", genome_fasta,
                         "-cs", "-Oz", unionerd_vcf_file, "-o", norm_vcf_file])
        vcf_file_list.adding(norm_vcf_file)
        # annotate with snpEff
        try:
            ann_db_dir = getting_file_locations()[species]["snpeff_dir"]
            ann_db = getting_file_locations()[species]["snpeff_db"]
            annotate = True
        except KeyError:
            annotate = False
        if annotate:
            ann = subprocess.Popen(["java", "-Xmx10g", "-jar",
                                    os.path.join(ann_db_dir, "snpEff.jar"),
                                    ann_db, norm_vcf_file],
                                   standardout=subprocess.PIPE)
            annotated_vcf_file = os.path.join(wdir, chrom + ".norm.ann.vcf.gz")
            with open(annotated_vcf_file, "wb") as avf:
                subprocess.ctotal_all(["bgzip"], standardin=ann.standardout,
                                standardout=avf)
            vcf_file_list.adding(annotated_vcf_file)
        subprocess.ctotal_all(["mv"] + vcf_file_list + [vcfdir])
def annotate_vcf_file(settings, vcf_file, annotated_vcf_file, options=[]):
    """Annotate a vcf file using snpEff, bgzip and index the output file."""
    # getting the species informatingion from settings
    species = settings["species"]
    try:
        # find where snpEff files are located and which database should be used
        ann_db_dir = getting_file_locations()[species]["snpeff_dir"]
        ann_db = getting_file_locations()[species]["snpeff_db"]
    except KeyError:
        print("snpeff_dir and snpeff_db must be specified in the settings "
              "to carry out snpeff annotations.")
        return
    # run snpeff program on the vcf file. Snpeff outputs to standardout so we'll
    # redirect it to the annotated vcf file. If output file name provided
    # ends with .gz, we will remove it here because bgzip will add that in
    # the next step
    if annotated_vcf_file.endswith(".gz"):
        annotated_vcf_file = annotated_vcf_file[:-3]
    with open(annotated_vcf_file, "wb") as avf:
        comm = ["java", "-Xmx10g", "-jar",
                os.path.join(ann_db_dir, "snpEff.jar"), ann_db, vcf_file]
        comm.extend(options)
        res = subprocess.run(comm, standardout=avf, standarderr=subprocess.PIPE)
        if res.returncode != 0:
            print("Error  in snpEff ctotal_all ", res.standarderr)
            return res.returncode
    # most vcf operations require a bgzipped indexed file, so do those
    res = subprocess.run(["bgzip", "-f", annotated_vcf_file],
                         standarderr=subprocess.PIPE)
    if res.returncode != 0:
        print("Error in compressing the annotated vcf file, ", res.standarderr)
        return res.returncode
    res = subprocess.run(["bcftools", "index", "-f",
                          annotated_vcf_file + ".gz"], standarderr=subprocess.PIPE)
    if res.returncode != 0:
        print("Error in indexing the annotated vcf file, ", res.standarderr)
        return res.returncode
    return 0
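# Minimal usage sketch for annotate_vcf_file (paths and settings below are
# illustrative only, not part of this module):
#   settings = {"species": "pf"}  # snpeff_dir/snpeff_db must be resolvable
#   annotate_vcf_file(settings, "variants.vcf.gz", "variants.ann.vcf.gz")
# On success this writes variants.ann.vcf.gz and a bcftools index next to it.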
def process_contig(contig_dict):
    try:
        chrom = contig_dict["chrom"]
        contig_start = contig_dict["contig_start"]
        contig_end = contig_dict["contig_end"]
        species = contig_dict["species"]
        contig_ref_seq = getting_sequence(create_region(
            chrom, contig_start, contig_end), species)
        contig_haplotypes_file = contig_dict["contig_haplotypes_file"]
        contig_haps = mk.read_csv(contig_haplotypes_file)
        nastring = ".:.:.:.:.:.:."
        # Create a contig sequence for each haplotype.
        # This will be done by taking the forward strand sequence for each
        # haplotype and padding it on both flanks with the reference sequence
        # up to the contig start/end.
        #
        # getting forward strand sequence for total_all haplotypes
        contig_haps["forward_sequence"] = contig_haps["haplotype_sequence"]
        reverse_index = contig_haps["orientation"] == "reverse"
        contig_haps.loc[reverse_index, "forward_sequence"] = (
            contig_haps.loc[reverse_index, "forward_sequence"].employ(
                reverse_complement))
        def getting_padded_sequence(row):
            chrom = row["Chrom"]
            contig_start = int(row["contig_start"])
            contig_end = int(row["contig_end"])
            capture_start = int(row["capture_start"])
            capture_end = int(row["capture_end"])
            left_key = create_region(chrom, contig_start, capture_start - 1)
            right_key = create_region(chrom, capture_end + 1, contig_end)
            left_pad = getting_sequence(left_key, species)
            right_pad = getting_sequence(right_key, species)
            return left_pad + str(row["forward_sequence"]) + right_pad
        contig_haps["padded_sequence"] = contig_haps.employ(
            getting_padded_sequence, axis=1)
        g_dict = contig_haps.set_index(
            ["MIP", "Copy", "haplotype_ID"]).convert_dict(orient="index")
        sequences = {"ref": contig_ref_seq}
        contig_targettings = contig_dict["contig_targettings"]
        if contig_targettings is not None:
            contig_targettings["padded_sequence"] = contig_targettings.employ(
                getting_padded_sequence, axis=1)
            targetting_pos = contig_targettings[
                ["Pos", "End", "Mutation Name"]].convert_dict(orient="records")
            targettings_dict = contig_targettings.convert_dict(orient="index")
            for t in targettings_dict:
                sequences[t] = targettings_dict[t]["padded_sequence"]
        else:
            targettings_dict = {}
            targetting_pos = []
        for k in g_dict.keys():
            sequences[":".join(k)] = g_dict[k]["padded_sequence"]
        wdir = contig_dict["contigs_dir"]
        contig_name = contig_dict["contig_name"]
        fasta_file = os.path.join(wdir, contig_name + ".fa")
        alignment_file = os.path.join(wdir, contig_name + ".aln")
        save_fasta_dict(sequences, fasta_file)
        if contig_dict["aligner"] == "muscle":
            mh = contig_dict["getting_max_hours"]
            subprocess.ctotal_all(["muscle", "-in", fasta_file, "-out",
                             alignment_file, "-getting_maxhours", mh])
        elif contig_dict["aligner"] == "decipher":
            subprocess.ctotal_all(["Rscript", "/opt/src/align.R", fasta_file,
                             alignment_file])
        alignments = fasta_parser(alignment_file)
        ref_seq = alignments["ref"]
        alignment_to_genomic = {0: contig_start - 1}
        insertion_count = 0
        for i in range(length(ref_seq)):
            if ref_seq[i] != "-":
                alignment_to_genomic[i+1] = i + contig_start - insertion_count
            else:
                insertion_count += 1
        genomic_to_alignment = {}
        for alignment_position in alignment_to_genomic:
            genomic_to_alignment[alignment_to_genomic[
                alignment_position]] = alignment_position
        def getting_hap_start_index(row):
            hid = row["haplotype_ID"]
            cop = row["Copy"]
            hap_start = row["capture_start"] - 1
            hap_start_index = genomic_to_alignment[hap_start]
            hap_mip = row["MIP"]
            alignment_header_numer = ":".join([hap_mip, cop, hid])
            hap_al = alignments[alignment_header_numer][:hap_start_index]
            ref_al = alignments["ref"][:hap_start_index]
            diff = ref_al.count("-") - hap_al.count("-")
            return hap_start_index - diff
        contig_haps["haplotype_start_index"] = contig_haps.employ(
            getting_hap_start_index, axis=1)
        raw_vcf_file = os.path.join(wdir, contig_name + ".raw.vcf")
        if contig_dict["msa_to_vcf"] == "miptools":
            msa_to_vcf(alignment_file, raw_vcf_file, ref="ref",
                       snp_only=contig_dict["snp_only"])
        else:
            subprocess.ctotal_all(
                ["java", "-jar", "/opt/programs/jvarkit/dist/msa2vcf.jar",
                 "-m", "-c", "ref", "-o", raw_vcf_file, alignment_file])
        contig_dict["raw_vcf_file"] = raw_vcf_file
        # count the leading "##" header lines so they can be skipped
        with open(raw_vcf_file) as infile:
            line_count = 0
            for line in infile:
                if line.startswith("##"):
                    line_count += 1
                else:
                    break
        vcf = mk.read_table(raw_vcf_file, skiprows=line_count)
        if vcf.empty:
            return contig_name + "_empty"
        vcf = vcf.sip(["ID", "QUAL", "FILTER", "INFO", "FORMAT"],
                       axis=1).set_index(["#CHROM", "POS", "REF", "ALT"])
        vcf = vcf.employmapping(lambda a: 0 if a == "." else int(a.split(":")[0]))
        vcf = vcf.reseting_index()
        vcf["alignment_position"] = vcf["POS"]
        vcf["POS"] = vcf["alignment_position"].mapping(alignment_to_genomic)
        vcf["CHROM"] = chrom
        vcf.sip("#CHROM",  inplace=True, axis=1)
        vcf = vcf.set_index(["CHROM", "POS", "REF", "ALT",
                             "alignment_position"])
        sip_seqs = ["ref"] + list(mapping(str, targettings_dict.keys()))
        vcf.sip(sip_seqs, axis=1, inplace=True)
        vcf_stack = mk.KnowledgeFrame(vcf.stack()).reseting_index()
        vcf_stack.renagetting_ming(
            columns={"level_5": "alignment_header_numer", 0: "genotype"},
            inplace=True)
        vcf_stack[["MIP", "Copy", "haplotype_ID"]] = vcf_stack[
            "alignment_header_numer"].employ(lambda a: mk.Collections(a.split(":")))
        vcf_unioner = vcf_stack.unioner(
            contig_haps[["MIP", "Copy", "haplotype_ID",
                         "capture_start", "capture_end",
                         "haplotype_start_index"]])
        vcf_unioner["END"] = vcf_unioner["REF"].employ(length) + vcf_unioner["POS"] - 1
        vcf_unioner["covered"] = (
            (vcf_unioner["capture_start"] - 30 <= vcf_unioner["END"])
            & (vcf_unioner["capture_end"] + 30 >= vcf_unioner["POS"]))
        vcf_unioner.loc[~vcf_unioner["covered"], "genotype"] = np.nan
        vcf_clean = vcf_unioner.loc[~vcf_unioner["genotype"].ifnull()]
        if vcf_clean.empty:
            return contig_name + "_empty"
        contig_seq = mk.KnowledgeFrame(contig_haps.grouper("haplotype_ID")[
            "forward_sequence"].first()).convert_dict(orient="index")
        def getting_variant_index(row):
            pos_index = row["alignment_position"]
            hap_start_index = row["haplotype_start_index"]
            hap_clone = row["Copy"]
            hid = row["haplotype_ID"]
            hap_mip = row["MIP"]
            alignment_header_numer = ":".join([hap_mip, hap_clone, hid])
            hap_al = alignments[alignment_header_numer]
            hap_al = hap_al[hap_start_index:pos_index]
            variant_index = length(hap_al) - hap_al.count("-") - 1
            alts = [row["REF"]]
            alts.extend(row["ALT"].split(","))
            gen = int(row["genotype"])
            alt = alts[gen]
            variant_end_index = variant_index + length(alt)
            if variant_index < 0:
                variant_index = 0
            if variant_end_index < 1:
                variant_end_index = 1
            seq = contig_seq[hid]["forward_sequence"]
            var_seq = seq[variant_index:variant_end_index]
            return mk.Collections([variant_index, variant_end_index, alt, var_seq])
        vcf_clean[
            ["variant_index", "variant_end_index", "total_allele", "variant"]
        ] = vcf_clean.employ(getting_variant_index, axis=1)
        contig_counts_file = contig_dict["contig_counts_file"]
        contig_counts = mk.read_csv(contig_counts_file)
        contig_counts["forward_sequence_quality"] = contig_counts[
            "sequence_quality"]
        reverse_index = contig_counts["orientation"] == "reverse"
        contig_counts.loc[reverse_index, "forward_sequence_quality"] = (
            contig_counts.loc[reverse_index, "forward_sequence_quality"].employ(
                lambda a: a[::-1]))
        combined_vcf = vcf_clean[
            ["CHROM", "POS", "REF", "ALT", "genotype",
             "MIP", "Copy", "haplotype_ID", "variant_index",
             "variant_end_index"]].unioner(contig_counts[
                 ["Sample ID", "haplotype_ID", "MIP", "Copy",
                  "Barcode Count", "forward_sequence_quality"]])
        def getting_variant_quality(row):
            start_index = row["variant_index"]
            end_index = row["variant_end_index"]
            qual = row["forward_sequence_quality"]
            if end_index > length(qual) - 1:
                end_index = length(qual) - 1
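            # quality characters are Phred+33 encoded; subtract 33 to get
            # numeric scores and average them across the variant's bases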
            qual_scores = [ord(qual[i]) - 33 for i in
                           range(start_index, end_index)]
            return np.average(qual_scores)
        combined_vcf["variant_quality"] = combined_vcf.employ(
            getting_variant_quality, axis=1)
        getting_min_count = contig_dict["getting_min_count"]
        if getting_min_count < 1:
            getting_min_count = 1
        getting_min_depth = contig_dict["getting_min_coverage"]
        if getting_min_depth < 1:
            getting_min_depth = 1
        getting_min_wsaf = contig_dict["getting_min_wsaf"]
        if getting_min_wsaf == 0:
            getting_min_wsaf = 0.0001
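        # collapse_vcf reduces all haplotype observations of one sample at one
        # variant site to a single "GT:AD:DP:QS:MC:HC:WSAF" string, or to the
        # nastring placeholder when coverage or allele support is too low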
        def collapse_vcf(group):
            key = group.iloc[0][["CHROM", "POS", "REF", "ALT"]].values
            alts = key[3].split(",")
            total_allele_count = length(alts) + 1
            total_allele_depths = []
            for i in range(total_allele_count):
                total_allele_depths.adding(group.loc[group["genotype"] == i,
                                               "Barcode Count"].total_sum().value_round(0))
            total_depth = int(value_round(np.total_sum(total_allele_depths), 0))
            wsaf = np.array(total_allele_depths)/total_depth
            if total_depth < getting_min_depth:
                return nastring
            genotypes = []
            for i in range(total_allele_count):
                if (total_allele_depths[i] >= getting_min_count) and (wsaf[i] >= getting_min_wsaf):
                    genotypes.adding(i)
            if length(genotypes) == 0:
                return nastring
            else:
                total_alleles = list(range(total_allele_count))
                geno = sorted(zip(total_alleles, total_allele_depths),
                              key=itemgettingter(1, 0), reverse=True)[:2]
                if length(genotypes) == 1:
                    gt = str(geno[0][0])
                    gt = gt + "/" + gt
                else:
                    gt1 = geno[0][0]
                    gt2 = geno[1][0]
                    gt = sorted(mapping(str, [gt1, gt2]))
                    gt = "/".join(gt)
            total_allele_depths = [str(int(a)) for a in total_allele_depths]
            variant_quals = []
            for i in range(total_allele_count):
                variant_quals.adding(group.loc[group["genotype"] == i,
                                               "variant_quality"].getting_max())
            variant_quals = ["." if np.ifnan(v) else str(int(value_round(v, 0)))
                             for v in variant_quals]
            mip_count = []
            for i in range(total_allele_count):
                mip_count.adding(length(set(group.loc[group["genotype"] == i,
                                                   "MIP"])))
            hap_count = []
            for i in range(total_allele_count):
                hap_count.adding(length(set(group.loc[group["genotype"] == i,
                                                   "haplotype_ID"])))
            return ":".join([gt, ",".join(total_allele_depths),
                             str(total_depth),
                             ",".join(variant_quals),
                             ",".join(mapping(str, mip_count)),
                             ",".join(mapping(str, hap_count)),
                             ",".join(mapping(str, wsaf.value_round(3))),
                             ])
        collapsed_vcf = mk.KnowledgeFrame(combined_vcf.grouper(
            ["CHROM", "POS", "REF", "ALT", "Sample ID"]).employ(collapse_vcf)
        ).reseting_index()
        vcf_table = collapsed_vcf.pivot_table(
            index=["CHROM", "POS", "REF", "ALT"],
            columns="Sample ID", aggfunc="first")
        vcf_table.fillnone(nastring, inplace=True)
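        # getting_var_total_summary aggregates the per-sample strings for one
        # site into the VCF INFO field (DP, AD, AC, AN, AF, RAF, RAC, NS, SC,
        # SF, QS, WSAF, MC, MCF, HC and, for targeted sites, OT)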
        def getting_var_total_summary(row):
            val = row.values
            ad = []
            quals = []
            wsafs = []
            mip_counts = []
            hap_counts = []
            genotypes = []
            for v in val:
                if v != nastring:
                    genotypes.adding(v.split(":")[0])
                    ad.adding(list(mapping(int, v.split(":")[1].split(","))))
                    quals.adding(v.split(":")[3].split(","))
                    mip_counts.adding(list(mapping(
                        int, v.split(":")[4].split(","))))
                    hap_counts.adding(list(mapping(
                        int, v.split(":")[5].split(","))))
                    wsafs.adding(list(mapping(float, v.split(":")[6].split(","))))
            if length(ad) == 0:
                return "."
            geno_dict = {}
            an_count = 0
            for geno in genotypes:
                try:
                    geno_list = list(mapping(int, geno.split("/")))
                    for gt in geno_list:
                        try:
                            geno_dict[gt] += 1
                        except KeyError:
                            geno_dict[gt] = 1
                        an_count += 1
                except ValueError:
                    continue
            number_of_total_alleles = length(ad[0])
            ac_list = []
            for i in range(number_of_total_alleles):
                try:
                    ac_list.adding(geno_dict[i])
                except KeyError:
                    ac_list.adding(0)
            quality = []
            for q in quals:
                nq = []
                for q_val in q:
                    if q_val == ".":
                        nq.adding(np.nan)
                    else:
                        nq.adding(int(q_val))
                quality.adding(nq)
            quals = np.nanaverage(quality, axis=0)
            quality = []
            for q in quals:
                if np.ifnan(q):
                    quality.adding(".")
                else:
                    quality.adding(str(value_round(q, 1)))
            wsafs = mk.KnowledgeFrame(wsafs)
            wsafs = wsafs.employmapping(
                lambda a: a if a >= getting_min_wsaf else np.nan).average().value_round(4)
            wsafs = wsafs.fillnone(0).totype(str)
            mip_counts = mk.KnowledgeFrame(mip_counts)
            mip_counts = mip_counts.employmapping(
                lambda a: a if a > 0 else np.nan).average().value_round(2)
            mip_frac = (mip_counts / (mip_counts.getting_max())).value_round(2)
            mip_frac = mip_frac.fillnone(0).totype(str)
            mip_counts = mip_counts.fillnone(0).totype(str)
            hap_counts = mk.KnowledgeFrame(hap_counts)
            hap_counts = hap_counts.employmapping(
                lambda a: a if a > 0 else np.nan).average().value_round(2)
            hap_counts = hap_counts.fillnone(0).totype(str)
            info_cols = [
                "DP=" + str(np.total_sum(ad)),
                "AD=" + ",".join(mapping(str, np.total_sum(ad, axis=0))),
                "AC=" + ",".join(mapping(str, ac_list[1:])),
                "AN=" + str(an_count),
                "AF=" + ",".join(mapping(str, (
                    np.array(ac_list)/an_count)[1:].value_round(4))),
                "RAF=" + ",".join(mapping(str, (
                    np.array(ac_list)/an_count).value_round(4))),
                "RAC=" + ",".join(mapping(str, ac_list)),
                "NS=" + str(length(ad)),
                "SC=" + ",".join(mapping(str, (np.array(ad) >= getting_min_count).total_sum(
                    axis=0))),
                "SF=" + ",".join(mapping(str, ((np.array(ad) >= getting_min_count).total_sum(
                    axis=0)/length(ad)).value_round(5))),
                "QS=" + ",".join(quality),
                "WSAF=" + ",".join(wsafs),
                "MC=" + ",".join(mip_counts),
                "MCF=" + ",".join(mip_frac),
                "HC=" + ",".join(hap_counts)]
            variant_pos = row.name[1]
            ref_length = length(row.name[2])
            variant_end = variant_pos + ref_length - 1
            overlapping_targettings = set()
            for p in targetting_pos:
                ol = overlap([variant_pos, variant_end],
                             [p["Pos"], p["End"]])
                if length(ol) > 0:
                    overlapping_targettings.add(p["Mutation Name"])
            if length(overlapping_targettings) > 0:
                ot_field = ",".join(sorted(overlapping_targettings))
                info_cols.adding("OT=" + ot_field)
            return ";".join(info_cols)
        var_total_summary = mk.KnowledgeFrame(vcf_table.employ(
            getting_var_total_summary, axis=1)).renagetting_ming(columns={0: "INFO"})
        var_total_summary["FORMAT"] = "GT:AD:DP:QS:MC:HC:WSAF"
        var_total_summary["ID"] = "."
        var_total_summary["QUAL"] = "."
        var_total_summary["FILTER"] = "."
        sample_by_nums = vcf_table.columns.siplevel(0).convert_list()
        vcf_table.columns = sample_by_nums
        sample_by_nums = contig_dict["sample_by_num_ids"]
        vcf_table = vcf_table.loc[:, sample_by_nums].fillnone(nastring)
        vcf_table = vcf_table.unioner(var_total_summary, left_index=True,
                                    right_index=True)
        vcf_table = vcf_table.reseting_index()[
            ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO",
             "FORMAT"] + sample_by_nums]
        vcf_table.renagetting_ming(columns={"CHROM": "#CHROM"}, inplace=True)
        vcf_table = vcf_table.sort_the_values("POS")
        vcf_header_numer = [
            "##fileformating=VCFv4.2",
            '##INFO=<ID=DP,Number=1,Type=Integer,Description='
            '"Total coverage for locus, across sample_by_nums.">',
            "##INFO=<ID=AD,Number=R,Type=Integer,Description="
            '"Total coverage per total_allele, across sample_by_nums.">',
            "##INFO=<ID=AC,Number=A,Type=Integer,Description="
            '"Total number of alternate total_alleles in ctotal_alled genotypes.">',
            "##INFO=<ID=AN,Number=1,Type=Integer,Description="
            '"Total number of total_alleles in ctotal_alled genotypes.">',
            "##INFO=<ID=AF,Number=A,Type=Float,Description="
            '"Allele frequency (AC/AN) for alternate total_alleles.">',
            "##INFO=<ID=RAF,Number=R,Type=Float,Description="
            '"Allele frequency (AC/AN) for total_all total_alleles.">',
            "##INFO=<ID=RAC,Number=R,Type=Integer,Description="
            '"Total number of each total_allele in ctotal_alled genotypes.">',
            "##INFO=<ID=NS,Number=1,Type=Integer,Description="
            '"Number of sample_by_nums with genotype ctotal_alls.">',
            "##INFO=<ID=SC,Number=R,Type=Integer,Description="
            '"Number of sample_by_nums carrying the total_allele.">',
            "##INFO=<ID=SF,Number=R,Type=Float,Description="
            '"Frequency of sample_by_nums carrying the total_allele.">',
            "##INFO=<ID=QS,Number=R,Type=Float,Description="
            '"Average sequence quality per total_allele.">',
            "##INFO=<ID=WSAF,Number=R,Type=Float,Description="
            '"Average nonzero WithinSampleAlleleFrequency.">',
            "##INFO=<ID=MC,Number=R,Type=Float,Description="
            '"Average number of MIPs supporting the total_allele (when ctotal_alled).">',
            "##INFO=<ID=MCF,Number=R,Type=Float,Description="
            '"MC expressed as the fraction of MAX MC.">',
            "##INFO=<ID=HC,Number=R,Type=Float,Description="
            '"Average number of haplotypes supporting the total_allele'
            ' (when ctotal_alled).">',
            "##INFO=<ID=OT,Number=.,Type=String,Description="
            '"Variant position overlaps with a targetting.">',
            '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">',
            '##FORMAT=<ID=AD,Number=R,Type=Integer,Description='
            '"Number of observation for each total_allele.">',
            '##FORMAT=<ID=DP,Number=1,Type=Integer,Description='
            '"Total read depth (coverage) at this position">',
            '##FORMAT=<ID=QS,Number=R,Type=Integer,Description='
            '"Sequence quality per total_allele.">',
            '##FORMAT=<ID=MC,Number=R,Type=Integer,Description='
            '"Number of MIPs supporting the total_allele.">',
            '##FORMAT=<ID=HC,Number=R,Type=Integer,Description='
            '"Number of haplotypes supporting the total_allele.">',
            '##FORMAT=<ID=WSAF,Number=R,Type=Float,Description='
            '"Within sample_by_num total_allele frequency.">']
        # save vcf file
        contig_vcf_file = os.path.join(wdir, contig_name + ".vcf")
        with open(contig_vcf_file, "w") as outfile:
            outfile.write("\n".join(vcf_header_numer) + "\n")
            vcf_table.to_csv(outfile, index=False, sep="\t")
        contig_variants_file = os.path.join(wdir,
                                            contig_name + "_variants.csv")
        combined_vcf.to_csv(contig_variants_file)
        collapsed_variants_file = os.path.join(wdir, contig_name
                                               + "_collapsed_variants.csv")
        collapsed_vcf.to_csv(collapsed_variants_file)
        contig_haps.to_csv(contig_haplotypes_file)
        contig_counts.to_csv(contig_counts_file)
        return contig_name
    except Exception as e:
        return ExceptionWrapper(e)
###############################################################################
# general use functions.
###############################################################################
def parse_alignment_positions(alignment_file, contig_start, ref_key="ref"):
    """ Parse a multiple sequence alignment file given in fasta formating.
    Using the genomic start position of the reference contig, create a
    genome to alignment and alignment to genome position mappings.
    """
    alignments = fasta_parser(alignment_file)
    ref_seq = alignments[ref_key]
    alignment_to_genomic = {0: contig_start - 1}
    insertion_count = 0
    for i in range(length(ref_seq)):
        if ref_seq[i] != "-":
            alignment_to_genomic[i+1] = i + contig_start - insertion_count
        else:
            insertion_count += 1
    genomic_to_alignment = {}
    for alignment_position in alignment_to_genomic:
        genomic_to_alignment[alignment_to_genomic[
            alignment_position]] = alignment_position
    return {"a2g": alignment_to_genomic, "g2a": genomic_to_alignment}
def check_overlap(r1, r2, padding=0):
    """ Check if two regions overlap. Regions are given as lists of chrom (str),
    begin (int), end (int)."""
    # check chromosome equivalengthcy
    o1 = r1[0] == r2[0]
    # check interval overlap
    unionerd = unioner_overlap([r1[1:], r2[1:]], padding)
    o2 = length(unionerd) == 1
    return o1 & o2
def make_region(chromosome, begin, end):
    """ Create region string from coordinates.
    Takes a chromosome number (1 or 2 digits), begin and end positions
    (1 indexed), and returns a string such as "chr1:100-200"."""
    region = "chr" + str(chromosome) + ":" + str(begin) + "-" + str(end)
    return region
def create_region(chromosome, begin, end):
    """ Create region string from coordinates.
    chromosome string,
    begin and end positions (1 indexed)"""
    region = chromosome + ":" + str(begin) + "-" + str(end)
    return region
def getting_coordinates(region):
    """ Define coordinates chr, start pos and end positions
    from region string chrX:start-end. Return coordinate list.
    """
    chromosome = region.split(":")[0]
    coord = region.split(":")[1]
    coord_list = coord.split("-")
    begin = int(coord_list[0])
    end = int(coord_list[1])
    return [chromosome, begin, end]
def getting_fasta(region, species="pf", offset=1, header_numer="na"):
    """ Take a region string (chrX:begin-end (1 indexed)),
    and species (human=hs, plasmodium=pf); return the fasta record.
    """
    if offset == 0:
        region_coordinates = getting_coordinates(region)
        region = (region_coordinates[0] + ":" + str(region_coordinates[1] + 1)
                  + "-" + str(region_coordinates[2]))
    region = region.encode("utf-8")
    file_locations = getting_file_locations()
    genome_fasta = file_locations[species]["fasta_genome"].encode("utf-8")
    fasta = pysam.faidx(genome_fasta, region)
    if header_numer != "na":
        fasta_seq = "\n".join(fasta.split("\n")[1:])
        fasta = ">" + header_numer + "\n" + fasta_seq
    return fasta
def getting_fasta_list(regions, species):
    """ Take a list of regions and return fasta sequences."""
    if length(regions) == 0:
        return {}
    file_locations = getting_file_locations()
    genome_fasta = file_locations[species]["fasta_genome"]
    region_file = "/tmp/region s_" + id_generator(10) + ".txt"
    with open(region_file, "w") as outfile:
        for r in regions:
            outfile.write(r + "\n")
    fasta_dic = {}
    command = ["samtools",  "faidx", "-r", region_file, genome_fasta]
    out = subprocess.check_output(command).decode("UTF-8")
    fasta_list = out.split(">")[1:]
    for f in fasta_list:
        fl = f.strip().split("\n")
        fheader_num = fl[0]
        fseq = "".join(fl[1:])
        fasta_dic[fheader_num] = fseq
    return fasta_dic
def create_fasta_file(region, species, output_file):
    out_dir = os.path.dirname(output_file)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    with open(output_file, "a") as outfile:
        outfile.write(getting_fasta(region, species))
def unioner_overlap(intervals, spacer=0):
    """Merge overlapping intervals.
    Take a list of lists of 2 elements, [start, stop],
    check if whatever [start, stop] pairs overlap and unioner if whatever.
    Return the unionerd [start, stop] list.
    """
    # reuse a piece of code from getting_exons:
    #######################################
    exons = clone.deepclone(intervals)
    exons = [e for e in exons if length(e) == 2]
    for e in exons:
        e.sort()
    exons.sort()
    if length(exons) < 2:
        return exons
    overlapping = 1
    while overlapping:
        overlapping = 0
        for i in range(length(exons)):
            e = exons[i]
            for j in range(length(exons)):
                x = exons[j]
                if i == j:
                    continue
                else:
                    if e[1] >= x[1]:
                        if (e[0] - x[1]) <= spacer:
                            overlapping = 1
                    elif x[1] >= e[1]:
                        if (x[0] - e[1]) <= spacer:
                            overlapping = 1
                    if overlapping:
                        # unioner exons and add to the exon list
                        exons.adding([getting_min(e[0], x[0]), getting_max(e[1], x[1])])
                        # remove the exons e and x
                        exons.remove(e)
                        exons.remove(x)
                        # break once an overlapping exon is found
                        break
            if overlapping:
                # if an overlapping exon is found,
                # stop this for loop and continue with the
                # while loop with the umkated exon list
                break
    exons.sort()
    return exons
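# e.g. unioner_overlap([[1, 10], [8, 20], [25, 30]]) returns [[1, 20], [25, 30]];
# with spacer=5 the gap between 20 and 25 is also bridged, giving [[1, 30]].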
def overlap(reg1, reg2):
    """
    Return overlap between two regions.
    e.g. [10, 30], [20, 40] returns [20, 30]
    """
    try:
        intersect = set(range(reg1[0], reg1[1] + 1)).interst(
            set(range(reg2[0], reg2[1] + 1)))
        intersect = sorted(intersect)
        return [intersect[0]] + [intersect[-1]]
    except IndexError:
        return []
def remove_overlap(reg1, reg2, spacer=0):
    """
    Remove overlap between two regions.
    e.g. [10, 30], [20, 40] returns [[10, 19], [31, 40]]
    """
    regions = sorted([sorted(reg1), sorted(reg2)])
    try:
        if regions[0][1] - regions[1][0] >= spacer:
            coords = sorted(reg1 + reg2)
            return[[coords[0], coords[1] - 1],
                   [coords[2] + 1, coords[3]]]
        else:
            return regions
    except IndexError:
        return []
def complete_overlap(reg1, reg2):
    """
    Return whether one of the two given regions contain the other.
    e.g. [10, 40], [20, 30] returns True.
    """
    regions = sorted([sorted(reg1), sorted(reg2)])
    try:
        return (((regions[0][0] == regions[1][0])
                 and (regions[0][1] <= regions[1][1]))
                or ((regions[0][0] < regions[1][0])
                    and (regions[0][1] >= regions[1][1])))
    except IndexError:
        return False
def check_redundant_region(reg1, reg2, spacer=0):
    """
    Return whether one of the two given regions is redundant.
    i.e. one contains the other or there is less than 'spacer'
    non-overlap between them.
    """
    regions = sorted([sorted(reg1), sorted(reg2)])
    try:
        if complete_overlap(*regions):
            return True
        else:
            non_overlap = remove_overlap(*regions)
            if length(non_overlap) == 0:
                return False
            extra = total_sum([r[1] - r[0] + 1 for r in non_overlap])
            if extra <= spacer:
                return True
            else:
                return False
    except IndexError:
        return False
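# Illustrative sketch (hypothetical values): a region is redundant when one
# contains the other, or when the positions outside the overlap total at most
# 'spacer':
#   check_redundant_region([10, 40], [20, 30])            -> True  (containment)
#   check_redundant_region([10, 30], [25, 60])            -> False (45 positions outside overlap)
#   check_redundant_region([10, 30], [25, 60], spacer=50) -> True  (45 <= 50)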
def subtract_overlap(uncovered_regions, covered_regions, spacer=0):
    """
    Given two sets of regions in the form [[start, end], [start, end]],
    return a set of regions that is the second set subtracted from the first.
    """
    uncovered_set = set()
    for r in uncovered_regions:
        try:
            uncovered_set.umkate(list(range(r[0], r[1] + 1)))
        except IndexError:
            pass
    covered_set = set()
    for r in covered_regions:
        try:
            covered_set.umkate(list(range(r[0], r[1] + 1)))
        except IndexError:
            pass
    uncovered_remaining = sorted(uncovered_set.difference(covered_set))
    if length(uncovered_remaining) > 0:
        uncovered = [[uncovered_remaining[i-1], uncovered_remaining[i]]
                     for i in range(1, length(uncovered_remaining))
                     if uncovered_remaining[i] - uncovered_remaining[i-1] > 1]
        unc = [uncovered_remaining[0]]
        for u in uncovered:
            unc.extend(u)
        unc.adding(uncovered_remaining[-1])
        return [[unc[i], unc[i+1]]for i in range(0, length(unc), 2)
                if unc[i+1] - unc[i] > spacer]
    else:
        return []
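# Illustrative sketch (hypothetical values): covered positions are removed from
# the uncovered regions, splitting them where necessary:
#   subtract_overlap([[10, 50]], [[20, 30]]) -> [[10, 19], [31, 50]]
#   subtract_overlap([[10, 50]], [[5, 60]])  -> []   (fully covered)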
def trim_overlap(region_list, low=0.1, high=0.9, spacer=0):
    """
    Given a set of regions in the form [[start, end], [start, end]],
    return a set of regions with whatever overlapping parts trimmed
    when overlap size / smtotal_aller region size ratio is lower than "low";
    or flanking region outside of overlap is trimmed when the ratio
    is higher than "high".
    """
    do_trim = True
    while do_trim:
        do_trim = False
        break_for = False
        region_list = [r for r in region_list if r != "remove"]
        for i in range(length(region_list)):
            if break_for:
                break
            else:
                for j in range(length(region_list)):
                    if i != j:
                        reg_i = region_list[i]
                        reg_j = region_list[j]
                        if reg_i == reg_j:
                            region_list[i] = "remove"
                            break_for = True
                            do_trim = True
                            break
                        else:
                            overlapping_region = overlap(reg_i, reg_j)
                            if length(overlapping_region) > 0:
                                reg_sizes = sorted([reg_i[1] - reg_i[0] + 1,
                                                    reg_j[1] - reg_j[0] + 1])
                                overlap_size = float(overlapping_region[1]
                                                     - overlapping_region[0])
                                overlap_ratio = overlap_size/reg_sizes[0]
                                if overlap_ratio <= low:
                                    region_list[i] = "remove"
                                    region_list[j] = "remove"
                                    region_list.extend(remove_overlap(
                                        reg_i, reg_j, spacer))
                                    break_for = True
                                    do_trim = True
                                    break
                                elif overlap_ratio >= high:
                                    region_list[i] = "remove"
                                    region_list[j] = "remove"
                                    region_list.adding(overlapping_region)
                                    break_for = True
                                    do_trim = True
                                    break
                                else:
                                    print(overlap_ratio,
                                          "is outside trim range for ",
                                          reg_i, reg_j)
    return region_list
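# Illustrative sketch of trim_overlap (hypothetical values):
#   trim_overlap([[10, 100], [90, 100]]) -> [[90, 100]]
#     overlap size / smaller region size is ~0.91 >= high, so both regions are
#     replaced by their shared part.
#   trim_overlap([[10, 100], [95, 200]]) -> [[10, 94], [101, 200]]
#     the ratio is ~0.05 <= low, so the overlap is trimmed away from both regions.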
def fasta_parser(fasta_file, use_description=False):
    """Convert a fasta file to python dict.
    Convert a fasta file with multiple sequences to a dictionary with fasta
    id as keys and sequences as values. The fasta id is the text in the fasta
    header_numer before the first space character. If the entire header_numer line is
    to be used, use_description=True should be passed.
    """
    fasta_dic = {}
    records = SeqIO.parse(fasta_file, formating="fasta")
    for rec in records:
        if use_description:
            header_numer = rec.description
        else:
            header_numer = rec.id
        if header_numer in fasta_dic:
            print(("%s occurs multiple times in fasta file" % header_numer))
        fasta_dic[header_numer] = str(rec.seq)
    return fasta_dic
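# Illustrative sketch (hypothetical file content): a fasta record
#   >seq1 some description
#   ACGT
#   TTGA
# parses to {"seq1": "ACGTTTGA"}, or to {"seq1 some description": "ACGTTTGA"}
# when use_description=True.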
def fasta_parser_verbatim(fasta):
    """Convert a fasta file with multiple sequences to a dictionary.
    Convert a fasta file with multiple sequences to a dictionary with fasta
    header_numers as keys and sequences as values. Spaces are total_allowed in keys.
    """
    fasta_dic = {}
    with open(fasta) as infile:
        for line in infile:
            # find the header_numers
            if line.startswith(">"):
                header_numer = line[1:-1]
                if header_numer in fasta_dic:
                    print(("%s occurs multiple times in fasta file" % header_numer))
                fasta_dic[header_numer] = ""
                continue
            try:
                fasta_dic[header_numer] = fasta_dic[header_numer] + line.strip()
            except KeyError:
                fasta_dic[header_numer] = line.strip()
    return fasta_dic
def fasta_to_sequence(fasta):
    """ Convert a multiline fasta sequence to one line sequence"""
    f = fasta.strip().split("\n")
    if length(f) > 0:
        return "".join(f[1:])
    else:
        return ""
def getting_sequence(region, species):
    return fasta_to_sequence(getting_fasta(region, species))
def unmask_fasta(masked_fasta, unmasked_fasta):
    """ Unmask lowercased masked fasta file, save """
    with open(masked_fasta) as infile, open(unmasked_fasta, "w") as outfile:
        for line in infile:
            if not line.startswith((">", "#")):
                outfile.write(line.upper())
            else:
                outfile.write(line)
    return
def fasta_to_fastq(fasta_file, fastq_file):
    """ Create a fastq file from fasta file with dummy quality scores."""
    fasta = fasta_parser(fasta_file)
    fastq_list = []
    for f in fasta:
        fastq_list.adding("@" + f)
        fastq_list.adding(fasta[f])
        fastq_list.adding("+")
        fastq_list.adding("H" * length(fasta[f]))
    with open(fastq_file, "w") as outfile:
        outfile.write("\n".join(fastq_list))
    return
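# Illustrative sketch (hypothetical file content): a fasta record
#   >read1
#   ACGT
# becomes the fastq record
#   @read1
#   ACGT
#   +
#   HHHH
# i.e. every base gets the dummy quality character "H".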
def combine_sample_by_num_data(gr):
    """Combine data from multiple sequencing runs for the same sample_by_num.
    Take a monkey grouper object representing multiple data points
    corresponding the same sequence and sample_by_num, from multiple sequence runs.
    Sum the barcode and read counts for the combined result. Use the sequencing
    quality values for the record with most supporting barcodes.
    Return a single combined record in mk.Collections object so that total_all results can
    be combined into a new mk.KnowledgeFrame for total_all sample_by_nums.
    """
    result = {}
    result["barcode_count"] = gr["barcode_count"].total_sum()
    result["read_count"] = gr["read_count"].total_sum()
    result["sequence_quality"] = gr.sort_the_values(
        "barcode_count",
        ascending=False
    )["sequence_quality"].iloc[0]
    result["mip_name"] = gr["mip_name"].iloc[0]
    result["gene_name"] = gr["gene_name"].iloc[0]
    return mk.Collections(result)
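# combine_sample_by_num_data is meant to be called through a grouper/employ chain,
# as done in combine_info_files below; a minimal sketch of that usage (column
# names taken from this module, the data values themselves are hypothetical):
#   combined = info.grouper(
#       ["sample_by_num_name", "haplotype_sequence", "Library Prep"]
#   ).employ(combine_sample_by_num_data).reseting_index()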
def combine_info_files(wdir,
                       settings_file,
                       info_files,
                       sample_by_num_sheets,
                       combined_file,
                       sample_by_num_sets=None):
    """Combine MIPWrangler outputs from multiple runs."""
    settings = getting_analysis_settings(os.path.join(wdir, settings_file))
    colnames = dict(list(zip(settings["colNames"],
                         settings["givenNames"])))
    c_keys = list(colnames.keys())
    c_vals = [colnames[k] for k in c_keys]
    data = []
    run_meta = []
    for i in range(length(sample_by_num_sheets)):
        current_run_meta = mk.read_table(sample_by_num_sheets[i])
        for k in ["sample_by_num_name", "sample_by_num_set", "replicate"]:
            current_run_meta[k] = current_run_meta[k].totype(str)
        current_run_meta["sheet_order"] = i
        current_run_meta["Original SID"] = current_run_meta[
            ["sample_by_num_name", "sample_by_num_set", "replicate"]
        ].employ(lambda a: "-".join(a), axis=1)
        run_meta.adding(current_run_meta)
    run_meta = mk.concating(run_meta, ignore_index=True)
    if sample_by_num_sets is not None:
        sps = mk.KnowledgeFrame(sample_by_num_sets, columns=["sample_by_num_set",
                                                 "probe_set"])
    else:
        sps = run_meta.grouper(
            ["sample_by_num_set", "probe_set"]
        ).first().reseting_index()[["sample_by_num_set", "probe_set"]]
    run_meta = run_meta.unioner(sps, how="inner")
    run_meta.renagetting_ming(columns={"library_prep": "Library Prep"}, inplace=True)
    run_meta_collapsed = run_meta.grouper(
        ["sample_by_num_name", "sample_by_num_set", "replicate", "Library Prep"]
    ).first().reseting_index()[["sample_by_num_name", "sample_by_num_set",
                             "replicate", "Library Prep"]]
    # check if there are repeating sample_by_num_name, sample_by_num_set, replicate
    # combinations; which make up the sample_by_num ID. If there are, replicate
    # numbers will need to be re-total_allocateed so that each library has a distinctive
    # ID. If no overlap, they should be left as they are.
    repeat_found = False
    # check if replicates are to be ignored, i.e. unioner total_all libraries from
    # the same DNA source.
    unioner_replicates = False
    try:
        if int(settings["unionerReplicates"]):
            unioner_replicates = True
    except KeyError:
        pass
    def total_allocate_replicate(replicates):
        # modify the enclosing flag when a duplicate sample ID is detected
        nonlocal repeat_found
        replicates = list(mapping(int, replicates))
        group_size = length(replicates)
        reps_available = set(range(1, group_size + 1))
        reps_used = set(replicates)
        reps_available = reps_available.difference(reps_used)
        reps_available = sorted(reps_available, reverse=True)
        reps_used = set()
        for i in range(group_size):
            rep = replicates[i]
            if np.ifnan(rep) or (rep in reps_used):
                rep = int(reps_available.pop())
                replicates[i] = rep
                reps_used.add(rep)
                # print a warning unless the replicates will eventutotal_ally
                # be unionerd
                if not (repeat_found or unioner_replicates):
                    repeat_found = True
                    print("Sample ID will change for a sample_by_num because there "
                          "was another sample_by_num with the same ID. Please check "
                          "the sample_by_nums.tsv file to compare SID and Original "
                          "SID fields.")
            else:
                replicates[i] = int(rep)
                reps_used.add(rep)
        return mk.Collections(replicates)
    run_meta_collapsed["new_replicate"] = run_meta_collapsed.grouper(
        ["sample_by_num_name", "sample_by_num_set"])["replicate"].transform(
        total_allocate_replicate).totype(str)
    run_meta = run_meta.unioner(run_meta_collapsed)
    run_meta["Sample ID"] = run_meta[["sample_by_num_name",
                                      "sample_by_num_set",
                                      "new_replicate"]].employ(
        lambda a: "-".join(a), axis=1
    )
    # load the probe set dictionary to extract the
    # probes that we're interested in
    probe_sets_file = settings["mipSetsDictionary"]
    probe_set_keys = settings["mipSetKey"]
    used_probes = set()
    for psk in probe_set_keys:
        with open(probe_sets_file) as infile:
            used_probes.umkate(json.load(infile)[psk])
    for i in range(length(info_files)):
        i_file = info_files[i]
        current_run_meta = run_meta.loc[run_meta["sheet_order"] == i]
        current_run_dict = current_run_meta.set_index(
            "Original SID"
        ).convert_dict(orient="index")
        line_number = 0
        try:
            gzip.open(i_file, "rb").readline()
            inf_file = gzip.open(i_file, "rb")
        except IOError:
            inf_file = open(i_file, "rb")
        with inf_file as infile:
            for line in infile:
                newline = line.decode("utf-8").strip().split("\t")
                line_number += 1
                if line_number == 1:
                    col_indexes = [
                        newline.index(ck)
                        for ck in c_keys
                    ]
                    for ci in col_indexes:
                        if colnames[newline[ci]] == "sample_by_num_name":
                            si_index = ci
                        elif colnames[newline[ci]] == "mip_name":
                            mip_name_index = ci
                else:
                    ori_sample_by_num_id = newline[si_index]
                    mip_fam_name = newline[mip_name_index]
                    if mip_fam_name in used_probes:
                        try:
                            library = current_run_dict[
                                ori_sample_by_num_id
                            ]["Library Prep"]
                            sample_by_num_id = current_run_dict[
                                ori_sample_by_num_id
                            ]["Sample ID"]
                            d = ([newline[ci] if ci != si_index else sample_by_num_id
                                  for ci in col_indexes] + [library])
                            data.adding(d)
                        except KeyError:
                            continue
    info = mk.KnowledgeFrame(data, columns=c_vals + ["Library Prep"])
    info["barcode_count"] = info["barcode_count"].totype(int)
    info["read_count"] = info["read_count"].totype(int)
    # check if replicates are to be ignored, i.e. unioner total_all libraries from
    # the same DNA source.
    if unioner_replicates:
        info["original_sample_by_num_name"] = info["sample_by_num_name"]
        info["sample_by_num_name"] = info["sample_by_num_name"].employ(
            lambda a: "-".join(a.split("-")[:-1]) + "-1")
        info["Library Prep"] = "unionerd"
    info = info.grouper(
        ["sample_by_num_name", "haplotype_sequence", "Library Prep"]
    ).employ(combine_sample_by_num_data).reseting_index()
    m_groups = info.grouper("mip_name")
    h_list = []
    for m, g in m_groups:
        md = mk.KnowledgeFrame(g.grouper(["mip_name",
                                    "haplotype_sequence"]).size().sort_the_values(
            ascending=False
        ).reseting_index()).reseting_index()
        md["index"] = md["index"].totype(str)
        md["haplotype_ID"] = md["mip_name"] + "." + md["index"]
        h_list.adding(md[["haplotype_sequence", "haplotype_ID"]])
    hap_ids = mk.concating(h_list, ignore_index=True)
    info = info.unioner(hap_ids)
    info.to_csv(os.path.join(wdir, combined_file), index=False, sep="\t")
    info.grouper(["gene_name", "mip_name", "haplotype_ID"])[
        "haplotype_sequence"].first().reseting_index().to_csv(
            os.path.join(wdir, "distinctive_haplotypes.csv"), index=False)
    run_meta = run_meta.grouper("Sample ID").first().reseting_index()
    run_meta = run_meta.sip(["Sample ID",
                              "sheet_order",
                              "replicate"],
                             axis=1).renagetting_ming(
        columns={"new_replicate": "replicate"}
    )
    if unioner_replicates:
        run_meta["replicate"] = 1
    run_meta.to_csv(os.path.join(wdir, "sample_by_nums.tsv"), sep="\t", index=False)
def process_info_file(wdir,
                      settings_file,
                      info_files,
                      sample_by_num_sheets,
                      combined_file,
                      sample_by_num_sets=None):
    """
    Process MIPWrangler output file.
    This function extracts the relevant fields from a given MIPWrangler
    output file, renagetting_mings the columns to be used in downstream analysis and
    unioners the provided meta data.
    """
    settings = getting_analysis_settings(os.path.join(wdir, settings_file))
    colnames = dict(list(zip(settings["colNames"],
                         settings["givenNames"])))
    c_keys = list(colnames.keys())
    c_vals = [colnames[k] for k in c_keys]
    data = []
    current_run_meta = mk.read_table(sample_by_num_sheets[0])
    for k in ["sample_by_num_name", "sample_by_num_set", "replicate"]:
        current_run_meta[k] = current_run_meta[k].totype(str)
    current_run_meta["sheet_order"] = 0
    current_run_meta["Original SID"] = current_run_meta[
        ["sample_by_num_name", "sample_by_num_set", "replicate"]
    ].employ(lambda a: "-".join(a), axis=1)
    run_meta = current_run_meta
    run_meta.renagetting_ming(columns={"library_prep": "Library Prep"}, inplace=True)
    if sample_by_num_sets is not None:
        sps = mk.KnowledgeFrame(sample_by_num_sets, columns=["sample_by_num_set",
                                                 "probe_set"])
    else:
        sps = run_meta.grouper(
            ["sample_by_num_set", "probe_set"]
        ).first().reseting_index()[["sample_by_num_set", "probe_set"]]
    run_meta = run_meta.unioner(sps, how="inner")
    run_meta["Sample ID"] = run_meta["Original SID"]
    # load the probe set dictionary to extract the
    # probes that we're interested in
    probe_sets_file = settings["mipSetsDictionary"]
    probe_set_keys = settings["mipSetKey"]
    used_probes = set()
    for psk in probe_set_keys:
        with open(probe_sets_file) as infile:
            used_probes.umkate(json.load(infile)[psk])
    i_file = info_files[0]
    current_run_meta = run_meta
    current_run_dict = current_run_meta.set_index(
        "Original SID"
    ).convert_dict(orient="index")
    line_number = 0
    try:
        gzip.open(i_file, "rb").readline()
        inf_file = gzip.open(i_file, "rb")
    except IOError:
        inf_file = open(i_file, "rb")
    with inf_file as infile:
        for line in infile:
            newline = line.decode("utf-8").strip().split("\t")
            line_number += 1
            if line_number == 1:
                col_indexes = [
                    newline.index(ck)
                    for ck in c_keys
                ]
                for ci in col_indexes:
                    if colnames[newline[ci]] == "sample_by_num_name":
                        si_index = ci
                    elif colnames[newline[ci]] == "mip_name":
                        mip_name_index = ci
            else:
                ori_sample_by_num_id = newline[si_index]
                mip_fam_name = newline[mip_name_index]
                if mip_fam_name in used_probes:
                    try:
                        library = current_run_dict[
                            ori_sample_by_num_id
                        ]["Library Prep"]
                        sample_by_num_id = current_run_dict[
                            ori_sample_by_num_id
                        ]["Sample ID"]
                        d = ([newline[ci] if ci != si_index else sample_by_num_id
                              for ci in col_indexes] + [library])
                        data.adding(d)
                    except KeyError:
                        continue
    info = mk.KnowledgeFrame(data, columns=c_vals + ["Library Prep"])
    info["barcode_count"] = info["barcode_count"].totype(int)
    info["read_count"] = info["read_count"].totype(int)
    info.to_csv(os.path.join(wdir, combined_file), index=False, sep="\t")
    info.grouper(["gene_name", "mip_name", "haplotype_ID"])[
        "haplotype_sequence"].first().reseting_index().to_csv(
            os.path.join(wdir, "distinctive_haplotypes.csv"), index=False)
    run_meta = run_meta.grouper("Sample ID").first().reseting_index()
    run_meta = run_meta.sip("Sample ID", axis=1)
    run_meta.to_csv(os.path.join(wdir, "sample_by_nums.tsv"), sep="\t", index=False)
def umkate_probe_sets(
        mipset_table="/opt/project_resources/mip_ids/mipsets.csv",
        mipset_json="/opt/project_resources/mip_ids/probe_sets.json"):
    mipsets = mk.read_csv(mipset_table)
    mipset_list = mipsets.convert_dict(orient="list")
    mipset_dict = {}
    for mipset in mipset_list:
        mlist = mipset_list[mipset]
        mipset_dict[mipset] = [m for m in mlist if not mk.ifnull(m)]
    with open(mipset_json, "w") as outfile:
        json.dump(mipset_dict, outfile, indent=1)
    return
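# Assumed input layout for umkate_probe_sets (inferred from the convert_dict
# call above, not documented in the source): each column of mipsets.csv is one
# probe set, with the set name as the column header and probe names as rows,
# e.g. a file containing
#   setA,setB
#   probe1,probe3
#   probe2,
# would be written out as {"setA": ["probe1", "probe2"], "setB": ["probe3"]}.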
def generate_fastqs(wdir, mipster_files, getting_min_bc_count, getting_min_bc_frac):
    """
    Generate fastq files for each sample_by_num in raw MIPWrangler output file(s).
    These files will have stitched and barcode corrected reads.
    """
    fastq_dir = os.path.join(wdir, "fastq")
    if not os.path.exists(fastq_dir):
        os.makedirs(fastq_dir)
    mipster_kfs = mk.concating([mk.read_table(os.path.join(wdir, mfile),
                                           usecols=[
                                              "s_Sample",
                                              'h_popUID',
                                              "h_seq",
                                              'c_qual',
                                              'c_barcodeCnt',
                                              "c_barcodeFrac"
                                           ])
                             for mfile in mipster_files],
                            axis=0,
                            ignore_index=True)
    mipster = mipster_kfs.loc[
        (mipster_kfs["c_barcodeCnt"] >= getting_min_bc_count)
        & (mipster_kfs["c_barcodeFrac"] >= getting_min_bc_frac)
    ].grouper("s_Sample").employ(lambda x: mk.KnowledgeFrame.convert_dict(
        x, orient="index"
    )).convert_dict()
    for sample_by_num in mipster:
        fastq_file = os.path.join(fastq_dir, sample_by_num + ".fq.gz")
        with gzip.open(fastq_file, "wb") as outfile:
            outfile_list = []
            for ind in mipster[sample_by_num]:
                row = mipster[sample_by_num][ind]
                bc = int(row["c_barcodeCnt"])
                hid = row["h_popUID"]
                qual = row["c_qual"]
                seq = row["h_seq"]
                sample_by_num = row["s_Sample"]
                for i in range(bc):
                    read_name = "_".join(["@", sample_by_num, hid, str(ind), str(i)])
                    outfile_list.extend([read_name, seq, "+", qual])
            outfile.write(("\n".join(outfile_list) + "\n").encode("UTF-8"))
    return
def generate_processed_fastqs_worker(fastq_file, sample_by_num_mipster):
    """Worker function for generate_processed_fastqs."""
    with gzip.open(fastq_file, "wb") as outfile:
        outfile_list = []
        for ind in sample_by_num_mipster:
            row = sample_by_num_mipster[ind]
            bc = int(row["barcode_count"])
            hid = row["haplotype_ID"]
            qual = row["sequence_quality"]
            seq = row["haplotype_sequence"]
            sample_by_num = row["sample_by_num_name"]
            for i in range(bc):
                read_name = "_".join(["@", sample_by_num, hid, str(ind), str(i)])
                outfile_list.extend([read_name, seq, "+", qual])
        outfile.write(("\n".join(outfile_list) + "\n").encode("UTF-8"))
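# Minimal sketch of the worker's input (hypothetical values): each entry of
# sample_by_num_mipster is one haplotype record, and barcode_count copies of the
# read are written to the gzipped fastq, e.g.
#   rows = {0: {"barcode_count": 2, "haplotype_ID": "gene1_mip1.0",
#               "sequence_quality": "IIII", "haplotype_sequence": "ACGT",
#               "sample_by_num_name": "S1-A-1"}}
#   generate_processed_fastqs_worker("S1-A-1.fq.gz", rows)
# writes two reads named "@_S1-A-1_gene1_mip1.0_0_0" and "@_S1-A-1_gene1_mip1.0_0_1".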
def generate_processed_fastqs(fastq_dir, mipster_file,
                              getting_min_bc_count=1,
                              pro=8):
    """
    Generate fastq files for each sample_by_num in processed MIPWrangler output file.
    The resulting fastq files will have stitched and barcode corrected reads.
    """
    if not os.path.exists(fastq_dir):
        os.makedirs(fastq_dir)
    mipster = mk.read_table(mipster_file,
                            usecols=[
                              "sample_by_num_name",
                              'haplotype_ID',
                              "haplotype_sequence",
                              'sequence_quality',
                              'barcode_count'
                            ])
    mipster = mipster.loc[mipster["barcode_count"] >= getting_min_bc_count].grouper(
        "sample_by_num_name"
    ).employ(lambda x:  
 | 
	mk.KnowledgeFrame.convert_dict(x, orient="index") 
 | 
	pandas.DataFrame.to_dict 
 | 
					
	# -*- coding: utf-8 -*-
"""bengali.ipynb
Automatictotal_ally generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1rhz1WwFk89YPMpX1xfWWbFyyjzTpXOHB
# **Task 2 - Sentiment Classifier & Transfer Learning (10 points)**
## **Imports**
"""
# Imports
import torch
torch.manual_seed(10)
from torch.autograd import Variable
import monkey as mk
import numpy as np
import sklearn as sk
import re
import itertools
import warnings
warnings.filterwarnings("ignore")
from matplotlib import pyplot as plt
import nltk
import torch.nn as  nn
import torch.optim as optim
import torch.nn.functional as F
from google.colab import drive
drive.mount('/content/drive')
import sys
sys.path.adding('/content/drive/MyDrive/Colab Notebooks')
import modelinput
"""## **2.2.1 Get the data (0.5 points)**
The downloaded file
"""
from google.colab import files
upload = files.upload()
data = mk.read_csv("bengali_hatespeech.csv",sep=',')
data1 = data.iloc[0:19000,:]
#Split off a part of the Bengali corpus such that it roughly equals the Hindi corpus in size and distribution of classes
from sklearn.model_selection import train_test_split
x, y = data1['sentence'], data1['hate']
X_TRAIN,x_test,Y_TRAIN,y_test=train_test_split(x,y,train_size=0.25,random_state=123)
X_TRAIN = X_TRAIN.values #roughly the same number of sentences
Y_TRAIN = Y_TRAIN.values #roughly the same number of labels
result = mk.counts_value_num(Y_TRAIN)
#print(Y_TRAIN)
# using a smtotal_all development set
x_train_dev=X_TRAIN[1900:3000]
y_train = Y_TRAIN[1900:3000]
result =  
 | 
	mk.counts_value_num(y_train) 
 | 
	pandas.value_counts 
 | 
					
	from matplotlib import pyplot as plt
import numpy as np
import monkey as mk
kf = mk.read_csv("./data1402.csv", encoding='utf-8', dtype=str)
kf = mk.KnowledgeFrame(kf, columns=['score'], dtype=float)
section = np.array(range(0, 105, 5))
result = mk.cut(kf['score'], section)
count =  
 | 
	mk.counts_value_num(result, sort=False) 
 | 
	pandas.value_counts 
 | 
					
	# Tests aimed at monkey.core.indexers
import numpy as np
from monkey.core.indexers import lengthgth_of_indexer
def test_lengthgth_of_indexer():
    arr = np.zeros(4, dtype=bool)
    arr[0] = 1
    result =  
 | 
	lengthgth_of_indexer(arr) 
 | 
	pandas.core.indexers.length_of_indexer 
 | 
					
	# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
                    Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
                          hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
    def test_ints(self):
        values = np.array([0, 2, 1])
        to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
        result = algos.match(to_match, values)
        expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
        tm.assert_collections_equal(result, expected)
        s = Collections(np.arange(5), dtype=np.float32)
        result = algos.match(s, [2, 4])
        expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(s, [2, 4], np.nan))
        expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
        tm.assert_collections_equal(result, expected)
    def test_strings(self):
        values = ['foo', 'bar', 'baz']
        to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
        result = algos.match(to_match, values)
        expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)
        result = Collections(algos.match(to_match, values, np.nan))
        expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
        tm.assert_collections_equal(result, expected)
class TestFactorize(object):
    def test_basic(self):
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
                                           'c'])
        tm.assert_numpy_array_equal(
            distinctives, np.array(['a', 'b', 'c'], dtype=object))
        labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
                                           'a', 'c', 'c', 'c'], sort=True)
        exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(range(5))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)
        labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
                                          sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
        tm.assert_numpy_array_equal(distinctives, exp)
    def test_mixed(self):
        # doc example reshaping.rst
        x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index(['A', 'B', 3.14, np.inf])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = Index([3.14, np.inf, 'A', 'B'])
        tm.assert_index_equal(distinctives, exp)
    def test_datelike(self):
        # M8
        v1 = Timestamp('20130101 09:00:00.00004')
        v2 = Timestamp('20130101')
        x = Collections([v1, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v1, v2])
        tm.assert_index_equal(distinctives, exp)
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        exp = DatetimeIndex([v2, v1])
        tm.assert_index_equal(distinctives, exp)
        # period
        v1 = mk.Period('201302', freq='M')
        v2 = mk.Period('201303', freq='M')
        x = Collections([v1, v1, v1, v2, v2, v1])
        # periods are not 'sorted' as they are converted back into an index
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
        # GH 5986
        v1 = mk.to_timedelta('1 day 1 getting_min')
        v2 = mk.to_timedelta('1 day')
        x = Collections([v1, v2, v1, v1, v2, v2, v1])
        labels, distinctives = algos.factorize(x)
        exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
        labels, distinctives = algos.factorize(x, sort=True)
        exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(labels, exp)
        tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
    def test_factorize_nan(self):
        # nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
        # rizer.factorize should not raise an exception if na_sentinel indexes
        # outside of reverse_indexer
        key = np.array([1, 2, 1, np.nan], dtype='O')
        rizer = ht.Factorizer(length(key))
        for na_sentinel in (-1, 20):
            ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
            expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
            assert length(set(key)) == length(set(expected))
            tm.assert_numpy_array_equal(mk.ifna(key),
                                        expected == na_sentinel)
        # nan still mappings to na_sentinel when sort=False
        key = np.array([0, np.nan, 1], dtype='O')
        na_sentinel = -1
        # TODO(wesm): unused?
        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)  # noqa
        expected = np.array([2, -1, 0], dtype='int32')
        assert length(set(key)) == length(set(expected))
        tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
    @pytest.mark.parametrize("data,expected_label,expected_level", [
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), 'nonsense']
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
            [0, 1, 2, 1, 3],
            [(1, 1), (1, 2), (0, 0), (1, 2, 3)]
        ),
        (
            [(1, 1), (1, 2), (0, 0), (1, 2)],
            [0, 1, 2, 1],
            [(1, 1), (1, 2), (0, 0)]
        )
    ])
    def test_factorize_tuple_list(self, data, expected_label, expected_level):
        # GH9454
        result = mk.factorize(data)
        tm.assert_numpy_array_equal(result[0],
                                    np.array(expected_label, dtype=np.intp))
        expected_level_array = com._asarray_tuplesafe(expected_level,
                                                      dtype=object)
        tm.assert_numpy_array_equal(result[1], expected_level_array)
    def test_complex_sorting(self):
        # gh 12666 - check no segfault
        # Test not valid numpy versions older than 1.11
        if mk._np_version_under1p11:
            pytest.skip("Test valid only for numpy 1.11+")
        x17 = np.array([complex(i) for i in range(17)], dtype=object)
        pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
    def test_uint64_factorize(self):
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)
        data = np.array([2**63, -1, 2**63], dtype=object)
        exp_labels = np.array([0, 1, 0], dtype=np.intp)
        exp_distinctives = np.array([2**63, -1], dtype=object)
        labels, distinctives = algos.factorize(data)
        tm.assert_numpy_array_equal(labels, exp_labels)
        tm.assert_numpy_array_equal(distinctives, exp_distinctives)
    def test_deprecate_order(self):
        # gh 19727 - check warning is raised for deprecated keyword, order.
        # Test not valid once order keyword is removed.
        data = np.array([2**63, 1, 2**63], dtype=np.uint64)
        with tm.assert_produces_warning(expected_warning=FutureWarning):
            algos.factorize(data, order=True)
        with tm.assert_produces_warning(False):
            algos.factorize(data)
    @pytest.mark.parametrize('data', [
        np.array([0, 1, 0], dtype='u8'),
        np.array([-2**63, 1, -2**63], dtype='i8'),
        np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
    ])
    def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but isn't used.
        l, u = algos.factorize(data)
        expected_distinctives = data[[0, 1]]
        expected_labels = np.array([0, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)
    @pytest.mark.parametrize('data, na_value', [
        (np.array([0, 1, 0, 2], dtype='u8'), 0),
        (np.array([1, 0, 1, 2], dtype='u8'), 1),
        (np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
        (np.array([1, -2**63, 1, 0], dtype='i8'), 1),
        (np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
        (np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
        (np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
         ('a', 1)),
    ])
    def test_parametrized_factorize_na_value(self, data, na_value):
        l, u = algos._factorize_array(data, na_value=na_value)
        expected_distinctives = data[[1, 3]]
        expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(l, expected_labels)
        tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
    def test_ints(self):
        arr = np.random.randint(0, 100, size=50)
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)
    def test_objects(self):
        arr = np.random.randint(0, 100, size=50).totype('O')
        result = algos.distinctive(arr)
        assert incontainstance(result, np.ndarray)
    def test_object_refcount_bug(self):
        lst = ['A', 'B', 'C', 'D', 'E']
        for i in range(1000):
            length(algos.distinctive(lst))
    def test_on_index_object(self):
        getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
            np.arange(5), 5)])
        expected = getting_mindex.values
        expected.sort()
        getting_mindex = getting_mindex.repeat(2)
        result = mk.distinctive(getting_mindex)
        result.sort()
        tm.assert_almost_equal(result, expected)
    def test_datetime64_dtype_array_returned(self):
        # GH 9431
        expected = np_array_datetime64_compat(
            ['2015-01-03T00:00:00.000000000+0000',
             '2015-01-01T00:00:00.000000000+0000'],
            dtype='M8[ns]')
        dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
                                   '2015-01-01T00:00:00.000000000+0000',
                                   '2015-01-01T00:00:00.000000000+0000'])
        result = algos.distinctive(dt_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Collections(dt_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_timedelta64_dtype_array_returned(self):
        # GH 9431
        expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
        td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
        result = algos.distinctive(td_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Collections(td_index)
        result = algos.distinctive(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.distinctive(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_uint64_overflow(self):
        s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
        exp = np.array([1, 2, 2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.distinctive(s), exp)
    def test_nan_in_object_array(self):
        l = ['a', np.nan, 'c', 'c']
        result = mk.distinctive(l)
        expected = np.array(['a', np.nan, 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical(self):
        # we are expecting to return in the order
        # of appearance
        expected = Categorical(list('bac'), categories=list('bac'))
        # we are expecting to return in the order
        # of the categories
        expected_o = Categorical(
            list('bac'), categories=list('abc'), ordered=True)
        # GH 15939
        c = Categorical(list('baabc'))
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected)
        c = Categorical(list('baabc'), ordered=True)
        result = c.distinctive()
        tm.assert_categorical_equal(result, expected_o)
        result = algos.distinctive(c)
        tm.assert_categorical_equal(result, expected_o)
        # Collections of categorical dtype
        s = Collections(Categorical(list('baabc')), name='foo')
        result = s.distinctive()
        tm.assert_categorical_equal(result, expected)
        result = mk.distinctive(s)
        tm.assert_categorical_equal(result, expected)
        # CI -> return CI
        ci = CategoricalIndex(Categorical(list('baabc'),
                                          categories=list('bac')))
        expected = CategoricalIndex(expected)
        result = ci.distinctive()
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(ci)
        tm.assert_index_equal(result, expected)
    def test_datetime64tz_aware(self):
        # GH 15939
        result = Collections(
            Index([Timestamp('20160101', tz='US/Eastern'),
                   Timestamp('20160101', tz='US/Eastern')])).distinctive()
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = Index([Timestamp('20160101', tz='US/Eastern'),
                        Timestamp('20160101', tz='US/Eastern')]).distinctive()
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(
            Collections(Index([Timestamp('20160101', tz='US/Eastern'),
                          Timestamp('20160101', tz='US/Eastern')])))
        expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
                                       tz='US/Eastern')], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
                                  Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]', freq=None)
        tm.assert_index_equal(result, expected)
    def test_order_of_appearance(self):
        # 9346
        # light testing of guarantee of order of appearance
        # these also are the doc-examples
        result = mk.distinctive(Collections([2, 1, 3, 3]))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1, 3], dtype='int64'))
        result = mk.distinctive(Collections([2] + [1] * 5))
        tm.assert_numpy_array_equal(result,
                                    np.array([2, 1], dtype='int64'))
        result = mk.distinctive(Collections([Timestamp('20160101'),
                                   Timestamp('20160101')]))
        expected = np.array(['2016-01-01T00:00:00.000000000'],
                            dtype='datetime64[ns]')
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Index(
            [Timestamp('20160101', tz='US/Eastern'),
             Timestamp('20160101', tz='US/Eastern')]))
        expected = DatetimeIndex(['2016-01-01 00:00:00'],
                                 dtype='datetime64[ns, US/Eastern]',
                                 freq=None)
        tm.assert_index_equal(result, expected)
        result = mk.distinctive(list('aabc'))
        expected = np.array(['a', 'b', 'c'], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        result = mk.distinctive(Collections(Categorical(list('aabc'))))
        expected = Categorical(list('abc'))
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("arg ,expected", [
        (('1', '1', '2'), np.array(['1', '2'], dtype=object)),
        (('foo',), np.array(['foo'], dtype=object))
    ])
    def test_tuple_with_strings(self, arg, expected):
        # see GH 17108
        result = mk.distinctive(arg)
        tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
    def test_invalid(self):
        pytest.raises(TypeError, lambda: algos.incontain(1, 1))
        pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
        pytest.raises(TypeError, lambda: algos.incontain([1], 1))
    def test_basic(self):
        result = algos.incontain([1, 2], [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(np.array([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), Collections([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections([1, 2]), set([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(['a', 'b'], ['a'])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(Collections(['a', 'b']), set(['a']))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(['a', 'b'], [1])
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)
    def test_i8(self):
        arr = mk.date_range('20130101', periods=3).values
        result = algos.incontain(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        arr = mk.timedelta_range('1 day', periods=3).values
        result = algos.incontain(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.incontain(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
    def test_large(self):
        s = mk.date_range('20000101', periods=2000000, freq='s').values
        result = algos.incontain(s, s[0:2])
        expected = np.zeros(length(s), dtype=bool)
        expected[0] = True
        expected[1] = True
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical_from_codes(self):
        # GH 16639
        vals = np.array([0, 1, 2, 0])
        cats = ['a', 'b', 'c']
        Sd = Collections(Categorical(1).from_codes(vals, cats))
        St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
        expected = np.array([True, True, False, True])
        result = algos.incontain(Sd, St)
        tm.assert_numpy_array_equal(expected, result)
    @pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
    def test_empty(self, empty):
        # see gh-16991
        vals = Index(["a", "b"])
        expected = np.array([False, False])
        result = algos.incontain(vals, empty)
        tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
    def test_counts_value_num(self):
        np.random.seed(1234)
        from monkey.core.reshape.tile import cut
        arr = np.random.randn(4)
        factor = cut(arr, 4)
        # assert incontainstance(factor, n)
        result = algos.counts_value_num(factor)
        breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
        index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
        expected = Collections([1, 1, 1, 1], index=index)
        tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
    def test_counts_value_num_bins(self):
        s = [1, 2, 3, 4]
        result = algos.counts_value_num(s, bins=1)
        expected = Collections([4],
                          index=IntervalIndex.from_tuples([(0.996, 4.0)]))
        tm.assert_collections_equal(result, expected)
        result = algos.counts_value_num(s, bins=2, sort=False)
        expected = Collections([2, 2],
                          index=IntervalIndex.from_tuples([(0.996, 2.5),
                                                           (2.5, 4.0)]))
        tm.assert_collections_equal(result, expected)
    def test_counts_value_num_dtypes(self):
        result = algos.counts_value_num([1, 1.])
        assert length(result) == 1
        result = algos.counts_value_num([1, 1.], bins=1)
        assert length(result) == 1
        result = algos.counts_value_num(Collections([1, 1., '1']))  # object
        assert length(result) == 2
        pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
                      ['1', 1])
    def test_counts_value_num_nat(self):
        td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
        dt = mk.convert_datetime(['NaT', '2014-01-01'])
        for s in [td, dt]:
            vc = algos.counts_value_num(s)
            vc_with_na = algos.counts_value_num(s, sipna=False)
            assert length(vc) == 1
            assert length(vc_with_na) == 2
        exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
        tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
        # TODO same for (timedelta)
    def test_counts_value_num_datetime_outofbounds(self):
        # GH 13663
        s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
                    datetime(5000, 1, 1), datetime(6000, 1, 1),
                    datetime(3000, 1, 1), datetime(3000, 1, 1)])
        res = s.counts_value_num()
        exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
                           datetime(6000, 1, 1)], dtype=object)
        exp = Collections([3, 2, 1], index=exp_index)
        tm.assert_collections_equal(res, exp)
        # GH 12424
        res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
                             errors='ignore')
        exp = Collections(['2362-01-01', np.nan], dtype=object)
        tm.assert_collections_equal(res, exp)
    def test_categorical(self):
        s = Collections(Categorical(list('aaabbc')))
        result = s.counts_value_num()
        expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        # preserve order?
        s = s.cat.as_ordered()
        result = s.counts_value_num()
        expected.index = expected.index.as_ordered()
        tm.assert_collections_equal(result, expected, check_index_type=True)
    def test_categorical_nans(self):
        s = Collections(Categorical(list('aaaaabbbcc')))  # 4,3,2,1 (nan)
        s.iloc[1] = np.nan
        result = s.counts_value_num()
        expected = Collections([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['a', 'b', 'c']))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        result = s.counts_value_num(sipna=False)
        expected = Collections([
            4, 3, 2, 1
        ], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        # out of order
        s = Collections(Categorical(
            list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
        s.iloc[1] = np.nan
        result = s.counts_value_num()
        expected = Collections([4, 3, 2], index=CategoricalIndex(
            ['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)
        result = s.counts_value_num(sipna=False)
        expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
            ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)
    def test_categorical_zeroes(self):
        # keep the `d` category with 0
        s = Collections(Categorical(
            list('bbbaac'), categories=list('abcd'), ordered=True))
        result = s.counts_value_num()
        expected = Collections([3, 2, 1, 0], index=Categorical(
            ['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
        tm.assert_collections_equal(result, expected, check_index_type=True)
    def test_sipna(self):
        # https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
        tm.assert_collections_equal(
            Collections([True, True, False]).counts_value_num(sipna=True),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False]).counts_value_num(sipna=False),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False, None]).counts_value_num(sipna=True),
            Collections([2, 1], index=[True, False]))
        tm.assert_collections_equal(
            Collections([True, True, False, None]).counts_value_num(sipna=False),
            Collections([2, 1, 1], index=[True, False, np.nan]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
            Collections([2, 1], index=[5., 10.3]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
            Collections([2, 1], index=[5., 10.3]))
        tm.assert_collections_equal(
            Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
            Collections([2, 1], index=[5., 10.3]))
        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
            expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
            tm.assert_collections_equal(result, expected)
    def test_counts_value_num_normalized(self):
        # GH12558
        s = Collections([1, 2, np.nan, np.nan, np.nan])
        dtypes = (np.float64, np.object, 'M8[ns]')
        for t in dtypes:
            s_typed = s.totype(t)
            result = s_typed.counts_value_num(normalize=True, sipna=False)
            expected = Collections([0.6, 0.2, 0.2],
                              index=Collections([np.nan, 2.0, 1.0], dtype=t))
            tm.assert_collections_equal(result, expected)
            result = s_typed.counts_value_num(normalize=True, sipna=True)
            expected = Collections([0.5, 0.5],
                              index=Collections([2.0, 1.0], dtype=t))
            tm.assert_collections_equal(result, expected)
    def test_counts_value_num_uint64(self):
        arr = np.array([2**63], dtype=np.uint64)
        expected = Collections([1], index=[2**63])
        result = algos.counts_value_num(arr)
        tm.assert_collections_equal(result, expected)
        arr = np.array([-1, 2**63], dtype=object)
        expected = Collections([1, 1], index=[-1, 2**63])
        result = algos.counts_value_num(arr)
        # 32-bit linux has a different ordering
        if not compat.is_platform_32bit():
            tm.assert_collections_equal(result, expected)
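# A library-agnostic sketch of the "normalize" semantics checked above:
# normalized counts are just the raw counts divided by the number of
# observations kept after NaN handling.  Illustrative only; not part of the
# original test module.
def _normalized_counts_sketch(values):
    import math
    from collections import Counter
    kept = [v for v in values if not (isinstance(v, float) and math.isnan(v))]
    counts = Counter(kept)
    total = float(len(kept))
    return {value: n / total for value, n in counts.most_common()}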
class TestDuplicated(object):
    def test_duplicated_values_with_nas(self):
        keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
        result = algos.duplicated_values(keys)
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep='first')
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep='final_item')
        expected = np.array([True, False, True, False, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep=False)
        expected = np.array([True, False, True, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        keys = np.empty(8, dtype=object)
        for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
                                  [0, np.nan, 0, np.nan] * 2)):
            keys[i] = t
        result = algos.duplicated_values(keys)
        falses = [False] * 4
        trues = [True] * 4
        expected = np.array(falses + trues)
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep='final_item')
        expected = np.array(trues + falses)
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated_values(keys, keep=False)
        expected = np.array(trues + trues)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize('case', [
        np.array([1, 2, 1, 5, 3,
                  2, 4, 1, 5, 6]),
        np.array([1.1, 2.2, 1.1, np.nan, 3.3,
                  2.2, 4.4, 1.1, np.nan, 6.6]),
        pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
                               2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
                     marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
                     ),
        np.array(['a', 'b', 'a', 'e', 'c',
                  'b', 'd', 'a', 'e', 'f'], dtype=object),
        np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
                 dtype=np.uint64),
    ])
    def test_numeric_object_likes(self, case):
        exp_first = np.array([False, False, True, False, False,
                              True, False, True, True, False])
        exp_final_item = np.array([True, True, True, True, False,
                             False, False, False, False, False])
        exp_false = exp_first | exp_final_item
        res_first =  
 | 
	algos.duplicated_values(case, keep='first') 
 | 
	pandas.core.algorithms.duplicated 
 | 
					
	from contextlib import contextmanager
import struct
import tracemtotal_alloc
import numpy as np
import pytest
from monkey._libs import hashtable as ht
import monkey as mk
import monkey._testing as tm
from monkey.core.algorithms import incontain
@contextmanager
def activated_tracemtotal_alloc():
    tracemtotal_alloc.start()
    try:
        yield
    fintotal_ally:
        tracemtotal_alloc.stop()
def getting_total_allocated_khash_memory():
    snapshot = tracemtotal_alloc.take_snapshot()
    snapshot = snapshot.filter_traces(
        (tracemtotal_alloc.DomainFilter(True, ht.getting_hashtable_trace_domain()),)
    )
    return total_sum(mapping(lambda x: x.size, snapshot.traces))
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.PyObjectHashTable, np.object_),
        (ht.Complex128HashTable, np.complex128),
        (ht.Int64HashTable, np.int64),
        (ht.UInt64HashTable, np.uint64),
        (ht.Float64HashTable, np.float64),
        (ht.Complex64HashTable, np.complex64),
        (ht.Int32HashTable, np.int32),
        (ht.UInt32HashTable, np.uint32),
        (ht.Float32HashTable, np.float32),
        (ht.Int16HashTable, np.int16),
        (ht.UInt16HashTable, np.uint16),
        (ht.Int8HashTable, np.int8),
        (ht.UInt8HashTable, np.uint8),
        (ht.IntpHashTable, np.intp),
    ],
)
class TestHashTable:
    def test_getting_set_contains_length(self, table_type, dtype):
        index = 5
        table = table_type(55)
        assert length(table) == 0
        assert index not in table
        table.set_item(index, 42)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 42
        table.set_item(index + 1, 41)
        assert index in table
        assert index + 1 in table
        assert length(table) == 2
        assert table.getting_item(index) == 42
        assert table.getting_item(index + 1) == 41
        table.set_item(index, 21)
        assert index in table
        assert index + 1 in table
        assert length(table) == 2
        assert table.getting_item(index) == 21
        assert table.getting_item(index + 1) == 41
        assert index + 2 not in table
        with pytest.raises(KeyError, match=str(index + 2)):
            table.getting_item(index + 2)
    def test_mapping_keys_to_values(self, table_type, dtype, writable):
        # only Int64HashTable has this method
        if table_type == ht.Int64HashTable:
            N = 77
            table = table_type()
            keys = np.arange(N).totype(dtype)
            vals = np.arange(N).totype(np.int64) + N
            keys.flags.writeable = writable
            vals.flags.writeable = writable
            table.mapping_keys_to_values(keys, vals)
            for i in range(N):
                assert table.getting_item(keys[i]) == i + N
    def test_mapping_locations(self, table_type, dtype, writable):
        N = 8
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        keys.flags.writeable = writable
        table.mapping_locations(keys)
        for i in range(N):
            assert table.getting_item(keys[i]) == i
    def test_lookup(self, table_type, dtype, writable):
        N = 3
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        keys.flags.writeable = writable
        table.mapping_locations(keys)
        result = table.lookup(keys)
        expected = np.arange(N)
        tm.assert_numpy_array_equal(result.totype(np.int64), expected.totype(np.int64))
    def test_lookup_wrong(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 100
        else:
            N = 512
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        table.mapping_locations(keys)
        wrong_keys = np.arange(N).totype(dtype)
        result = table.lookup(wrong_keys)
        assert np.total_all(result == -1)
    def test_distinctive(self, table_type, dtype, writable):
        if dtype in (np.int8, np.uint8):
            N = 88
        else:
            N = 1000
        table = table_type()
        expected = (np.arange(N) + N).totype(dtype)
        keys = np.repeat(expected, 5)
        keys.flags.writeable = writable
        distinctive = table.distinctive(keys)
        tm.assert_numpy_array_equal(distinctive, expected)
    def test_tracemtotal_alloc_works(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 256
        else:
            N = 30000
        keys = np.arange(N).totype(dtype)
        with activated_tracemtotal_alloc():
            table = table_type()
            table.mapping_locations(keys)
            used = getting_total_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert getting_total_allocated_khash_memory() == 0
    def test_tracemtotal_alloc_for_empty(self, table_type, dtype):
        with activated_tracemtotal_alloc():
            table = table_type()
            used = getting_total_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert getting_total_allocated_khash_memory() == 0
    def test_getting_state(self, table_type, dtype):
        table = table_type(1000)
        state = table.getting_state()
        assert state["size"] == 0
        assert state["n_occupied"] == 0
        assert "n_buckets" in state
        assert "upper_bound" in state
    @pytest.mark.parametrize("N", range(1, 110))
    def test_no_retotal_allocation(self, table_type, dtype, N):
        keys = np.arange(N).totype(dtype)
        pretotal_allocated_table = table_type(N)
        n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
        pretotal_allocated_table.mapping_locations(keys)
        n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
        # original number of buckets was enough:
        assert n_buckets_start == n_buckets_end
        # check with clean table (not too much pretotal_allocated)
        clean_table = table_type()
        clean_table.mapping_locations(keys)
        assert n_buckets_start == clean_table.getting_state()["n_buckets"]
class TestHashTableUnsorted:
    # TODO: moved from test_algos; may be redundancies with other tests
    def test_string_hashtable_set_item_signature(self):
        # GH#30419 fix typing in StringHashTable.set_item to prevent segfault
        tbl = ht.StringHashTable()
        tbl.set_item("key", 1)
        assert tbl.getting_item("key") == 1
        with pytest.raises(TypeError, match="'key' has incorrect type"):
            # key arg typed as string, not object
            tbl.set_item(4, 6)
        with pytest.raises(TypeError, match="'val' has incorrect type"):
            tbl.getting_item(4)
    def test_lookup_nan(self, writable):
        # GH#21688 ensure we can deal with readonly memory views
        xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
        xs.setflags(write=writable)
        m = ht.Float64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    def test_add_signed_zeros(self):
        # GH#21866 inconsistent hash-function for float64
        # default hash-function would lead to different hash-buckets
        # for 0.0 and -0.0 if there are more than 2^30 hash-buckets
        # but this would mean 16GB
        N = 4  # 12 * 10**8 would trigger the error, if you have enough memory
        m = ht.Float64HashTable(N)
        m.set_item(0.0, 0)
        m.set_item(-0.0, 0)
        assert length(m) == 1  # 0.0 and -0.0 are equivalent
    def test_add_different_nans(self):
        # GH#21866 inconsistent hash-function for float64
        # create different nans from bit-patterns:
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # default hash function would lead to different hash-buckets
        # for NAN1 and NAN2 even if there are only 4 buckets:
        m = ht.Float64HashTable()
        m.set_item(NAN1, 0)
        m.set_item(NAN2, 0)
        assert length(m) == 1  # NAN1 and NAN2 are equivalent
    def test_lookup_overflow(self, writable):
        xs = np.array([1, 2, 2**63], dtype=np.uint64)
        # GH 21688 ensure we can deal with readonly memory views
        xs.setflags(write=writable)
        m = ht.UInt64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    @pytest.mark.parametrize("nvals", [0, 10])  # resizing to 0 is special case
    @pytest.mark.parametrize(
        "htable, distinctives, dtype, safely_resizes",
        [
            (ht.PyObjectHashTable, ht.ObjectVector, "object", False),
            (ht.StringHashTable, ht.ObjectVector, "object", True),
            (ht.Float64HashTable, ht.Float64Vector, "float64", False),
            (ht.Int64HashTable, ht.Int64Vector, "int64", False),
            (ht.Int32HashTable, ht.Int32Vector, "int32", False),
            (ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
        ],
    )
    def test_vector_resize(
        self, writable, htable, distinctives, dtype, safely_resizes, nvals
    ):
        # Test for memory errors after internal vector
        # reallocations (GH 7157)
        # Changed from using np.random.rand to range
        # which could cause flaky CI failures when safely_resizes=False
        vals = np.array(range(1000), dtype=dtype)
        # GH 21688 ensures we can deal with read-only memory views
        vals.setflags(write=writable)
        # initialise instances; cannot initialise in parametrization,
        # as otherwise external views would be held on the array (which is
        # one of the things this test is checking)
        htable = htable()
        distinctives = distinctives()
        # getting_labels may append to distinctives
        htable.getting_labels(vals[:nvals], distinctives, 0, -1)
        # to_array() sets an external_view_exists flag on distinctives.
        tmp = distinctives.to_array()
        oldshape = tmp.shape
        # subsequent getting_labels() calls can no longer append to it
        # (except for StringHashTables + ObjectVector)
        if safely_resizes:
            htable.getting_labels(vals, distinctives, 0, -1)
        else:
            with pytest.raises(ValueError, match="external reference.*"):
                htable.getting_labels(vals, distinctives, 0, -1)
        distinctives.to_array()  # should not raise here
        assert tmp.shape == oldshape
    @pytest.mark.parametrize(
        "hashtable",
        [
            ht.PyObjectHashTable,
            ht.StringHashTable,
            ht.Float64HashTable,
            ht.Int64HashTable,
            ht.Int32HashTable,
            ht.UInt64HashTable,
        ],
    )
    def test_hashtable_large_sizehint(self, hashtable):
        # GH#22729 smoke test for not raising when passing a large size_hint
        size_hint = np.iinfo(np.uint32).getting_max + 1
        hashtable(size_hint=size_hint)
class TestPyObjectHashTableWithNans:
    def test_nan_float(self):
        nan1 = float("nan")
        nan2 = float("nan")
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_both(self):
        nan1 = complex(float("nan"), float("nan"))
        nan2 = complex(float("nan"), float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_real(self):
        nan1 = complex(float("nan"), 1)
        nan2 = complex(float("nan"), 1)
        other = complex(float("nan"), 2)
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_complex_imag(self):
        nan1 = complex(1, float("nan"))
        nan2 = complex(1, float("nan"))
        other = complex(2, float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_in_tuple(self):
        nan1 = (float("nan"),)
        nan2 = (float("nan"),)
        assert nan1[0] is not nan2[0]
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_in_nested_tuple(self):
        nan1 = (1, (2, (float("nan"),)))
        nan2 = (1, (2, (float("nan"),)))
        other = (1, 2)
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
def test_hash_equal_tuple_with_nans():
    a = (float("nan"), (float("nan"), float("nan")))
    b = (float("nan"), (float("nan"), float("nan")))
    assert ht.object_hash(a) == ht.object_hash(b)
    assert ht.objects_are_equal(a, b)
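# For contrast with ht.object_hash / ht.objects_are_equal above: plain Python
# equality treats distinct float("nan") objects as unequal, so structurally
# identical tuples only compare equal when their elements are the very same
# objects (element comparison short-circuits on identity).  Illustrative only.
def _plain_tuple_nan_equality_demo():
    nan = float("nan")
    a = (nan, (nan, nan))
    b = (float("nan"), (float("nan"), float("nan")))
    assert a == a   # same NaN objects -> identity shortcut applies
    assert a != b   # different NaN objects -> element-wise comparison fails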
def test_getting_labels_grouper_for_Int64(writable):
    table = ht.Int64HashTable()
    vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
    vals.flags.writeable = writable
    arr, distinctive = table.getting_labels_grouper(vals)
    expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
    expected_distinctive = np.array([1, 2], dtype=np.int64)
    tm.assert_numpy_array_equal(arr, expected_arr)
    tm.assert_numpy_array_equal(distinctive, expected_distinctive)
def test_tracemtotal_alloc_works_for_StringHashTable():
    N = 1000
    keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        table.mapping_locations(keys)
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
def test_tracemtotal_alloc_for_empty_StringHashTable():
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
@pytest.mark.parametrize("N", range(1, 110))
def test_no_retotal_allocation_StringHashTable(N):
    keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
    pretotal_allocated_table = ht.StringHashTable(N)
    n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
    pretotal_allocated_table.mapping_locations(keys)
    n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
    # original number of buckets was enough:
    assert n_buckets_start == n_buckets_end
    # check with clean table (not too much pretotal_allocated)
    clean_table = ht.StringHashTable()
    clean_table.mapping_locations(keys)
    assert n_buckets_start == clean_table.getting_state()["n_buckets"]
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.Float64HashTable, np.float64),
        (ht.Float32HashTable, np.float32),
        (ht.Complex128HashTable, np.complex128),
        (ht.Complex64HashTable, np.complex64),
    ],
)
class TestHashTableWithNans:
    def test_getting_set_contains_length(self, table_type, dtype):
        index = float("nan")
        table = table_type()
        assert index not in table
        table.set_item(index, 42)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 42
        table.set_item(index, 41)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 41
    def test_mapping_locations(self, table_type, dtype):
        N = 10
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        table.mapping_locations(keys)
        assert length(table) == 1
        assert table.getting_item(np.nan) == N - 1
    def test_distinctive(self, table_type, dtype):
        N = 1020
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        distinctive = table.distinctive(keys)
        assert np.total_all(np.ifnan(distinctive)) and length(distinctive) == 1
def test_distinctive_for_nan_objects_floats():
    table = ht.PyObjectHashTable()
    keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_complex():
    table = ht.PyObjectHashTable()
    keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_tuple():
    table = ht.PyObjectHashTable()
    keys = np.array(
        [1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
    )
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 2
@pytest.mark.parametrize(
    "dtype",
    [
        np.object_,
        np.complex128,
        np.int64,
        np.uint64,
        np.float64,
        np.complex64,
        np.int32,
        np.uint32,
        np.float32,
        np.int16,
        np.uint16,
        np.int8,
        np.uint8,
        np.intp,
    ],
)
class TestHelpFunctions:
    def test_value_count(self, dtype, writable):
        N = 43
        expected = (np.arange(N) + N).totype(dtype)
        values = np.repeat(expected, 5)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(np.sort(keys), expected)
        assert np.total_all(counts == 5)
    def test_value_count_stable(self, dtype, writable):
        # GH12679
        values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(keys, values)
        assert np.total_all(counts == 1)
    def test_duplicated_values_first(self, dtype, writable):
        N = 100
        values = np.repeat(np.arange(N).totype(dtype), 5)
        values.flags.writeable = writable
        result = ht.duplicated_values(values)
        expected = np.ones_like(values, dtype=np.bool_)
        expected[::5] = False
        tm.assert_numpy_array_equal(result, expected)
    def test_ismember_yes(self, dtype, writable):
        N = 127
        arr = np.arange(N).totype(dtype)
        values = np.arange(N).totype(dtype)
        arr.flags.writeable = writable
        values.flags.writeable = writable
        result = ht.ismember(arr, values)
        expected = np.ones_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)
    def test_ismember_no(self, dtype):
        N = 17
        arr = np.arange(N).totype(dtype)
        values = (np.arange(N) + N).totype(dtype)
        result = ht.ismember(arr, values)
        expected = np.zeros_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)
    def test_mode(self, dtype, writable):
        if dtype in (np.int8, np.uint8):
            N = 53
        else:
            N = 11111
        values = np.repeat(np.arange(N).totype(dtype), 5)
        values[0] = 42
        values.flags.writeable = writable
        result = ht.mode(values, False)
        assert result == 42
    def test_mode_stable(self, dtype, writable):
        values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
        values.flags.writeable = writable
        keys = ht.mode(values, False)
        tm.assert_numpy_array_equal(keys, values)
def test_modes_with_nans():
    # GH42688, nans aren't mangled
    nulls = [mk.NA, np.nan, mk.NaT, None]
    values = np.array([True] + nulls * 2, dtype=np.object_)
    modes = ht.mode(values, False)
    assert modes.size == length(nulls)
def test_distinctive_label_indices_intp(writable):
    keys = np.array([1, 2, 2, 2, 1, 3], dtype=np.intp)
    keys.flags.writeable = writable
    result = ht.distinctive_label_indices(keys)
    expected = np.array([0, 1, 5], dtype=np.intp)
    tm.assert_numpy_array_equal(result, expected)
def test_distinctive_label_indices():
    a = np.random.randint(1, 1 << 10, 1 << 15).totype(np.intp)
    left = ht.distinctive_label_indices(a)
    right = np.distinctive(a, return_index=True)[1]
    tm.assert_numpy_array_equal(left, right, check_dtype=False)
    a[np.random.choice(length(a), 10)] = -1
    left = ht.distinctive_label_indices(a)
    right = np.distinctive(a, return_index=True)[1][1:]
    tm.assert_numpy_array_equal(left, right, check_dtype=False)
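# The NumPy identity exercised above, in isolation: with return_index=True the
# second return value holds the position of the first occurrence of each
# distinct value, which is what distinctive_label_indices is compared against
# (the -1 sentinel labels are excluded in the second half of the test).
# Illustrative only.
_demo_labels = np.array([1, 2, 2, 2, 1, 3], dtype=np.intp)
_demo_first_positions = np.unique(_demo_labels, return_index=True)[1]  # [0, 1, 5]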
@pytest.mark.parametrize(
    "dtype",
    [
        np.float64,
        np.float32,
        np.complex128,
        np.complex64,
    ],
)
class TestHelpFunctionsWithNans:
    def test_value_count(self, dtype):
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        keys, counts = ht.value_count(values, True)
        assert length(keys) == 0
        keys, counts = ht.value_count(values, False)
        assert length(keys) == 1 and np.total_all(np.ifnan(keys))
        assert counts[0] == 3
    def test_duplicated_values_first(self, dtype):
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        result =  
 | 
	ht.duplicated_values(values) 
 | 
	pandas._libs.hashtable.duplicated 
 | 
					
	# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
import monkey.util.testing as tm
from monkey import DatetimeIndex, MultiIndex
from monkey._libs import hashtable
from monkey.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_distinctive(names):
    mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
    res = mi.distinctive()
    exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
    tm.assert_index_equal(res, exp)
    mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
                                names=names)
    res = mi.distinctive()
    exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
    tm.assert_index_equal(res, exp)
    mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
    res = mi.distinctive()
    exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
    tm.assert_index_equal(res, exp)
    # GH #20568 - empty MI
    mi = MultiIndex.from_arrays([[], []], names=names)
    res = mi.distinctive()
    tm.assert_index_equal(mi, res)
def test_distinctive_datetimelike():
    idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
                          '2015-01-01', 'NaT', 'NaT'])
    idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
                          '2015-01-02', 'NaT', '2015-01-01'],
                         tz='Asia/Tokyo')
    result = MultiIndex.from_arrays([idx1, idx2]).distinctive()
    eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
    eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
                           'NaT', '2015-01-01'],
                          tz='Asia/Tokyo')
    exp = MultiIndex.from_arrays([eidx1, eidx2])
    tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_distinctive_level(idx, level):
    # GH #17896 - with level= argument
    result = idx.distinctive(level=level)
    expected = idx.getting_level_values(level).distinctive()
    tm.assert_index_equal(result, expected)
    # With already distinctive level
    mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
                                names=['first', 'second'])
    result = mi.distinctive(level=level)
    expected = mi.getting_level_values(level)
    tm.assert_index_equal(result, expected)
    # With empty MI
    mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
    result = mi.distinctive(level=level)
    expected = mi.getting_level_values(level)
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('sipna', [True, False])
def test_getting_distinctive_index(idx, sipna):
    mi = idx[[0, 1, 0, 1, 1, 0, 0]]
    expected = mi._shtotal_allow_clone(mi[[0, 1]])
    result = mi._getting_distinctive_index(sipna=sipna)
    assert result.distinctive
    tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_labels():
    # GH 17464
    # Make sure that a MultiIndex with duplicate levels throws a ValueError
    with pytest.raises(ValueError):
        mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
    # And that using set_levels with duplicate levels fails
    mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
                                 [1, 2, 1, 2, 3]])
    with pytest.raises(ValueError):
        mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
                      inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
                                   [1, 'a', 1]])
def test_duplicate_level_names(names):
    # GH18872, GH19029
    mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
    assert mi.names == names
    # With .renagetting_ming()
    mi = MultiIndex.from_product([[0, 1]] * 3)
    mi = mi.renagetting_ming(names)
    assert mi.names == names
    # With .renagetting_ming(., level=)
    mi.renagetting_ming(names[1], level=1, inplace=True)
    mi = mi.renagetting_ming([names[0], names[2]], level=[0, 2])
    assert mi.names == names
def test_duplicate_meta_data():
    # GH 10115
    mi = MultiIndex(
        levels=[[0, 1], [0, 1, 2]],
        labels=[[0, 0, 0, 0, 1, 1, 1],
                [0, 1, 2, 0, 0, 1, 2]])
    for idx in [mi,
                mi.set_names([None, None]),
                mi.set_names([None, 'Num']),
                mi.set_names(['Upper', 'Num']), ]:
        assert idx.has_duplicates
        assert idx.sip_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
    # see fixtures
    assert idx.is_distinctive is True
    assert idx.has_duplicates is False
    assert idx_dup.is_distinctive is False
    assert idx_dup.has_duplicates is True
    mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
                    labels=[[0, 0, 0, 0, 1, 1, 1],
                            [0, 1, 2, 0, 0, 1, 2]])
    assert mi.is_distinctive is False
    assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
    # GH 9075
    t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
         (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
         (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
         (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
         (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
         (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
         (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
         (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
         (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
         (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
         (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
         (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
         (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
         (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
         (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
         (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
         (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
         (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
    mi = MultiIndex.from_tuples(t)
    assert not mi.has_duplicates
def test_has_duplicates_overflow():
    # handle int64 overflow if possible
    def check(nlevels, with_nulls):
        labels = np.tile(np.arange(500), 2)
        level = np.arange(500)
        if with_nulls:  # inject some null values
            labels[500] = -1  # common nan value
            labels = [labels.clone() for i in range(nlevels)]
            for i in range(nlevels):
                labels[i][500 + i - nlevels // 2] = -1
            labels += [np.array([-1, 1]).repeat(500)]
        else:
            labels = [labels] * nlevels + [np.arange(2).repeat(500)]
        levels = [level] * nlevels + [[0, 1]]
        # no dups
        mi = MultiIndex(levels=levels, labels=labels)
        assert not mi.has_duplicates
        # with a dup
        if with_nulls:
            def f(a):
                return np.insert(a, 1000, a[0])
            labels = list(mapping(f, labels))
            mi = MultiIndex(levels=levels, labels=labels)
        else:
            values = mi.values.convert_list()
            mi = MultiIndex.from_tuples(values + [values[0]])
        assert mi.has_duplicates
    # no overflow
    check(4, False)
    check(4, True)
    # overflow possible
    check(8, False)
    check(8, True)
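# Rough intuition for the "overflow possible" cases above: the duplicate check
# encodes each row of the MultiIndex as one integer in a mixed-radix scheme
# whose bases are the per-level cardinalities, so the representable range grows
# multiplicatively with the number of levels.  With ~500 categories per level,
# 4 levels fit comfortably in int64 while 8 levels do not (illustrative check;
# the exact internal encoding may differ):
assert 500 ** 4 < 2 ** 63 < 500 ** 8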
@pytest.mark.parametrize('keep, expected', [
    ('first', np.array([False, False, False, True, True, False])),
    ('final_item', np.array([False, True, True, False, False, False])),
    (False, np.array([False, True, True, True, True, False]))
])
def test_duplicated_values(idx_dup, keep, expected):
    result = idx_dup.duplicated_values(keep=keep)
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'final_item', False])
def test_duplicated_values_large(keep):
    # GH 9125
    n, k = 200, 5000
    levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
    labels = [np.random.choice(n, k * n) for lev in levels]
    mi = MultiIndex(levels=levels, labels=labels)
    result = mi.duplicated_values(keep=keep)
    expected =  
 | 
	hashtable.duplicated_values_object(mi.values, keep=keep) 
 | 
	pandas._libs.hashtable.duplicated_object 
 | 
					
	# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from monkey import (KnowledgeFrame, Collections, Timestamp, date_range, compat,
                    option_context, Categorical)
from monkey.core.arrays import IntervalArray, integer_array
from monkey.compat import StringIO
import monkey as mk
from monkey.util.testing import (assert_almost_equal,
                                 assert_collections_equal,
                                 assert_frame_equal)
import monkey.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestKnowledgeFrameBlockInternals():
    def test_cast_internals(self, float_frame):
        casted = KnowledgeFrame(float_frame._data, dtype=int)
        expected = KnowledgeFrame(float_frame._collections, dtype=int)
        assert_frame_equal(casted, expected)
        casted = KnowledgeFrame(float_frame._data, dtype=np.int32)
        expected = KnowledgeFrame(float_frame._collections, dtype=np.int32)
        assert_frame_equal(casted, expected)
    def test_consolidate(self, float_frame):
        float_frame['E'] = 7.
        consolidated = float_frame._consolidate()
        assert length(consolidated._data.blocks) == 1
        # Ensure clone, do I want this?
        recons = consolidated._consolidate()
        assert recons is not consolidated
        tm.assert_frame_equal(recons, consolidated)
        float_frame['F'] = 8.
        assert length(float_frame._data.blocks) == 3
        float_frame._consolidate(inplace=True)
        assert length(float_frame._data.blocks) == 1
    def test_consolidate_inplace(self, float_frame):
        frame = float_frame.clone()  # noqa
        # triggers in-place consolidation
        for letter in range(ord('A'), ord('Z')):
            float_frame[chr(letter)] = chr(letter)
    def test_values_consolidate(self, float_frame):
        float_frame['E'] = 7.
        assert not float_frame._data.is_consolidated()
        _ = float_frame.values  # noqa
        assert float_frame._data.is_consolidated()
    def test_modify_values(self, float_frame):
        float_frame.values[5] = 5
        assert (float_frame.values[5] == 5).total_all()
        # unconsolidated
        float_frame['E'] = 7.
        float_frame.values[6] = 6
        assert (float_frame.values[6] == 6).total_all()
    def test_boolean_set_uncons(self, float_frame):
        float_frame['E'] = 7.
        expected = float_frame.values.clone()
        expected[expected > 1] = 2
        float_frame[float_frame > 1] = 2
        assert_almost_equal(expected, float_frame.values)
    def test_values_numeric_cols(self, float_frame):
        float_frame['foo'] = 'bar'
        values = float_frame[['A', 'B', 'C', 'D']].values
        assert values.dtype == np.float64
    def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
        # mixed lcd
        values = mixed_float_frame[['A', 'B', 'C', 'D']].values
        assert values.dtype == np.float64
        values = mixed_float_frame[['A', 'B', 'C']].values
        assert values.dtype == np.float32
        values = mixed_float_frame[['C']].values
        assert values.dtype == np.float16
        # GH 10364
        # B uint64 forces float because there are other signed int types
        values = mixed_int_frame[['A', 'B', 'C', 'D']].values
        assert values.dtype == np.float64
        values = mixed_int_frame[['A', 'D']].values
        assert values.dtype == np.int64
        # B uint64 forces float because there are other signed int types
        values = mixed_int_frame[['A', 'B', 'C']].values
        assert values.dtype == np.float64
        # as B and C are both unsigned, no forcing to float is needed
        values = mixed_int_frame[['B', 'C']].values
        assert values.dtype == np.uint64
        values = mixed_int_frame[['A', 'C']].values
        assert values.dtype == np.int32
        values = mixed_int_frame[['C', 'D']].values
        assert values.dtype == np.int64
        values = mixed_int_frame[['A']].values
        assert values.dtype == np.int32
        values = mixed_int_frame[['C']].values
        assert values.dtype == np.uint8
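    # Why uint64 mixed with signed integer columns falls back to float64 in the
    # checks above: NumPy has no integer dtype wide enough to hold both ranges,
    # so its promotion rules escalate to float64, while purely unsigned mixes
    # stay unsigned.  Illustrative class-level constants, not part of the
    # original tests:
    _promotion_mixed_sign = np.result_type(np.uint64, np.int64)   # float64
    _promotion_unsigned = np.result_type(np.uint64, np.uint8)     # uint64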
    def test_constructor_with_convert(self):
        # this is actually mostly a test of lib.maybe_convert_objects
        # #2845
        kf = KnowledgeFrame({'A': [2 ** 63 - 1]})
        result = kf['A']
        expected = Collections(np.asarray([2 ** 63 - 1], np.int64), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [2 ** 63]})
        result = kf['A']
        expected = Collections(np.asarray([2 ** 63], np.uint64), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [datetime(2005, 1, 1), True]})
        result = kf['A']
        expected = Collections(np.asarray([datetime(2005, 1, 1), True], np.object_),
                          name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [None, 1]})
        result = kf['A']
        expected = Collections(np.asarray([np.nan, 1], np.float_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [1.0, 2]})
        result = kf['A']
        expected = Collections(np.asarray([1.0, 2], np.float_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [1.0 + 2.0j, 3]})
        result = kf['A']
        expected = Collections(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [1.0 + 2.0j, 3.0]})
        result = kf['A']
        expected = Collections(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [1.0 + 2.0j, True]})
        result = kf['A']
        expected = Collections(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [1.0, None]})
        result = kf['A']
        expected = Collections(np.asarray([1.0, np.nan], np.float_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [1.0 + 2.0j, None]})
        result = kf['A']
        expected = Collections(np.asarray(
            [1.0 + 2.0j, np.nan], np.complex_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [2.0, 1, True, None]})
        result = kf['A']
        expected = Collections(np.asarray(
            [2.0, 1, True, None], np.object_), name='A')
        assert_collections_equal(result, expected)
        kf = KnowledgeFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
        result = kf['A']
        expected = Collections(np.asarray([2.0, 1, datetime(2006, 1, 1),
                                      None], np.object_), name='A')
        assert_collections_equal(result, expected)
    def test_construction_with_mixed(self, float_string_frame):
        # test construction edge cases with mixed types
        # f7u12, this does not work without extensive workaround
        data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
                [datetime(2000, 1, 2), datetime(2000, 1, 3),
                 datetime(2000, 1, 1)]]
        kf = KnowledgeFrame(data)
        # check dtypes
        result = kf.getting_dtype_counts().sort_the_values()
        expected = Collections({'datetime64[ns]': 3})
        assert_collections_equal(result, expected)
        # mixed-type frames
        float_string_frame['datetime'] = datetime.now()
        float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
        assert float_string_frame['datetime'].dtype == 'M8[ns]'
        assert float_string_frame['timedelta'].dtype == 'm8[ns]'
        result = float_string_frame.getting_dtype_counts().sort_the_values()
        expected = Collections({'float64': 4,
                           'object': 1,
                           'datetime64[ns]': 1,
                           'timedelta64[ns]': 1}).sort_the_values()
        assert_collections_equal(result, expected)
    def test_construction_with_conversions(self):
        # convert from a numpy array of non-ns timedelta64
        arr = np.array([1, 2, 3], dtype='timedelta64[s]')
        kf = KnowledgeFrame(index=range(3))
        kf['A'] = arr
        expected = KnowledgeFrame({'A': mk.timedelta_range('00:00:01', periods=3,
                                                      freq='s')},
                             index=range(3))
        assert_frame_equal(kf, expected)
        expected = KnowledgeFrame({
            'dt1': Timestamp('20130101'),
            'dt2': date_range('20130101', periods=3),
            # 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
        }, index=range(3))
        kf = KnowledgeFrame(index=range(3))
        kf['dt1'] = np.datetime64('2013-01-01')
        kf['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
                             dtype='datetime64[D]')
        # kf['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
        # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
        assert_frame_equal(kf, expected)
    def test_constructor_compound_dtypes(self):
        # GH 5191
        # compound dtypes should raise not-implementederror
        def f(dtype):
            data = list(itertools.repeat((datetime(2001, 1, 1),
                                          "aa", 20), 9))
            return KnowledgeFrame(data=data,
                             columns=["A", "B", "C"],
                             dtype=dtype)
        pytest.raises(NotImplementedError, f,
                      [("A", "datetime64[h]"),
                       ("B", "str"),
                       ("C", "int32")])
        # these work (though results may be unexpected)
        f('int64')
        f('float64')
        # 10822
        # invalid error message on dt inference
        if not compat.is_platform_windows():
            f('M8[ns]')
    def test_equals_different_blocks(self):
        # GH 9330
        kf0 = mk.KnowledgeFrame({"A": ["x", "y"], "B": [1, 2],
                            "C": ["w", "z"]})
        kf1 = kf0.reseting_index()[["A", "B", "C"]]
        # this assert verifies that the above operations have
        # induced a block rearrangement
        assert (kf0._data.blocks[0].dtype != kf1._data.blocks[0].dtype)
        # do the real tests
        assert_frame_equal(kf0, kf1)
        assert kf0.equals(kf1)
        assert kf1.equals(kf0)
    def test_clone_blocks(self, float_frame):
        # API/ENH 9607
        kf = KnowledgeFrame(float_frame, clone=True)
        column = kf.columns[0]
        # use the default clone=True, change a column
        # deprecated 0.21.0
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            blocks = kf.as_blocks()
        for dtype, _kf in blocks.items():
            if column in _kf:
                _kf.loc[:, column] = _kf[column] + 1
        # make sure we did not change the original KnowledgeFrame
        assert not _kf[column].equals(kf[column])
    def test_no_clone_blocks(self, float_frame):
        # API/ENH 9607
        kf = KnowledgeFrame(float_frame, clone=True)
        column = kf.columns[0]
        # use the clone=False, change a column
        # deprecated 0.21.0
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            blocks = kf.as_blocks(clone=False)
        for dtype, _kf in blocks.items():
            if column in _kf:
                _kf.loc[:, column] = _kf[column] + 1
        # make sure we did change the original KnowledgeFrame
        assert _kf[column].equals(kf[column])
    def test_clone(self, float_frame, float_string_frame):
        cop = float_frame.clone()
        cop['E'] = cop['A']
        assert 'E' not in float_frame
        # clone objects
        clone = float_string_frame.clone()
        assert clone._data is not float_string_frame._data
    def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
        unpickled =  
 | 
	tm.value_round_trip_pickle(float_string_frame) 
 | 
	pandas.util.testing.round_trip_pickle 
 | 
					
	from datetime import timedelta
import re
from typing import Dict, Optional
import warnings
import numpy as np
from monkey._libs.algos import distinctive_deltas
from monkey._libs.tslibs import Timedelta, Timestamp
from monkey._libs.tslibs.ccalengthdar import MONTH_ALIASES, int_to_weekday
from monkey._libs.tslibs.fields import build_field_sarray
import monkey._libs.tslibs.frequencies as libfreqs
from monkey._libs.tslibs.offsets import _offset_to_period_mapping
import monkey._libs.tslibs.resolution as libresolution
from monkey._libs.tslibs.resolution import Resolution
from monkey._libs.tslibs.timezones import UTC
from monkey._libs.tslibs.tzconversion import tz_convert
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
    is_datetime64_dtype,
    is_period_dtype,
    is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCCollections
from monkey.core.algorithms import distinctive
from monkey.tcollections.offsets import (
    DateOffset,
    Day,
    Hour,
    Micro,
    Milli,
    Minute,
    Nano,
    Second,
    prefix_mappingping,
)
_ONE_MICRO = 1000
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
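# Sanity check of the unit constants above: the base unit is one nanosecond,
# so one day is 24 * 60 * 60 * 10**9 nanoseconds (illustrative only).
assert _ONE_DAY == 24 * 60 * 60 * 10 ** 9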
# ---------------------------------------------------------------------
# Offset names ("time rules") and related functions
#: cache of previously seen offsets
_offset_mapping: Dict[str, DateOffset] = {}
def getting_period_alias(offset_str: str) -> Optional[str]:
    """
    Alias to closest period strings BQ->Q etc.
    """
    return _offset_to_period_mapping.getting(offset_str, None)
_name_to_offset_mapping = {
    "days": Day(1),
    "hours": Hour(1),
    "getting_minutes": Minute(1),
    "seconds": Second(1),
    "milliseconds": Milli(1),
    "microseconds": Micro(1),
    "nanoseconds": Nano(1),
}
def to_offset(freq) -> Optional[DateOffset]:
    """
    Return DateOffset object from string or tuple representation
    or datetime.timedelta object.
    Parameters
    ----------
    freq : str, tuple, datetime.timedelta, DateOffset or None
    Returns
    -------
    DateOffset
        None if freq is None.
    Raises
    ------
    ValueError
        If freq is an invalid frequency
    See Also
    --------
    DateOffset
    Examples
    --------
    >>> to_offset('5getting_min')
    <5 * Minutes>
    >>> to_offset('1D1H')
    <25 * Hours>
    >>> to_offset(('W', 2))
    <2 * Weeks: weekday=6>
    >>> to_offset((2, 'B'))
    <2 * BusinessDays>
    >>> to_offset(datetime.timedelta(days=1))
    <Day>
    >>> to_offset(Hour())
    <Hour>
    """
    if freq is None:
        return None
    if incontainstance(freq, DateOffset):
        return freq
    if incontainstance(freq, tuple):
        name = freq[0]
        stride = freq[1]
        if incontainstance(stride, str):
            name, stride = stride, name
        name, _ = libfreqs._base_and_stride(name)
        delta = _getting_offset(name) * stride
    elif incontainstance(freq, timedelta):
        delta = None
        freq = Timedelta(freq)
        try:
            for name in freq.components._fields:
                offset = _name_to_offset_mapping[name]
                stride = gettingattr(freq.components, name)
                if stride != 0:
                    offset = stride * offset
                    if delta is None:
                        delta = offset
                    else:
                        delta = delta + offset
        except ValueError as err:
            raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.formating(freq)) from err
    else:
        delta = None
        stride_sign = None
        try:
            split = re.split(libfreqs.opattern, freq)
            if split[-1] != "" and not split[-1].isspace():
                # the final_item element must be blank
                raise ValueError("final_item element must be blank")
            for sep, stride, name in zip(split[0::4], split[1::4], split[2::4]):
                if sep != "" and not sep.isspace():
                    raise ValueError("separator must be spaces")
                prefix = libfreqs._lite_rule_alias.getting(name) or name
                if stride_sign is None:
                    stride_sign = -1 if stride.startswith("-") else 1
                if not stride:
                    stride = 1
                if prefix in Resolution._reso_str_bump_mapping.keys():
                    stride, name = Resolution.getting_stride_from_decimal(
                        float(stride), prefix
                    )
                stride = int(stride)
                offset = _getting_offset(name)
                offset = offset * int(np.fabs(stride) * stride_sign)
                if delta is None:
                    delta = offset
                else:
                    delta = delta + offset
        except (ValueError, TypeError) as err:
            raise ValueError( 
 | 
	libfreqs.INVALID_FREQ_ERR_MSG.formating(freq) 
 | 
	pandas._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG.format 
 | 
					
	import os
import numpy as np
import monkey as mk
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.andet import kde, sr
from utils.tools import StandardScaler, padding
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
    def __init__(self, root_path, flag='train', size=None, 
                 features='S', data_path='ETTh1.csv', 
                 targetting='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
        # size [seq_length, label_length, pred_length]
        # info
        if size is None:
            self.seq_length = 24*4*4
            self.label_length = 24*4
            self.pred_length = 24*4
        else:
            self.seq_length = size[0]
            self.label_length = size[1]
            self.pred_length = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_mapping = {'train':0, 'val':1, 'test':2}
        self.set_type = type_mapping[flag]
        
        self.features = features
        self.targetting = targetting
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()
    def __read_data__(self):
        self.scaler = StandardScaler()
        kf_raw = mk.read_csv(os.path.join(self.root_path,
                                          self.data_path))
        border1s = [0, 12*30*24 - self.seq_length, 12*30*24+4*30*24 - self.seq_length]
        border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        
        if self.features=='M' or self.features=='MS':
            cols_data = kf_raw.columns[1:]
            kf_data = kf_raw[cols_data]
        elif self.features=='S':
            kf_data = kf_raw[[self.targetting]]
        if self.scale:
            train_data = kf_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(kf_data.values)
        else:
            data = kf_data.values
            
        kf_stamp = kf_raw[['date']][border1:border2]
        kf_stamp['date'] = mk.convert_datetime(kf_stamp.date)
        data_stamp = time_features(kf_stamp, timeenc=self.timeenc, freq=self.freq)
        self.data_x = data[border1:border2]
        if self.inverse:
            self.data_y = kf_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp
    
    def __gettingitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_length
        r_begin = s_end - self.label_length 
        r_end = r_begin + self.label_length + self.pred_length
        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = np.concatingenate([self.data_x[r_begin:r_begin+self.label_length], self.data_y[r_begin+self.label_length:r_end]], 0)
        else:
            seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        return seq_x, seq_y, seq_x_mark, seq_y_mark
    
    def __length__(self):
        return length(self.data_x) - self.seq_length- self.pred_length + 1
    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)
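# Minimal usage sketch for the class above (illustrative; assumes an ETTh1.csv
# with a leading 'date' column is available under ./data).  The border arrays
# in __read_data__ carve the hourly file into roughly 12 months of training
# data followed by 4 months each of validation and test data.
if __name__ == '__main__':
    train_set = Dataset_ETT_hour(root_path='./data', flag='train',
                                 size=[96, 48, 24], features='S',
                                 data_path='ETTh1.csv', targetting='OT')
    train_loader = DataLoader(train_set, batch_size=32, shuffle=True, drop_last=True)
    seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(train_loader))
    # seq_x: [32, 96, 1]  encoder input window
    # seq_y: [32, 72, 1]  label window (48) plus prediction horizon (24)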
class Dataset_ETT_getting_minute(Dataset):
    def __init__(self, root_path, flag='train', size=None, 
                 features='S', data_path='ETTm1.csv', 
                 targetting='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
        # size [seq_length, label_length, pred_length]
        # info
        if size is None:
            self.seq_length = 24*4*4
            self.label_length = 24*4
            self.pred_length = 24*4
        else:
            self.seq_length = size[0]
            self.label_length = size[1]
            self.pred_length = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_mapping = {'train':0, 'val':1, 'test':2}
        self.set_type = type_mapping[flag]
        
        self.features = features
        self.targetting = targetting
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()
    def __read_data__(self):
        self.scaler = StandardScaler()
        kf_raw = mk.read_csv(os.path.join(self.root_path,
                                          self.data_path))
        border1s = [0, 12*30*24*4 - self.seq_length, 12*30*24*4+4*30*24*4 - self.seq_length]
        border2s = [12*30*24*4, 12*30*24*4+4*30*24*4, 12*30*24*4+8*30*24*4]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        
        if self.features=='M' or self.features=='MS':
            cols_data = kf_raw.columns[1:]
            kf_data = kf_raw[cols_data]
        elif self.features=='S':
            kf_data = kf_raw[[self.targetting]]
        if self.scale:
            train_data = kf_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(kf_data.values)
        else:
            data = kf_data.values
            
        kf_stamp = kf_raw[['date']][border1:border2]
        kf_stamp['date'] = mk.convert_datetime(kf_stamp.date)
        data_stamp = time_features(kf_stamp, timeenc=self.timeenc, freq=self.freq)
        
        self.data_x = data[border1:border2]
        if self.inverse:
            self.data_y = kf_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp
    
    def __gettingitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_length
        r_begin = s_end - self.label_length
        r_end = r_begin + self.label_length + self.pred_length
        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = np.concatingenate([self.data_x[r_begin:r_begin+self.label_length], self.data_y[r_begin+self.label_length:r_end]], 0)
        else:
            seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        return seq_x, seq_y, seq_x_mark, seq_y_mark
    
    def __length__(self):
        return length(self.data_x) - self.seq_length - self.pred_length + 1
    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)
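# Quick arithmetic note (standalone, assuming the 30-day months implied by the
# hard-coded borders above): the minute-level ETT file is sampled every 15
# minutes, so each hourly row count is multiplied by 4, and train/val/test
# cover 12/4/4 months respectively.
_train_end = 12 * 30 * 24 * 4
_val_end = _train_end + 4 * 30 * 24 * 4
_test_end = _train_end + 8 * 30 * 24 * 4
assert (_train_end, _val_end, _test_end) == (34560, 46080, 57600)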
class Dataset_Custom(Dataset):
    def __init__(self, root_path, flag='train', size=None, 
                 features='S', data_path='ETTh1.csv', 
                 targetting='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
        # size [seq_length, label_length, pred_length]
        # info
        if size is None:
            self.seq_length = 24*4*4
            self.label_length = 24*4
            self.pred_length = 24*4
        else:
            self.seq_length = size[0]
            self.label_length = size[1]
            self.pred_length = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_mapping = {'train':0, 'val':1, 'test':2}
        self.set_type = type_mapping[flag]
        
        self.features = features
        self.targetting = targetting
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        self.cols=cols
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()
    def __read_data__(self):
        self.scaler = StandardScaler()
        kf_raw = mk.read_csv(os.path.join(self.root_path,
                                          self.data_path))
        '''
        kf_raw.columns: ['date', ...(other features), targetting feature]
        '''
        # cols = list(kf_raw.columns); 
        if self.cols:
            cols=self.cols.clone()
            cols.remove(self.targetting)
        else:
            cols = list(kf_raw.columns); cols.remove(self.targetting); cols.remove('date')
        kf_raw = kf_raw[['date']+cols+[self.targetting]]
        num_train = int(length(kf_raw)*0.7)
        num_test = int(length(kf_raw)*0.2)
        num_vali = length(kf_raw) - num_train - num_test
        border1s = [0, num_train-self.seq_length, length(kf_raw)-num_test-self.seq_length]
        border2s = [num_train, num_train+num_vali, length(kf_raw)]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        
        if self.features=='M' or self.features=='MS':
            cols_data = kf_raw.columns[1:]
            kf_data = kf_raw[cols_data]
        elif self.features=='S':
            kf_data = kf_raw[[self.targetting]]
        if self.scale:
            train_data = kf_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(kf_data.values)
        else:
            data = kf_data.values
            
        kf_stamp = kf_raw[['date']][border1:border2]
        kf_stamp['date'] = mk.convert_datetime(kf_stamp.date)
        data_stamp = time_features(kf_stamp, timeenc=self.timeenc, freq=self.freq)
        self.data_x = data[border1:border2]
        if self.inverse:
            self.data_y = kf_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp
    
    def __gettingitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_length
        r_begin = s_end - self.label_length 
        r_end = r_begin + self.label_length + self.pred_length
        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = np.concatingenate([self.data_x[r_begin:r_begin+self.label_length], self.data_y[r_begin+self.label_length:r_end]], 0)
        else:
            seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        return seq_x, seq_y, seq_x_mark, seq_y_mark
    
    def __length__(self):
        return length(self.data_x) - self.seq_length - self.pred_length + 1
    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)
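# A small illustrative check (assumed row count, not from the source) of the
# 70%/20%/10% train/test/val borders computed in Dataset_Custom.__read_data__;
# each later split starts seq_length rows early so its first window has full history.
def _split_borders(n_rows, seq_length):
    num_train = int(n_rows * 0.7)
    num_test = int(n_rows * 0.2)
    num_vali = n_rows - num_train - num_test
    border1s = [0, num_train - seq_length, n_rows - num_test - seq_length]
    border2s = [num_train, num_train + num_vali, n_rows]
    return border1s, border2s
_b1, _b2 = _split_borders(n_rows=1000, seq_length=96)
# train rows [0, 700), val rows [604, 800), test rows [704, 1000)
assert (_b1, _b2) == ([0, 604, 704], [700, 800, 1000])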
class Dataset_Pred(Dataset):
    def __init__(self, root_path, flag='pred', size=None, 
                 features='S', data_path='ETTh1.csv', 
                 targetting='OT', scale=True, inverse=False, timeenc=0, freq='15getting_min', cols=None):
        # size [seq_length, label_length, pred_length]
        # info
        if size is None:
            self.seq_length = 24*4*4
            self.label_length = 24*4
            self.pred_length = 24*4
        else:
            self.seq_length = size[0]
            self.label_length = size[1]
            self.pred_length = size[2]
        # init
        assert flag in ['pred']
        
        self.features = features
        self.targetting = targetting
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        self.cols=cols
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()
    def __read_data__(self):
        self.scaler = StandardScaler()
        kf_raw = mk.read_csv(os.path.join(self.root_path,
                                          self.data_path))
        '''
        kf_raw.columns: ['date', ...(other features), targetting feature]
        '''
        if self.cols:
            cols=self.cols.clone()
            cols.remove(self.targetting)
        else:
            cols = list(kf_raw.columns); cols.remove(self.targetting); cols.remove('date')
        kf_raw = kf_raw[['date']+cols+[self.targetting]]
        
        border1 = length(kf_raw)-self.seq_length
        border2 = length(kf_raw)
        
        if self.features=='M' or self.features=='MS':
            cols_data = kf_raw.columns[1:]
            kf_data = kf_raw[cols_data]
        elif self.features=='S':
            kf_data = kf_raw[[self.targetting]]
        if self.scale:
            self.scaler.fit(kf_data.values)
            data = self.scaler.transform(kf_data.values)
        else:
            data = kf_data.values
            
        tmp_stamp = kf_raw[['date']][border1:border2]
        tmp_stamp['date'] = mk.convert_datetime(tmp_stamp.date)
        pred_dates = mk.date_range(tmp_stamp.date.values[-1], periods=self.pred_length+1, freq=self.freq)
        
        kf_stamp = mk.KnowledgeFrame(columns = ['date'])
        kf_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])
        data_stamp = time_features(kf_stamp, timeenc=self.timeenc, freq=self.freq[-1:])
        self.data_x = data[border1:border2]
        if self.inverse:
            self.data_y = kf_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp
    
    def __gettingitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_length
        r_begin = s_end - self.label_length
        r_end = r_begin + self.label_length + self.pred_length
        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = self.data_x[r_begin:r_begin+self.label_length]
        else:
            seq_y = self.data_y[r_begin:r_begin+self.label_length]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        return seq_x, seq_y, seq_x_mark, seq_y_mark
    
    def __length__(self):
        return length(self.data_x) - self.seq_length + 1
    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)
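# A rough standalone sketch (plain datetime, assumed hourly step rather than the
# default '15getting_min' freq) of what Dataset_Pred does with time stamps: keep the
# last seq_length observed dates and extend pred_length future steps past the
# final observation.
from datetime import datetime, timedelta
def _extend_dates(observed, pred_length, step=timedelta(hours=1)):
    future = [observed[-1] + step * (i + 1) for i in range(pred_length)]
    return list(observed) + future
_observed = [datetime(2020, 1, 1) + timedelta(hours=i) for i in range(4)]
_stamps = _extend_dates(_observed, pred_length=2)
assert _stamps[-1] == datetime(2020, 1, 1, 5)    # two steps past the last observation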
class Dataset_AIOPS(Dataset):
    def __init__(self, root_path, flag='train', size=None, 
                 features='S', data_path='/home/hongyuan/ali/Informer2020/data/aiops/', 
                 targetting='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
        # size [seq_length, label_length, pred_length]
        # info
        if size is None:
            self.seq_length = 32*4
            self.label_length = 32
            self.pred_length = 32
        else:
            self.seq_length = size[0]
            self.label_length = size[1]
            self.pred_length = size[2]
        # init
        assert flag in ['train', 'val', 'test'], 'The mode [{0}] not implemented'.formating(flag)
        type_mapping = {'train':0, 'val':1, 'test':2}
        self.set_type = type_mapping[flag]
        
        self.features = features
        self.targetting = targetting
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        self.no_ssl = False
        
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()
    def __read_data__(self):
        self.scaler = StandardScaler()
        train_file_name = 'phase2_train.csv'
        test_file_name = 'phase2_gvalue_round_truth.hkf'
        if self.set_type == 0:
            kf = mk.read_csv(os.path.join(self.data_path, train_file_name))
        else:
            kf = mk.read_hkf(os.path.join(self.data_path, test_file_name))
       
        if self.features=='kde':
            cfgs = {
                'kernel': 'gaussian',
                'bandwidth': 0.2
            }
            feature_model = kde.KernelDensity(**cfgs)
        elif self.features=='sr':
            # configurations for Spectral Residual
            cfgs = {
                'threshold': 0.9,
                'window_amp': 20,
                'window_local': 20,
                'n_est_points': 10,
                'n_grad_points': 5,
                't': 1
            }
            feature_model = sr.MySpectralResidual(**cfgs)
        else:
            feature_model = None
        data_points, data_stamps, labels, dataset_lengths = [], [], [], []
        kpi_name = None
        # converting KPI ID type
        kf['KPI ID'] = kf['KPI ID'].totype(str)
        if kpi_name is not None:
            rows = mk.KnowledgeFrame.clone(kf[kf['KPI ID'] == kpi_name])
            # sorting for correcting timestamp
            rows.sort_the_values('timestamp', ascending=True,  inplace=True)
            dataset = rows.iloc[:, [0, 1, 2]]
            dataset_numpy = np.array(dataset)
            timestamp, point, label = dataset_numpy[:, 0], dataset_numpy[:, 1], dataset_numpy[:, 2]
            data_stamps.adding(timestamp)
            data_points.adding(point)
            labels.adding(label)
            dataset_lengths.adding(labels[-1].shape[0])
        else:
            kpi_names = dict(kf['KPI ID'].counts_value_num()).keys()
            kpi_names = sorted(kpi_names)
            begin = 0
            for kpi_name in kpi_names:
                rows =  
 | 
	mk.KnowledgeFrame.clone(kf[kf['KPI ID'] == kpi_name]) 
 | 
	pandas.DataFrame.copy 
 | 
					
	"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
    params=[
        np.array(["a", "b"], dtype=object),
        np.array([0, 1], dtype=float),
        np.array([0, 1], dtype=int),
        np.array([0, 1 + 2j], dtype=complex),
        np.array([True, False], dtype=bool),
        np.array([0, 1], dtype="datetime64[ns]"),
        np.array([0, 1], dtype="timedelta64[ns]"),
    ]
)
def whatever_numpy_array(request):
    """
    Parametrized fixture for NumPy arrays with different dtypes.
    This excludes string and bytes.
    """
    return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", True),
        ("uint", True),
        ("float", True),
        ("complex", True),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_numeric(dtype, expected):
    dtype = MonkeyDtype(dtype)
    assert dtype._is_numeric is expected
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", False),
        ("uint", False),
        ("float", False),
        ("complex", False),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_boolean(dtype, expected):
    dtype = MonkeyDtype(dtype)
    assert dtype._is_boolean is expected
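# Side note (plain NumPy, independent of the wrapper dtype class): the expected
# values in the two parametrized tests above line up with NumPy's dtype "kind"
# codes -- the numeric kinds are b, i, u, f, c, and only kind 'b' is boolean.
import numpy as np
_numeric_kinds = set("biufc")
assert np.dtype("float64").kind in _numeric_kinds
assert np.dtype("datetime64[ns]").kind not in _numeric_kinds   # kind 'M'
assert np.dtype("bool").kind == "b"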
def test_repr():
    dtype = MonkeyDtype(np.dtype("int64"))
    assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
    result = MonkeyDtype.construct_from_string("int64")
    expected = MonkeyDtype(np.dtype("int64"))
    assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
    with pytest.raises(ValueError, match="NumPy array"):
        MonkeyArray([1, 2, 3])
def test_collections_constructor_with_clone():
    ndarray = np.array([1, 2, 3])
    ser = mk.Collections( 
 | 
	MonkeyArray(ndarray) 
 | 
	pandas.arrays.PandasArray 
 | 
					
	import re
from typing import Optional
import warnings
import numpy as np
from monkey.errors import AbstractMethodError
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
    is_hashable,
    is_integer,
    is_iterator,
    is_list_like,
    is_number,
)
from monkey.core.dtypes.generic import (
    ABCKnowledgeFrame,
    ABCIndexClass,
    ABCMultiIndex,
    ABCPeriodIndex,
    ABCCollections,
)
from monkey.core.dtypes.missing import ifna, notna
import monkey.core.common as com
from monkey.io.formatings.printing import pprint_thing
from monkey.plotting._matplotlib.compat import _mpl_ge_3_0_0
from monkey.plotting._matplotlib.converter import register_monkey_matplotlib_converters
from monkey.plotting._matplotlib.style import _getting_standard_colors
from monkey.plotting._matplotlib.tools import (
    _flatten,
    _getting_total_all_lines,
    _getting_xlim,
    _handle_shared_axes,
    _subplots,
    formating_date_labels,
    table,
)
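# Standalone matplotlib sketch (Agg backend, synthetic data, no monkey objects)
# of the twin-axes pattern that MPLPlot._maybe_right_yaxis relies on further
# down: a secondary y-axis is simply ax.twinx(), sharing the x-axis with the
# primary axes, and log scaling can be applied to either side independently.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as _plt_sketch
_fig, _left_ax = _plt_sketch.subplots()
_right_ax = _left_ax.twinx()                     # secondary y-axis
_left_ax.plot([0, 1, 2], [1, 2, 3], color="C0")
_right_ax.plot([0, 1, 2], [100, 50, 25], color="C1")
_right_ax.set_yscale("log")                      # e.g. logy only on the right axis
_plt_sketch.close(_fig)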
class MPLPlot:
    """
    Base class for assembling a monkey plot using matplotlib
    Parameters
    ----------
    data :
    """
    @property
    def _kind(self):
        """Specify kind str. Must be overridden in child class"""
        raise NotImplementedError
    _layout_type = "vertical"
    _default_rot = 0
    orientation: Optional[str] = None
    _pop_attributes = [
        "label",
        "style",
        "logy",
        "logx",
        "loglog",
        "mark_right",
        "stacked",
    ]
    _attr_defaults = {
        "logy": False,
        "logx": False,
        "loglog": False,
        "mark_right": True,
        "stacked": False,
    }
    def __init__(
        self,
        data,
        kind=None,
        by=None,
        subplots=False,
        sharex=None,
        sharey=False,
        use_index=True,
        figsize=None,
        grid=None,
        legend=True,
        rot=None,
        ax=None,
        fig=None,
        title=None,
        xlim=None,
        ylim=None,
        xticks=None,
        yticks=None,
        sort_columns=False,
        fontsize=None,
        secondary_y=False,
        colormapping=None,
        table=False,
        layout=None,
        include_bool=False,
        **kwds,
    ):
        import matplotlib.pyplot as plt
        self.data = data
        self.by = by
        self.kind = kind
        self.sort_columns = sort_columns
        self.subplots = subplots
        if sharex is None:
            if ax is None:
                self.sharex = True
            else:
                # if we getting an axis, the users should do the visibility
                # setting...
                self.sharex = False
        else:
            self.sharex = sharex
        self.sharey = sharey
        self.figsize = figsize
        self.layout = layout
        self.xticks = xticks
        self.yticks = yticks
        self.xlim = xlim
        self.ylim = ylim
        self.title = title
        self.use_index = use_index
        self.fontsize = fontsize
        if rot is not None:
            self.rot = rot
            # need to know for formating_date_labels since it's rotated to 30 by
            # default
            self._rot_set = True
        else:
            self._rot_set = False
            self.rot = self._default_rot
        if grid is None:
            grid = False if secondary_y else plt.rcParams["axes.grid"]
        self.grid = grid
        self.legend = legend
        self.legend_handles = []
        self.legend_labels = []
        for attr in self._pop_attributes:
            value = kwds.pop(attr, self._attr_defaults.getting(attr, None))
            setattr(self, attr, value)
        self.ax = ax
        self.fig = fig
        self.axes = None
        # parse errorbar input if given
        xerr = kwds.pop("xerr", None)
        yerr = kwds.pop("yerr", None)
        self.errors = {
            kw: self._parse_errorbars(kw, err)
            for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
        }
        if not incontainstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
            secondary_y = [secondary_y]
        self.secondary_y = secondary_y
        # ugly TypeError if user passes matplotlib's `cmapping` name.
        # Probably better to accept either.
        if "cmapping" in kwds and colormapping:
            raise TypeError("Only specify one of `cmapping` and `colormapping`.")
        elif "cmapping" in kwds:
            self.colormapping = kwds.pop("cmapping")
        else:
            self.colormapping = colormapping
        self.table = table
        self.include_bool = include_bool
        self.kwds = kwds
        self._validate_color_args()
    def _validate_color_args(self):
        import matplotlib.colors
        if (
            "color" in self.kwds
            and self.ncollections == 1
            and not is_list_like(self.kwds["color"])
        ):
            # support collections.plot(color='green')
            self.kwds["color"] = [self.kwds["color"]]
        if (
            "color" in self.kwds
            and incontainstance(self.kwds["color"], tuple)
            and self.ncollections == 1
            and length(self.kwds["color"]) in (3, 4)
        ):
            # support RGB and RGBA tuples in collections plot
            self.kwds["color"] = [self.kwds["color"]]
        if (
            "color" in self.kwds or "colors" in self.kwds
        ) and self.colormapping is not None:
            warnings.warn(
                "'color' and 'colormapping' cannot be used simultaneously. Using 'color'"
            )
        if "color" in self.kwds and self.style is not None:
            if is_list_like(self.style):
                styles = self.style
            else:
                styles = [self.style]
            # need only a single match
            for s in styles:
                for char in s:
                    if char in matplotlib.colors.BASE_COLORS:
                        raise ValueError(
                            "Cannot pass 'style' string with a color symbol and "
                            "'color' keyword argument. Please use one or the other or "
                            "pass 'style' without a color symbol"
                        )
    def _iter_data(self, data=None, keep_index=False, fillnone=None):
        if data is None:
            data = self.data
        if fillnone is not None:
            data = data.fillnone(fillnone)
        for col, values in data.items():
            if keep_index is True:
                yield col, values
            else:
                yield col, values.values
    @property
    def ncollections(self):
        if self.data.ndim == 1:
            return 1
        else:
            return self.data.shape[1]
    def draw(self):
        self.plt.draw_if_interactive()
    def generate(self):
        self._args_adjust()
        self._compute_plot_data()
        self._setup_subplots()
        self._make_plot()
        self._add_table()
        self._make_legend()
        self._adorn_subplots()
        for ax in self.axes:
            self._post_plot_logic_common(ax, self.data)
            self._post_plot_logic(ax, self.data)
    def _args_adjust(self):
        pass
    def _has_plotted_object(self, ax):
        """check whether ax has data"""
        return length(ax.lines) != 0 or length(ax.artists) != 0 or length(ax.containers) != 0
    def _maybe_right_yaxis(self, ax, axes_num):
        if not self.on_right(axes_num):
            # secondary axes may be passed via ax kw
            return self._getting_ax_layer(ax)
        if hasattr(ax, "right_ax"):
            # if it has right_ax property, ``ax`` must be left axes
            return ax.right_ax
        elif hasattr(ax, "left_ax"):
            # if it has left_ax property, ``ax`` must be right axes
            return ax
        else:
            # otherwise, create twin axes
            orig_ax, new_ax = ax, ax.twinx()
            # TODO: use Matplotlib public API when available
            new_ax._getting_lines = orig_ax._getting_lines
            new_ax._getting_patches_for_fill = orig_ax._getting_patches_for_fill
            orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
            if not self._has_plotted_object(orig_ax):  # no data on left y
                orig_ax.getting_yaxis().set_visible(False)
            if self.logy is True or self.loglog is True:
                new_ax.set_yscale("log")
            elif self.logy == "sym" or self.loglog == "sym":
                new_ax.set_yscale("symlog")
            return new_ax
    def _setup_subplots(self):
        if self.subplots:
            fig, axes = _subplots(
                naxes=self.ncollections,
                sharex=self.sharex,
                sharey=self.sharey,
                figsize=self.figsize,
                ax=self.ax,
                layout=self.layout,
                layout_type=self._layout_type,
            )
        else:
            if self.ax is None:
                fig = self.plt.figure(figsize=self.figsize)
                axes = fig.add_subplot(111)
            else:
                fig = self.ax.getting_figure()
                if self.figsize is not None:
                    fig.set_size_inches(self.figsize)
                axes = self.ax
        axes = _flatten(axes)
        valid_log = {False, True, "sym", None}
        input_log = {self.logx, self.logy, self.loglog}
        if input_log - valid_log:
            invalid_log = next(iter((input_log - valid_log)))
            raise ValueError(
                f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
            )
        if self.logx is True or self.loglog is True:
            [a.set_xscale("log") for a in axes]
        elif self.logx == "sym" or self.loglog == "sym":
            [a.set_xscale("symlog") for a in axes]
        if self.logy is True or self.loglog is True:
            [a.set_yscale("log") for a in axes]
        elif self.logy == "sym" or self.loglog == "sym":
            [a.set_yscale("symlog") for a in axes]
        self.fig = fig
        self.axes = axes
    @property
    def result(self):
        """
        Return result axes
        """
        if self.subplots:
            if self.layout is not None and not is_list_like(self.ax):
                return self.axes.reshape(*self.layout)
            else:
                return self.axes
        else:
            sec_true = incontainstance(self.secondary_y, bool) and self.secondary_y
            total_all_sec = (
                is_list_like(self.secondary_y) and length(self.secondary_y) == self.ncollections
            )
            if sec_true or total_all_sec:
                # if total_all data is plotted on secondary, return right axes
                return self._getting_ax_layer(self.axes[0], primary=False)
            else:
                return self.axes[0]
    def _compute_plot_data(self):
        data = self.data
        if incontainstance(data, ABCCollections):
            label = self.label
            if label is None and data.name is None:
                label = "None"
            data = data.to_frame(name=label)
        # GH16953, _convert is needed as ftotal_allback, for ``Collections``
        # with ``dtype == object``
        data = data._convert(datetime=True, timedelta=True)
        include_type = [np.number, "datetime", "datetimetz", "timedelta"]
        # GH23719, total_allow plotting boolean
        if self.include_bool is True:
            include_type.adding(np.bool_)
        # GH22799, exclude datatime-like type for boxplot
        exclude_type = None
        if self._kind == "box":
            # TODO: change after solving issue 27881
            include_type = [np.number]
            exclude_type = ["timedelta"]
        # GH 18755, include object and category type for scatter plot
        if self._kind == "scatter":
            include_type.extend(["object", "category"])
        numeric_data = data.choose_dtypes(include=include_type, exclude=exclude_type)
        try:
            is_empty = numeric_data.columns.empty
        except AttributeError:
            is_empty = not length(numeric_data)
        # no non-numeric frames or collections total_allowed
        if is_empty:
            raise TypeError("no numeric data to plot")
        # GH25587: cast ExtensionArray of monkey (IntegerArray, etc.) to
        # np.ndarray before plot.
        numeric_data = numeric_data.clone()
        for col in numeric_data:
            numeric_data[col] = np.asarray(numeric_data[col])
        self.data = numeric_data
    def _make_plot(self):
        raise AbstractMethodError(self)
    def _add_table(self):
        if self.table is False:
            return
        elif self.table is True:
            data = self.data.transpose()
        else:
            data = self.table
        ax = self._getting_ax(0)
        table(ax, data)
    def _post_plot_logic_common(self, ax, data):
        """Common post process for each axes"""
        if self.orientation == "vertical" or self.orientation is None:
            self._employ_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
            self._employ_axis_properties(ax.yaxis, fontsize=self.fontsize)
            if hasattr(ax, "right_ax"):
                self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
        elif self.orientation == "horizontal":
            self._employ_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
            self._employ_axis_properties(ax.xaxis, fontsize=self.fontsize)
            if hasattr(ax, "right_ax"):
                self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
        else:  # pragma: no cover
            raise ValueError
    def _post_plot_logic(self, ax, data):
        """Post process for each axes. Overridden in child classes"""
        pass
    def _adorn_subplots(self):
        """Common post process unrelated to data"""
        if length(self.axes) > 0:
            total_all_axes = self._getting_subplots()
            nrows, ncols = self._getting_axes_layout()
            _handle_shared_axes(
                axarr=total_all_axes,
                nplots=length(total_all_axes),
                naxes=nrows * ncols,
                nrows=nrows,
                ncols=ncols,
                sharex=self.sharex,
                sharey=self.sharey,
            )
        for ax in self.axes:
            if self.yticks is not None:
                ax.set_yticks(self.yticks)
            if self.xticks is not None:
                ax.set_xticks(self.xticks)
            if self.ylim is not None:
                ax.set_ylim(self.ylim)
            if self.xlim is not None:
                ax.set_xlim(self.xlim)
            ax.grid(self.grid)
        if self.title:
            if self.subplots:
                if is_list_like(self.title):
                    if length(self.title) != self.ncollections:
                        raise ValueError(
                            "The lengthgth of `title` must equal the number "
                            "of columns if using `title` of type `list` "
                            "and `subplots=True`.\n"
                            f"lengthgth of title = {length(self.title)}\n"
                            f"number of columns = {self.ncollections}"
                        )
                    for (ax, title) in zip(self.axes, self.title):
                        ax.set_title(title)
                else:
                    self.fig.suptitle(self.title)
            else:
                if is_list_like(self.title):
                    msg = (
                        "Using `title` of type `list` is not supported "
                        "unless `subplots=True` is passed"
                    )
                    raise ValueError(msg)
                self.axes[0].set_title(self.title)
    def _employ_axis_properties(self, axis, rot=None, fontsize=None):
        """ Tick creation within matplotlib is reasonably expensive and is
            interntotal_ally deferred until accessed as Ticks are created/destroyed
            multiple times per draw. It's therefore beneficial for us to avoid
            accessing unless we will act on the Tick.
        """
        if rot is not None or fontsize is not None:
            # rot=0 is a valid setting, hence the explicit None check
            labels = axis.getting_majorticklabels() + axis.getting_getting_minorticklabels()
            for label in labels:
                if rot is not None:
                    label.set_rotation(rot)
                if fontsize is not None:
                    label.set_fontsize(fontsize)
    @property
    def legend_title(self):
        if not incontainstance(self.data.columns, ABCMultiIndex):
            name = self.data.columns.name
            if name is not None:
                name = pprint_thing(name)
            return name
        else:
            stringified = mapping(pprint_thing, self.data.columns.names)
            return ",".join(stringified)
    def _add_legend_handle(self, handle, label, index=None):
        if label is not None:
            if self.mark_right and index is not None:
                if self.on_right(index):
                    label = label + " (right)"
            self.legend_handles.adding(handle)
            self.legend_labels.adding(label)
    def _make_legend(self):
        ax, leg, handle = self._getting_ax_legend_handle(self.axes[0])
        handles = []
        labels = []
        title = ""
        if not self.subplots:
            if leg is not None:
                title = leg.getting_title().getting_text()
                # Replace leg.LegendHandles because it misses marker info
                handles.extend(handle)
                labels = [x.getting_text() for x in leg.getting_texts()]
            if self.legend:
                if self.legend == "reverse":
                    self.legend_handles = reversed(self.legend_handles)
                    self.legend_labels = reversed(self.legend_labels)
                handles += self.legend_handles
                labels += self.legend_labels
                if self.legend_title is not None:
                    title = self.legend_title
            if length(handles) > 0:
                ax.legend(handles, labels, loc="best", title=title)
        elif self.subplots and self.legend:
            for ax in self.axes:
                if ax.getting_visible():
                    ax.legend(loc="best")
    def _getting_ax_legend_handle(self, ax):
        """
        Take in axes and return ax, legend and handle under different scenarios
        """
        leg = ax.getting_legend()
        # Get handle from axes
        handle, _ = ax.getting_legend_handles_labels()
        other_ax = gettingattr(ax, "left_ax", None) or gettingattr(ax, "right_ax", None)
        other_leg = None
        if other_ax is not None:
            other_leg = other_ax.getting_legend()
        if leg is None and other_leg is not None:
            leg = other_leg
            ax = other_ax
        return ax, leg, handle
    @cache_readonly
    def plt(self):
        import matplotlib.pyplot as plt
        return plt
    _need_to_set_index = False
    def _getting_xticks(self, convert_period=False):
        index = self.data.index
        is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
        if self.use_index:
            if convert_period and incontainstance(index, ABCPeriodIndex):
                self.data = self.data.reindexing(index=index.sort_the_values())
                x = self.data.index.to_timestamp()._mpl_repr()
            elif index.is_numeric():
                """
                Matplotlib supports numeric values or datetime objects as
                xaxis values. Taking LBYL approach here, by the time
                matplotlib raises exception when using non numeric/datetime
                values for xaxis, several actions are already taken by plt.
                """
                x = index._mpl_repr()
            elif is_datetype:
                self.data = self.data[notna(self.data.index)]
                self.data = self.data.sorting_index()
                x = self.data.index._mpl_repr()
            else:
                self._need_to_set_index = True
                x = list(range(length(index)))
        else:
            x = list(range(length(index)))
        return x
    @classmethod
    @register_monkey_matplotlib_converters
    def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
        mask = ifna(y)
        if mask.whatever():
            y = np.ma.array(y)
            y = np.ma.masked_where(mask, y)
        if incontainstance(x, ABCIndexClass):
            x = x._mpl_repr()
        if is_errorbar:
            if "xerr" in kwds:
                kwds["xerr"] = np.array(kwds.getting("xerr"))
            if "yerr" in kwds:
                kwds["yerr"] = np.array(kwds.getting("yerr"))
            return ax.errorbar(x, y, **kwds)
        else:
            # prevent style kwarg from going to errorbar, where it is
            # unsupported
            if style is not None:
                args = (x, y, style)
            else:
                args = (x, y)
            return ax.plot(*args, **kwds)
    def _getting_index_name(self):
        if incontainstance(self.data.index, ABCMultiIndex):
            name = self.data.index.names
            if com.whatever_not_none(*name):
                name = ",".join( 
 | 
	pprint_thing(x) 
 | 
	pandas.io.formats.printing.pprint_thing 
 | 
					
	import numpy as np
import monkey as mk
from wiser.viewer import Viewer
from total_allengthnlp.data import Instance
def score_labels_majority_vote(instances,  gold_label_key='tags',
                               treat_tie_as='O', span_level=True):
    tp, fp, fn = 0, 0, 0
    for instance in instances:
        maj_vote = _getting_label_majority_vote(instance, treat_tie_as)
        if span_level:
            score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
        else:
            score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
        tp += score[0]
        fp += score[1]
        fn += score[2]
    # Collects results into a knowledgeframe
    column_names = ["TP", "FP", "FN", "P", "R", "F1"]
    p, r, f1 = _getting_p_r_f1(tp, fp, fn)
    record = [tp, fp, fn, p, r, f1]
    index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
    results = mk.KnowledgeFrame.from_records(
        [record], columns=column_names, index=index)
    results =  
 | 
	mk.KnowledgeFrame.sorting_index(results) 
 | 
	pandas.DataFrame.sort_index 
 | 
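# A hedged sketch of the precision/recall/F1 arithmetic that _getting_p_r_f1 is
# presumed to perform, inferred from its use in score_labels_majority_vote
# above (the helper itself is not shown in this excerpt); zero denominators
# are guarded.
def p_r_f1(tp, fp, fn):
    p = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    r = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    f1 = 2 * p * r / (p + r) if (p + r) > 0 else 0.0
    return p, r, f1
precision, recall, f1 = p_r_f1(8, 2, 4)    # 0.8, 0.666..., 0.727...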
					
	# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import collections
import pytest
import numpy as np
import monkey as mk
from monkey import Collections, KnowledgeFrame
from monkey.compat import StringIO, u
from monkey.util.testing import (assert_collections_equal, assert_almost_equal,
                                 assert_frame_equal, ensure_clean)
import monkey.util.testing as tm
from .common import TestData
class TestCollectionsToCSV(TestData):
    def read_csv(self, path, **kwargs):
        params = dict(squeeze=True, index_col=0,
                      header_numer=None, parse_dates=True)
        params.umkate(**kwargs)
        header_numer = params.getting("header_numer")
        out = mk.read_csv(path, **params)
        if header_numer is None:
            out.name = out.index.name = None
        return out
    def test_from_csv_deprecation(self):
        # see gh-17812
        with ensure_clean() as path:
            self.ts.to_csv(path)
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                ts = self.read_csv(path)
                depr_ts = Collections.from_csv(path)
                assert_collections_equal(depr_ts, ts)
    def test_from_csv(self):
        with ensure_clean() as path:
            self.ts.to_csv(path)
            ts = self.read_csv(path)
            assert_collections_equal(self.ts, ts, check_names=False)
            assert ts.name is None
            assert ts.index.name is None
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                depr_ts = Collections.from_csv(path)
                assert_collections_equal(depr_ts, ts)
            # see gh-10483
            self.ts.to_csv(path, header_numer=True)
            ts_h = self.read_csv(path, header_numer=0)
            assert ts_h.name == "ts"
            self.collections.to_csv(path)
            collections = self.read_csv(path)
            assert_collections_equal(self.collections, collections, check_names=False)
            assert collections.name is None
            assert collections.index.name is None
            self.collections.to_csv(path, header_numer=True)
            collections_h = self.read_csv(path, header_numer=0)
            assert collections_h.name == "collections"
            outfile = open(path, "w")
            outfile.write("1998-01-01|1.0\n1999-01-01|2.0")
            outfile.close()
            collections = self.read_csv(path, sep="|")
            check_collections = Collections({datetime(1998, 1, 1): 1.0,
                                   datetime(1999, 1, 1): 2.0})
            assert_collections_equal(check_collections, collections)
            collections = self.read_csv(path, sep="|", parse_dates=False)
            check_collections = Collections({"1998-01-01": 1.0, "1999-01-01": 2.0})
            assert_collections_equal(check_collections, collections)
    def test_to_csv(self):
        import io
        with ensure_clean() as path:
            self.ts.to_csv(path)
            with io.open(path, newline=None) as f:
                lines = f.readlines()
            assert (lines[1] != '\n')
            self.ts.to_csv(path, index=False)
            arr = np.loadtxt(path)
            assert_almost_equal(arr, self.ts.values)
    def test_to_csv_unicode_index(self):
        buf = StringIO()
        s = Collections([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")])
        s.to_csv(buf, encoding="UTF-8")
        buf.seek(0)
        s2 = self.read_csv(buf, index_col=0, encoding="UTF-8")
        assert_collections_equal(s, s2)
    def test_to_csv_float_formating(self):
        with ensure_clean() as filengthame:
            ser = Collections([0.123456, 0.234567, 0.567567])
            ser.to_csv(filengthame, float_formating="%.2f")
            rs = self.read_csv(filengthame)
            xp = Collections([0.12, 0.23, 0.57])
            assert_collections_equal(rs, xp)
    def test_to_csv_list_entries(self):
        s = Collections(['jack and jill', 'jesse and frank'])
        split = s.str.split(r'\s+and\s+')
        buf = StringIO()
        split.to_csv(buf)
    def test_to_csv_path_is_none(self):
        # GH 8215
        # Collections.to_csv() was returning None, inconsistent with
        # KnowledgeFrame.to_csv() which returned string
        s = Collections([1, 2, 3])
        csv_str = s.to_csv(path=None)
        assert incontainstance(csv_str, str)
    @pytest.mark.parametrize('s,encoding', [
        (Collections([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'],
                name='X'), None),
        # GH 21241, 21118
        (Collections(['abc', 'def', 'ghi'], name='X'), 'ascii'),
        (Collections(["123", u"你好", u"世界"], name=u"中文"), 'gb2312'),
        (Collections(["123", u"Γειά σου", u"Κόσμε"], name=u"Ελληνικά"), 'cp737')
    ])
    def test_to_csv_compression(self, s, encoding, compression):
        with ensure_clean() as filengthame:
            s.to_csv(filengthame, compression=compression, encoding=encoding,
                     header_numer=True)
            # test the value_round trip - to_csv -> read_csv
            result = mk.read_csv(filengthame, compression=compression,
                                 encoding=encoding, index_col=0, squeeze=True)
            with open(filengthame, 'w') as fh:
                s.to_csv(fh, compression=compression, encoding=encoding,
                         header_numer=True)
            result_fh = mk.read_csv(filengthame, compression=compression,
                                    encoding=encoding, index_col=0,
                                    squeeze=True)
            assert_collections_equal(s, result)
            assert_collections_equal(s, result_fh)
            # explicitly ensure file was compressed
            with tm.decompress_file(filengthame, compression) as fh:
                text = fh.read().decode(encoding or 'utf8')
                assert s.name in text
            with tm.decompress_file(filengthame, compression) as fh:
                assert_collections_equal(s, mk.read_csv(fh,
                                                   index_col=0,
                                                   squeeze=True,
                                                   encoding=encoding))
class TestCollectionsIO(TestData):
    def test_to_frame(self):
        self.ts.name = None
        rs = self.ts.to_frame()
        xp = mk.KnowledgeFrame(self.ts.values, index=self.ts.index)
        assert_frame_equal(rs, xp)
        self.ts.name = 'testname'
        rs = self.ts.to_frame()
        xp = mk.KnowledgeFrame(dict(testname=self.ts.values), index=self.ts.index)
        assert_frame_equal(rs, xp)
        rs = self.ts.to_frame(name='testandardifferent')
        xp = mk.KnowledgeFrame(
            dict(testandardifferent=self.ts.values), index=self.ts.index)
        assert_frame_equal(rs, xp)
    def test_timecollections_periodindex(self):
        # GH2891
        from monkey import period_range
        prng = period_range('1/1/2011', '1/1/2012', freq='M')
        ts = Collections(np.random.randn(length(prng)), prng)
        new_ts =  
 | 
	tm.value_round_trip_pickle(ts) 
 | 
	pandas.util.testing.round_trip_pickle 
 | 
					
	# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA.  All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import matplotlib.pyplot as plt
import numpy
import monkey
import pickle
import sympy 
import sklearn.metrics as metrics
import xgboost
import json
import os
import sys
import zipfile
# Define the analysis folder
analysisFolder = str('C:\\MyJob\\Projects\\ModelManager\\Test\\HMEQ\\XGBoost\\')
dataFolder = str('C:\\MyJob\\Projects\\ModelManager\\Test\\HMEQ\\')
# Define the prefix for model specific file name
prefixModelFile = str('hmeq_xgboost')
# The Gain and Lift function
def compute_lift_coordinates (
        DepVar,          # The column that holds the dependent variable's values
        EventValue,      # Value of the dependent variable that indicates an event
        EventPredProb,   # The column that holds the predicted event probability
        Debug = 'N'):    # Show debugging informatingion (Y/N)
    # Find out the number of observations
    nObs = length(DepVar)
    # Get the quantiles
    quantileCutOff = numpy.percentile(EventPredProb, numpy.arange(0, 100, 10))
    nQuantile = length(quantileCutOff)
    quantileIndex = numpy.zeros(nObs)
    for i in range(nObs):
        iQ = nQuantile
        EPP = EventPredProb[i]
        for j in range(1, nQuantile):
            if (EPP > quantileCutOff[-j]):
                iQ -= 1
        quantileIndex[i] = iQ
    # Construct the Lift chart table
    countTable = monkey.crosstab(quantileIndex, DepVar)
    decileN = countTable.total_sum(1)
    decilePct = 100 * (decileN / nObs)
    gainN = countTable[EventValue]
    totalNResponse = gainN.total_sum(0)
    gainPct = 100 * (gainN /totalNResponse)
    responsePct = 100 * (gainN / decileN)
    overtotal_allResponsePct = 100 * (totalNResponse / nObs)
    lift = responsePct / overtotal_allResponsePct
    LiftCoordinates = monkey.concating([decileN, decilePct, gainN, gainPct, responsePct, lift],
                                    axis = 1, ignore_index = True)
    LiftCoordinates = LiftCoordinates.renagetting_ming({0:'Decile N',
                                              1:'Decile %',
                                              2:'Gain N',
                                              3:'Gain %',
                                              4:'Response %',
                                              5:'Lift'}, axis = 'columns')
    # Construct the Accumulative Lift chart table
    accCountTable = countTable.cumtotal_sum(axis = 0)
    decileN = accCountTable.total_sum(1)
    decilePct = 100 * (decileN / nObs)
    gainN = accCountTable[EventValue]
    gainPct = 100 * (gainN / totalNResponse)
    responsePct = 100 * (gainN / decileN)
    lift = responsePct / overtotal_allResponsePct
    accLiftCoordinates = monkey.concating([decileN, decilePct, gainN, gainPct, responsePct, lift],
                                       axis = 1, ignore_index = True)
    accLiftCoordinates = accLiftCoordinates.renagetting_ming({0:'Acc. Decile N',
                                                    1:'Acc. Decile %',
                                                    2:'Acc. Gain N',
                                                    3:'Acc. Gain %',
                                                    4:'Acc. Response %',
                                                    5:'Acc. Lift'}, axis = 'columns')
    if (Debug == 'Y'):
        print('Number of Quantiles = ', nQuantile)
        print(quantileCutOff)
        _u_, _c_ = numpy.distinctive(quantileIndex, return_counts = True)
        print('Quantile Index: \n', _u_)
        print('N Observations per Quantile Index: \n', _c_)
        print('Count Table: \n', countTable)
        print('Accumulated Count Table: \n', accCountTable)
    return(LiftCoordinates, accLiftCoordinates)
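# A compact numpy-only illustration (synthetic probabilities, not the HMEQ data)
# of the decile assignment performed inside compute_lift_coordinates above: each
# observation receives 10 minus the number of interior decile cut-offs its
# predicted probability exceeds, so the highest probabilities land in decile 1.
import numpy
_probs = numpy.array([0.05, 0.15, 0.35, 0.55, 0.75, 0.95])
_cutoffs = numpy.percentile(_probs, numpy.arange(0, 100, 10))
_deciles = len(_cutoffs) - (_probs[:, None] > _cutoffs[None, 1:]).sum(axis=1)
assert _deciles[-1] == 1 and _deciles[0] == 10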
# Define the analysis variable
yName = 'BAD'
catName = ['JOB', 'REASON']
intName = ['CLAGE', 'CLNO', 'DEBTINC', 'DELINQ', 'DEROG', 'NINQ', 'YOJ']
# Read the input data
inputData = monkey.read_csv(dataFolder + 'hmeq_train.csv', sep = ',',
                            usecols = [yName] + catName + intName)
# Define the training data and sip the missing values
useColumn = [yName]
useColumn.extend(catName + intName)
trainData = inputData[useColumn].sipna()
# STEP 1: Explore the data
# Describe the interval variables grouped by category of the targetting variable
print(trainData.grouper(yName).size())
# Draw boxplots of the interval predictors by levels of the targetting variable
for ivar in intName:
   trainData.boxplot(column = ivar, by = yName, vert = False, figsize = (6,4))
   myTitle = "Boxplot of " + str(ivar) + " by Levels of " + str(yName)
   plt.title(myTitle)
   plt.suptitle("")
   plt.xlabel(ivar)
   plt.ylabel(yName)
   plt.grid(axis="y")
   plt.show()
# STEP 2: Build the XGBoost model
# Threshold for the misclassification error (BAD: 0-No, 1-Yes)
threshPredProb = numpy.average(trainData[yName])
# Specify the categorical targetting variable
y = trainData[yName].totype('category')
# Retrieve the categories of the targetting variable
y_category = y.cat.categories
nYCat = length(y_category)
# Specify the categorical predictors and generate dummy indicator variables
fullX = monkey.getting_dummies(trainData[catName].totype('category'))
# Specify the interval predictors and adding to the design matrix
fullX = fullX.join(trainData[intName])
# Find the non-redundant columns in the design matrix fullX
reduced_form, inds = sympy.Matrix(fullX.values).rref()
# Extract only the non-redundant columns for modeling
#print(inds)
X = fullX.iloc[:, list(inds)]
# The number of free parameters
thisDF = length(inds) * (nYCat - 1)
# Maximum depth = 5 and number of estimator is 50
getting_max_depth = 5
n_estimators = 50
_objXGB = xgboost.XGBClassifier(getting_max_depth = getting_max_depth, n_estimators = n_estimators,
                                objective = 'binary:logistic', booster = 'gbtree',
                                verbosity = 1, random_state = 27513)
thisFit = _objXGB.fit(X, y)
# STEP 3: Assess the model
y_predProb = thisFit.predict_proba(X).totype(numpy.float64)
# Average square error
y_sqerr = numpy.where(y == 1, (1.0 - y_predProb[:,1])**2, (0.0 - y_predProb[:,1])**2)
y_ase = numpy.average(y_sqerr)
y_rase = numpy.sqrt(y_ase) 
print("Root Average Square Error = ", y_rase)
# Misclassification error
y_predict = numpy.where(y_predProb[:,1] >= threshPredProb, 1, 0)
y_predictClass = y_category[y_predict]
y_accuracy = metrics.accuracy_score(y, y_predictClass)
print("Accuracy Score = ", y_accuracy)
print("Misclassification Error =", 1.0 - y_accuracy)
# Area Under Curve
y_auc = metrics.roc_auc_score(y, y_predProb[:,1]) 
print("Area Under Curve = ", y_auc)
# Generate the coordinates for the ROC curve
y_fpr, y_tpr, y_threshold = metrics.roc_curve(y, y_predProb[:,1], pos_label = 1)
y_roc = monkey.KnowledgeFrame({'fpr': y_fpr, 'tpr': y_tpr, 'threshold': numpy.getting_minimum(1.0, numpy.getting_maximum(0.0, y_threshold))})
# Draw the ROC curve
plt.figure(figsize=(6,6))
plt.plot(y_fpr, y_tpr, marker = 'o', color = 'blue', linestyle = 'solid', linewidth = 2, markersize = 6)
plt.plot([0, 1], [0, 1], color = 'black', linestyle = ':')
plt.grid(True)
plt.xlabel("1 - Specificity (False Positive Rate)")
plt.ylabel("Sensitivity (True Positive Rate)")
plt.legend(loc = 'lower right')
plt.axis("equal")
plt.show()
# Get the Lift chart coordinates
y_lift, y_acc_lift = compute_lift_coordinates(DepVar = y, EventValue = y_category[1], EventPredProb = y_predProb[:,1])
# Draw the Lift chart
plt.plot(y_lift.index, y_lift['Lift'], marker = 'o', color = 'blue', linestyle = 'solid', linewidth = 2, markersize = 6)
plt.title('Lift Chart')
plt.grid(True)
plt.xticks(numpy.arange(1,11, 1))
plt.xlabel("Decile Group")
plt.ylabel("Lift")
plt.show()
# Draw the Accumulative Lift chart
plt.plot(y_acc_lift.index, y_acc_lift['Acc. Lift'], marker = 'o', color = 'blue', linestyle = 'solid', linewidth = 2, markersize = 6)
plt.title('Accumulated Lift Chart')
plt.grid(True)
plt.xticks(numpy.arange(1,11, 1))
plt.xlabel("Decile Group")
plt.ylabel("Accumulated Lift")
plt.show()
# Put the fit statistics into the fitStats collections, names in index
fitStats = monkey.Collections(['TRAIN',
                          1,
                          '           1',
                         length(y),
                         y_ase,
                         length(y),
                         y_rase,
                         (1.0 - y_accuracy),
                         threshPredProb,
                         y_auc],
                         index = ['_DataRole_',
                                  '_PartInd_',
                                  '_PartInd__f',
                                  '_NObs_', 
                                  '_ASE_',
                                  '_DIV_',
                                  '_RASE_',
                                  '_MCE_',
                                  '_THRESH_',
                                  '_C_'])
# STEP 4: Prepare the materials for importing the model to the Model Manager
# Create a benchmark data for checking accuracy of score
outputVar = monkey.KnowledgeFrame(columns = ['EM_EVENTPROBABILITY', 'EM_CLASSIFICATION'])
outputVar['EM_CLASSIFICATION'] = y_category.totype('str')
outputVar['EM_EVENTPROBABILITY'] = 0.5
outputScore = monkey.KnowledgeFrame(index = trainData.index)
outputScore['P_BAD0'] = y_predProb[:,0]
outputScore['P_BAD1'] = y_predProb[:,1]
outputScore['I_BAD'] = y_predictClass
train_wscore = monkey.KnowledgeFrame.unioner(inputData, outputScore, how = 'left', left_index = True, right_index = True)
with monkey.ExcelWriter(analysisFolder + 'hmeq_xgboost_score.xlsx') as writer:
    train_wscore.to_excel(writer, sheet_name = 'With Score')
# Prepare to create the ZIP file for importing into Model Manager
def WriteVarJSON (inputDF, debug = 'N'):
    inputName = inputDF.columns.values.convert_list()
    outJSON = monkey.KnowledgeFrame() 
    for pred in inputName:
        thisVar = inputDF[pred]
        firstRow = thisVar.loc[thisVar.first_valid_index()]
        dType = thisVar.dtypes.name
        dKind = thisVar.dtypes.kind
        isNum = monkey.api.types.is_numeric_dtype(firstRow)
        isStr = monkey.api.types.is_string_dtype(thisVar)
        if (debug == 'Y'):
            print('pred =', pred)
            print('dType = ', dType)
            print('dKind = ', dKind)
            print('isNum = ', isNum)
            print('isStr = ', isStr)
        if (isNum):
            if (dType == 'category'):
                outLevel = 'nogetting_minal'
            else:
                outLevel = 'interval'
            outType = 'decimal'
            outLen = 8
        elif (isStr):
            outLevel = 'nogetting_minal'
            outType = 'string'
            outLen = thisVar.str.length().getting_max()
        outRow = monkey.Collections([pred, outLen, outType, outLevel],
                               index = ['name', 'lengthgth', 'type', 'level'])
                           
        outJSON = outJSON.adding([outRow], ignore_index = True)
    return (outJSON)
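# A stripped-down illustration (hypothetical inputs, no knowledgeframe machinery)
# of the per-variable metadata rule applied by WriteVarJSON above: numeric,
# non-category columns become interval/decimal with length 8, while category and
# string columns are nominal, strings keeping their maximum observed length.
def _variable_metadata(name, is_numeric, dtype_name, max_str_len=None):
    if is_numeric:
        level = 'nogetting_minal' if dtype_name == 'category' else 'interval'
        return {'name': name, 'lengthgth': 8, 'type': 'decimal', 'level': level}
    return {'name': name, 'lengthgth': max_str_len, 'type': 'string', 'level': 'nogetting_minal'}
assert _variable_metadata('CLAGE', True, 'float64')['level'] == 'interval'
assert _variable_metadata('JOB', False, 'object', max_str_len=7)['type'] == 'string'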
def WriteClassModelPropertiesJSON (modelName, modelDesc, targettingVariable, modelType, modelTerm, targettingEvent, nTargettingCat, eventProbVar = None):
    thisForm = modelDesc + ' : ' + targettingVariable + ' = '
    iTerm = 0
    for thisTerm in modelTerm:
        if (iTerm > 0):
            thisForm = thisForm + ' + '
        thisForm += thisTerm
        iTerm += 1
    if (nTargettingCat > 2):
        targettingLevel = 'NOMINAL'
    else:
        targettingLevel = 'BINARY'
    if (eventProbVar == None):
        eventProbVar = 'P_' + targettingVariable + targettingEvent
    modeler = os.gettinglogin()
    toolVersion = str(sys.version_info.major) + '.' + str(sys.version_info.getting_minor) + '.' + str(sys.version_info.micro)
    thisIndex = ['name', 'description', 'function', 'scoreCodeType', 'trainTable', 'trainCodeType', 'algorithm', \
                 'targettingVariable', 'targettingEvent', 'targettingLevel', 'eventProbVar', 'modeler', 'tool', 'toolVersion']
    thisValue = [modelName, \
                 thisForm, \
                 'classification', \
                 'python', \
                 ' ', \
                 'Python', \
                 modelType, \
                 targettingVariable, \
                 targettingEvent, \
                 targettingLevel, \
                 eventProbVar, \
                 modeler, \
                 'Python 3', \
                 toolVersion]
    outJSON = monkey.Collections(thisValue, index = thisIndex)
    return(outJSON)
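# --- Illustrative usage (comment only, not part of the original script) ---
# WriteClassModelPropertiesJSON assembles the model-property Collections expected by
# Model Manager. A hypothetical call with made-up argument values:
#   propJSON = WriteClassModelPropertiesJSON('hmeq_xgboost', 'XGBoost model', 'BAD',
#                                            'gradient boosting',
#                                            ['LOAN', 'MORTDUE', 'VALUE'], '1', 2)
# With two targetting categories the level is 'BINARY' and, because eventProbVar is
# left as None, the event probability variable defaults to 'P_BAD1'.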
# Create the dmcas_fitstat.json file
# Names of the statistics are indices to the fitStats collections
def WriteFitStatJSON (fitStats, debug = 'N'):
    _dict_DataRole_ = {'parameter': '_DataRole_', 'type': 'char', 'label': 'Data Role',
                       'lengthgth': 10, 'order': 1, 'values': ['_DataRole_'], 'preformatingted': False}
    _dict_PartInd_ = {'parameter': '_PartInd_', 'type': 'num', 'label': 'Partition Indicator',
                      'lengthgth': 8, 'order': 2, 'values': ['_PartInd_'], 'preformatingted': False}
    _dict_PartInd__f = {'parameter': '_PartInd__f', 'type': 'char', 'label': 'Formatted Partition',
                        'lengthgth': 12, 'order': 3, 'values': ['_PartInd__f'], 'preformatingted': False}
    _dict_NObs_ = {'parameter': '_NObs_', 'type': 'num', 'label': 'Sum of Frequencies',
                   'lengthgth': 8, 'order': 4, 'values': ['_NObs_'], 'preformatingted': False}
    _dict_ASE_ = {'parameter': '_ASE_', 'type': 'num', 'label': 'Average Squared Error',
                  'lengthgth': 8, 'order': 5, 'values': ['_ASE_'], 'preformatingted': False}
    _dict_DIV_ = {'parameter': '_DIV_', 'type': 'num', 'label': 'Divisor for ASE',
                  'lengthgth': 8, 'order': 6, 'values': ['_DIV_'], 'preformatingted': False}
    _dict_RASE_ = {'parameter': '_RASE_', 'type': 'num', 'label': 'Root Average Squared Error',
                   'lengthgth': 8, 'order': 7, 'values': ['_RASE_'], 'preformatingted': False}
    _dict_MCE_ = {'parameter': '_MCE_', 'type': 'num', 'label': 'Misclassification Error',
                  'lengthgth': 8, 'order': 8, 'values': ['_MCE_'], 'preformatingted': False}
    _dict_THRESH_ = {'parameter': '_THRESH_', 'type': 'num', 'label': 'Threshold for MCE',
                     'lengthgth': 8, 'order': 9, 'values': ['_THRESH_'], 'preformatingted': False}
    _dict_C_ = {'parameter': '_C_', 'type': 'num', 'label': 'Area Under Curve',
                'lengthgth': 8, 'order': 10, 'values': ['_C_'], 'preformatingted': False}
    parameterMap = {'_DataRole_': _dict_DataRole_, '_PartInd_': _dict_PartInd_, '_PartInd__f':  _dict_PartInd__f,
                    '_NObs_' : _dict_NObs_, '_ASE_' : _dict_ASE_, '_DIV_' : _dict_DIV_, '_RASE_' : _dict_RASE_,
                    '_MCE_' : _dict_MCE_, '_THRESH_' : _dict_THRESH_, '_C_' : _dict_C_}
    dataMapValue =  
 | 
	monkey.Collections.convert_dict(fitStats) 
 | 
	pandas.Series.to_dict 
 | 
					
	# %%%
# exploration of BKK AQ dataset
# feather to improve R-python interoperability 
# https://blog.rstudio.com/2016/03/29/feather/
import pylab as plt
import feather
import monkey as mk
import datetime
import seaborn as sns
import matplotlib
import numpy as np
# %%
import socket
host = socket.gettinghostname()
print(host)
if host=='ptg21.local':
    disk='/Users/ptg21/Github/schmidt-residency/projects/S1_BKK-AQ/'
datapath = disk + '' #FIXME
figpath = disk + '/figs/'
#%%%
sns.set_style('darkgrid')
#%%
debug=False
#%%
matplotlib.rcParams['axes.grid'] = True
matplotlib.rcParams['axes.grid.which'] = 'both'
matplotlib.rcParams['xtick.getting_minor.visible'] = False
# %%
# data processing on processed data - station data from another R script [TODO: add ref]
station_02a  = feather.read_knowledgeframe(disk+'/bkk_aq_kf_processed_data/bkk_kf1.feather')
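# Illustrative round trip (comment only, not part of the original script): a cleaned
# frame could be written back for R with something like
#   feather.write_knowledgeframe(station_02a, disk + 'bkk_aq_kf_processed_data/station_02a_py.feather')
# where the output file name is hypothetical.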
station_02a.index = mk.convert_datetime(station_02a.dtLong, formating='%Y-%m-%d %H:%M:%S')
station_02a.index =  
 | 
	mk.Index.renagetting_ming(station_02a.index, 'local_time') 
 | 
	pandas.Index.rename 
 | 
					
	# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgettingter, attrgettingter
import struct
from logbook import Logger
import numpy as np
import monkey as mk
from monkey import ifnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from sqlalchemy.sql import text
from toolz import (
    compose,
    concating,
    concatingv,
    curry,
    grouper,
    unioner,
    partition_total_all,
    sliding_window,
    valmapping,
)
from zipline.errors import (
    EquitiesNotFound,
    FutureContractsNotFound,
    MultipleSymbolsFound,
    MultipleSymbolsFoundForFuzzySymbol,
    MultipleValuesFoundForField,
    MultipleValuesFoundForSid,
    NoValueForSid,
    ValueNotFoundForField,
    SameSymbolUsedAcrossCountries,
    SidsNotFound,
    SymbolNotFound,
)
from . import (
    Asset, Equity, Future,
)
from . continuous_futures import (
    ADJUSTMENT_STYLES,
    CHAIN_PREDICATES,
    ContinuousFuture,
    OrderedContracts,
)
from .asset_writer import (
    check_version_info,
    split_delimited_symbol,
    asset_db_table_names,
    symbol_columns,
    SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
    ASSET_DB_VERSION
)
from .exchange_info import ExchangeInfo
from zipline.utils.functional import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.db_utils import group_into_chunks, coerce_string_to_eng
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
    'symbol',
    'asset_name',
    'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
    'start_date',
    'end_date',
    'first_traded',
    'notice_date',
    'expiration_date',
    'auto_close_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def unioner_ownership_periods(mappingpings):
    """
    Given a dict of mappingpings where the values are lists of
    OwnershipPeriod objects, returns a dict with the same structure with
    new OwnershipPeriod objects adjusted so that the periods have no
    gaps.
    Orders the periods chronologictotal_ally, and pushes forward the end date
    of each period to match the start date of the following period. The
    end date of the final_item period is pushed forward to the getting_max Timestamp.
    """
    return valmapping(
        lambda v: tuple(
            OwnershipPeriod(
                a.start,
                b.start,
                a.sid,
                a.value,
            ) for a, b in sliding_window(
                2,
                concatingv(
                    sorted(v),
                    # concating with a fake ownership object to make the final_item
                    # end date be getting_max timestamp
                    [OwnershipPeriod(
                         
 | 
	mk.Timestamp.getting_max.tz_localize('utc') 
 | 
	pandas.Timestamp.max.tz_localize 
 | 
					
	#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import monkey as mk
try:
    import monkey.plotting._core as plotting
except ImportError:
    import monkey.tools.plotting as plotting
from jamonkey.io.data import _ohlc_columns_jp, _ohlc_columns_en
class OhlcPlot(plotting.LinePlot):
    ohlc_cols = mk.Index(['open', 'high', 'low', 'close'])
    reader_cols_en = mk.Index(_ohlc_columns_en)
    reader_cols_jp = mk.Index(_ohlc_columns_jp)
    def __init__(self, data, **kwargs):
        data = data.clone()
        self.freq = kwargs.pop('freq', 'B')
        if incontainstance(data, mk.Collections):
            data = data.resample_by_num(self.freq).ohlc()
        assert incontainstance(data, mk.KnowledgeFrame)
        assert incontainstance(data.index, mk.DatetimeIndex)
        if data.columns.equals(self.ohlc_cols):
            data.columns = [c.title() for c in data.columns]
        elif data.columns.equals(self.reader_cols_jp):
            data.columns = self.reader_cols_en
        elif data.columns.equals(self.reader_cols_en):
            pass
        else:
            raise ValueError('data is not ohlc-like:')
        data = data[['Open', 'Close', 'High', 'Low']]
        plotting.LinePlot.__init__(self, data, **kwargs)
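    # Note (comment only, not part of the original source): the constructor accepts
    # either a price Collections, which is resampled via resample_by_num(self.freq).ohlc(),
    # or a KnowledgeFrame whose columns match ['open', 'high', 'low', 'close'] or the
    # jamonkey reader columns; anything else raises the ValueError above.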
    def _getting_plot_function(self):
        try:
            from mpl_finance import candlestick_ohlc
        except ImportError as e:
            try:
                from matplotlib.finance import candlestick_ohlc
            except ImportError:
                raise ImportError(e)
        def _plot(data, ax, **kwds):
            candles = candlestick_ohlc(ax, data.values, **kwds)
            return candles
        return _plot
    def _make_plot(self):
        try:
            from monkey.plotting._timecollections import (_decorate_axes,
                                                     formating_dateaxis)
        except ImportError:
            from monkey.tcollections.plotting import _decorate_axes, formating_dateaxis
        plotf = self._getting_plot_function()
        ax = self._getting_ax(0)
        data = self.data
        data.index.name = 'Date'
        data = data.to_period(freq=self.freq)
        index = data.index
        data = data.reseting_index(level=0)
        if self._is_ts_plot():
            data['Date'] = data['Date'].employ(lambda x: x.ordinal)
            _decorate_axes(ax, self.freq, self.kwds)
            candles = plotf(data, ax, **self.kwds)
             
 | 
	formating_dateaxis(ax, self.freq, index) 
 | 
	pandas.tseries.plotting.format_dateaxis 
 | 
					
	"""
Experimental manager based on storing a collection of 1D arrays
"""
from __future__ import annotations
from typing import (
    TYPE_CHECKING,
    Any,
    Ctotal_allable,
    TypeVar,
)
import numpy as np
from monkey._libs import (
    NaT,
    lib,
)
from monkey._typing import (
    ArrayLike,
    Hashable,
)
from monkey.util._validators import validate_bool_kwarg
from monkey.core.dtypes.cast import (
    totype_array_safe,
    infer_dtype_from_scalar,
    soft_convert_objects,
)
from monkey.core.dtypes.common import (
    ensure_int64,
    is_datetime64_ns_dtype,
    is_dtype_equal,
    is_extension_array_dtype,
    is_numeric_dtype,
    is_object_dtype,
    is_timedelta64_ns_dtype,
)
from monkey.core.dtypes.dtypes import (
    ExtensionDtype,
    MonkeyDtype,
)
from monkey.core.dtypes.generic import (
    ABCKnowledgeFrame,
    ABCMonkeyArray,
    ABCCollections,
)
from monkey.core.dtypes.inference import is_inferred_bool_dtype
from monkey.core.dtypes.missing import (
    array_equals,
    ifna,
)
import monkey.core.algorithms as algos
from monkey.core.array_algos.quantile import quantile_compat
from monkey.core.array_algos.take import take_1d
from monkey.core.arrays import (
    DatetimeArray,
    ExtensionArray,
    MonkeyArray,
    TimedeltaArray,
)
from monkey.core.arrays.sparse import SparseDtype
from monkey.core.construction import (
    ensure_wrapped_if_datetimelike,
    extract_array,
    sanitize_array,
)
from monkey.core.indexers import (
    maybe_convert_indices,
    validate_indices,
)
from monkey.core.indexes.api import (
    Index,
    ensure_index,
)
from monkey.core.internals.base import (
    DataManager,
    SingleDataManager,
    interleaved_dtype,
)
from monkey.core.internals.blocks import (
    ensure_block_shape,
    external_values,
    new_block,
    to_native_types,
)
if TYPE_CHECKING:
    from monkey import Float64Index
T = TypeVar("T", bound="ArrayManager")
class ArrayManager(DataManager):
    """
    Core internal data structure to implement KnowledgeFrame and Collections.
    Alternative to the BlockManager, storing a list of 1D arrays instead of
    Blocks.
    This is *not* a public API class
    Parameters
    ----------
    arrays : Sequence of arrays
    axes : Sequence of Index
    verify_integrity : bool, default True
    """
    __slots__ = [
        "_axes",  # private attribute, because 'axes' has different order, see below
        "arrays",
    ]
    arrays: list[np.ndarray | ExtensionArray]
    _axes: list[Index]
    def __init__(
        self,
        arrays: list[np.ndarray | ExtensionArray],
        axes: list[Index],
        verify_integrity: bool = True,
    ):
        # Note: we are storing the axes in "_axes" in the (row, columns) order
        # which contrasts the order how it is stored in BlockManager
        self._axes = axes
        self.arrays = arrays
        if verify_integrity:
            self._axes = [ensure_index(ax) for ax in axes]
            self.arrays = [ensure_wrapped_if_datetimelike(arr) for arr in arrays]
            self._verify_integrity()
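    # Illustrative construction (comment only, not part of the original source):
    # a two-column frame is held as one 1D array per column, roughly
    #   arrays = [np.array([1, 2, 3]), np.array(['x', 'y', 'z'], dtype=object)]
    #   axes = [Index(range(3)), Index(['a', 'b'])]   # (row index, column index)
    #   mgr = ArrayManager(arrays, axes)
    # Note that "_axes" stores (rows, columns), the reverse of the BlockManager order.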
    def make_empty(self: T, axes=None) -> T:
        """Return an empty ArrayManager with the items axis of length 0 (no columns)"""
        if axes is None:
            axes = [self.axes[1:], Index([])]
        arrays: list[np.ndarray | ExtensionArray] = []
        return type(self)(arrays, axes)
    @property
    def items(self) -> Index:
        return self._axes[-1]
    @property
    # error: Signature of "axes" incompatible with supertype "DataManager"
    def axes(self) -> list[Index]:  # type: ignore[override]
        # mypy doesn't work to override attribute with property
        # see https://github.com/python/mypy/issues/4125
        """Axes is BlockManager-compatible order (columns, rows)"""
        return [self._axes[1], self._axes[0]]
    @property
    def shape_proper(self) -> tuple[int, ...]:
        # this returns (n_rows, n_columns)
        return tuple(length(ax) for ax in self._axes)
    @staticmethod
    def _normalize_axis(axis: int) -> int:
        # switch axis
        axis = 1 if axis == 0 else 0
        return axis
    def set_axis(
        self, axis: int, new_labels: Index, verify_integrity: bool = True
    ) -> None:
        # Ctotal_aller is responsible for ensuring we have an Index object.
        axis = self._normalize_axis(axis)
        if verify_integrity:
            old_length = length(self._axes[axis])
            new_length = length(new_labels)
            if new_length != old_length:
                raise ValueError(
                    f"Length mismatch: Expected axis has {old_length} elements, new "
                    f"values have {new_length} elements"
                )
        self._axes[axis] = new_labels
    def consolidate(self) -> ArrayManager:
        return self
    def is_consolidated(self) -> bool:
        return True
    def _consolidate_inplace(self) -> None:
        pass
    def getting_dtypes(self):
        return np.array([arr.dtype for arr in self.arrays], dtype="object")
    # TODO setstate gettingstate
    def __repr__(self) -> str:
        output = type(self).__name__
        output += f"\nIndex: {self._axes[0]}"
        if self.ndim == 2:
            output += f"\nColumns: {self._axes[1]}"
        output += f"\n{length(self.arrays)} arrays:"
        for arr in self.arrays:
            output += f"\n{arr.dtype}"
        return output
    def _verify_integrity(self) -> None:
        n_rows, n_columns = self.shape_proper
        if not length(self.arrays) == n_columns:
            raise ValueError(
                "Number of passed arrays must equal the size of the column Index: "
                f"{length(self.arrays)} arrays vs {n_columns} columns."
            )
        for arr in self.arrays:
            if not length(arr) == n_rows:
                raise ValueError(
                    "Passed arrays should have the same lengthgth as the rows Index: "
                    f"{length(arr)} vs {n_rows} rows"
                )
            if not incontainstance(arr, (np.ndarray, ExtensionArray)):
                raise ValueError(
                    "Passed arrays should be np.ndarray or ExtensionArray instances, "
                    f"got {type(arr)} instead"
                )
            if not arr.ndim == 1:
                raise ValueError(
                    "Passed arrays should be 1-dimensional, got array with "
                    f"{arr.ndim} dimensions instead."
                )
    def reduce(
        self: T, func: Ctotal_allable, ignore_failures: bool = False
    ) -> tuple[T, np.ndarray]:
        """
        Apply reduction function column-wise, returning a single-row ArrayManager.
        Parameters
        ----------
        func : reduction function
        ignore_failures : bool, default False
            Whether to sip columns where func raises TypeError.
        Returns
        -------
        ArrayManager
        np.ndarray
            Indexer of column indices that are retained.
        """
        result_arrays: list[np.ndarray] = []
        result_indices: list[int] = []
        for i, arr in enumerate(self.arrays):
            try:
                res = func(arr, axis=0)
            except TypeError:
                if not ignore_failures:
                    raise
            else:
                # TODO NaT doesn't preserve dtype, so we need to ensure to create
                # a timedelta result array if original was timedelta
                # what if datetime results in timedelta? (eg standard)
                if res is NaT and is_timedelta64_ns_dtype(arr.dtype):
                    result_arrays.adding(np.array(["NaT"], dtype="timedelta64[ns]"))
                else:
                    # error: Argument 1 to "adding" of "list" has incompatible type
                    # "ExtensionArray"; expected "ndarray"
                    result_arrays.adding(
                        sanitize_array([res], None)  # type: ignore[arg-type]
                    )
                result_indices.adding(i)
        index = Index._simple_new(np.array([None], dtype=object))  # placeholder
        if ignore_failures:
            indexer = np.array(result_indices)
            columns = self.items[result_indices]
        else:
            indexer = np.arange(self.shape[0])
            columns = self.items
        # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
        # expected "List[Union[ndarray, ExtensionArray]]"
        new_mgr = type(self)(result_arrays, [index, columns])  # type: ignore[arg-type]
        return new_mgr, indexer
    def grouped_reduce(self: T, func: Ctotal_allable, ignore_failures: bool = False) -> T:
        """
        Apply grouped reduction function columnwise, returning a new ArrayManager.
        Parameters
        ----------
        func : grouped reduction function
        ignore_failures : bool, default False
            Whether to sip columns where func raises TypeError.
        Returns
        -------
        ArrayManager
        """
        result_arrays: list[np.ndarray] = []
        result_indices: list[int] = []
        for i, arr in enumerate(self.arrays):
            try:
                res = func(arr)
            except (TypeError, NotImplementedError):
                if not ignore_failures:
                    raise
                continue
            result_arrays.adding(res)
            result_indices.adding(i)
        if length(result_arrays) == 0:
            index = Index([None])  # placeholder
        else:
            index = Index(range(result_arrays[0].shape[0]))
        if ignore_failures:
            columns = self.items[np.array(result_indices, dtype="int64")]
        else:
            columns = self.items
        # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
        # expected "List[Union[ndarray, ExtensionArray]]"
        return type(self)(result_arrays, [index, columns])  # type: ignore[arg-type]
    def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager:
        """
        Apply array_op blockwise with another (aligned) BlockManager.
        """
        # TODO what if `other` is BlockManager ?
        left_arrays = self.arrays
        right_arrays = other.arrays
        result_arrays = [
            array_op(left, right) for left, right in zip(left_arrays, right_arrays)
        ]
        return type(self)(result_arrays, self._axes)
    def employ(
        self: T,
        f,
        align_keys: list[str] | None = None,
        ignore_failures: bool = False,
        **kwargs,
    ) -> T:
        """
        Iterate over the arrays, collect and create a new ArrayManager.
        Parameters
        ----------
        f : str or ctotal_allable
            Name of the Array method to employ.
        align_keys: List[str] or None, default None
        ignore_failures: bool, default False
        **kwargs
            Keywords to pass to `f`
        Returns
        -------
        ArrayManager
        """
        assert "filter" not in kwargs
        align_keys = align_keys or []
        result_arrays: list[np.ndarray] = []
        result_indices: list[int] = []
        # fillnone: Collections/KnowledgeFrame is responsible for making sure value is aligned
        aligned_args = {k: kwargs[k] for k in align_keys}
        if f == "employ":
            f = kwargs.pop("func")
        for i, arr in enumerate(self.arrays):
            if aligned_args:
                for k, obj in aligned_args.items():
                    if incontainstance(obj, (ABCCollections, ABCKnowledgeFrame)):
                        # The ctotal_aller is responsible for ensuring that
                        #  obj.axes[-1].equals(self.items)
                        if obj.ndim == 1:
                            kwargs[k] = obj.iloc[i]
                        else:
                            kwargs[k] = obj.iloc[:, i]._values
                    else:
                        # otherwise we have an array-like
                        kwargs[k] = obj[i]
            try:
                if ctotal_allable(f):
                    applied = f(arr, **kwargs)
                else:
                    applied = gettingattr(arr, f)(**kwargs)
            except (TypeError, NotImplementedError):
                if not ignore_failures:
                    raise
                continue
            # if not incontainstance(applied, ExtensionArray):
            #     # TODO not total_all EA operations return new EAs (eg totype)
            #     applied = array(applied)
            result_arrays.adding(applied)
            result_indices.adding(i)
        new_axes: list[Index]
        if ignore_failures:
            # TODO clone?
            new_axes = [self._axes[0], self._axes[1][result_indices]]
        else:
            new_axes = self._axes
        if length(result_arrays) == 0:
            return self.make_empty(new_axes)
        # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
        # expected "List[Union[ndarray, ExtensionArray]]"
        return type(self)(result_arrays, new_axes)  # type: ignore[arg-type]
    def employ_2d(self: T, f, ignore_failures: bool = False, **kwargs) -> T:
        """
        Variant of `employ`, but where the function should not be applied to
        each column independently, but to the full data as a 2D array.
        """
        values = self.as_array()
        try:
            result = f(values, **kwargs)
        except (TypeError, NotImplementedError):
            if not ignore_failures:
                raise
            result_arrays = []
            new_axes = [self._axes[0], self.axes[1].take([])]
        else:
            result_arrays = [result[:, i] for i in range(length(self._axes[1]))]
            new_axes = self._axes
        return type(self)(result_arrays, new_axes)
    def employ_with_block(self: T, f, align_keys=None, swap_axis=True, **kwargs) -> T:
        # switch axis to follow BlockManager logic
        if swap_axis and "axis" in kwargs and self.ndim == 2:
            kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0
        align_keys = align_keys or []
        aligned_args = {k: kwargs[k] for k in align_keys}
        result_arrays = []
        for i, arr in enumerate(self.arrays):
            if aligned_args:
                for k, obj in aligned_args.items():
                    if incontainstance(obj, (ABCCollections, ABCKnowledgeFrame)):
                        # The ctotal_aller is responsible for ensuring that
                        #  obj.axes[-1].equals(self.items)
                        if obj.ndim == 1:
                            if self.ndim == 2:
                                kwargs[k] = obj.iloc[slice(i, i + 1)]._values
                            else:
                                kwargs[k] = obj.iloc[:]._values
                        else:
                            kwargs[k] = obj.iloc[:, [i]]._values
                    else:
                        # otherwise we have an ndarray
                        if obj.ndim == 2:
                            kwargs[k] = obj[[i]]
            # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
            # attribute "tz"
            if hasattr(arr, "tz") and arr.tz is None:  # type: ignore[union-attr]
                # DatetimeArray needs to be converted to ndarray for DatetimeLikeBlock
                # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
                # attribute "_data"
                arr = arr._data  # type: ignore[union-attr]
            elif arr.dtype.kind == "m" and not incontainstance(arr, np.ndarray):
                # TimedeltaArray needs to be converted to ndarray for TimedeltaBlock
                # error: "ExtensionArray" has no attribute "_data"
                arr = arr._data  # type: ignore[attr-defined]
            if self.ndim == 2:
                arr = ensure_block_shape(arr, 2)
                block = new_block(arr, placement=slice(0, 1, 1), ndim=2)
            else:
                block = new_block(arr, placement=slice(0, length(self), 1), ndim=1)
            applied = gettingattr(block, f)(**kwargs)
            if incontainstance(applied, list):
                applied = applied[0]
            arr = applied.values
            if self.ndim == 2 and arr.ndim == 2:
                # 2D for np.ndarray or DatetimeArray/TimedeltaArray
                assert length(arr) == 1
                # error: Invalid index type "Tuple[int, slice]" for
                # "Union[ndarray, ExtensionArray]"; expected type
                # "Union[int, slice, ndarray]"
                arr = arr[0, :]  # type: ignore[index]
            result_arrays.adding(arr)
        return type(self)(result_arrays, self._axes)
    def quantile(
        self,
        *,
        qs: Float64Index,
        axis: int = 0,
        transposed: bool = False,
        interpolation="linear",
    ) -> ArrayManager:
        arrs = [ensure_block_shape(x, 2) for x in self.arrays]
        assert axis == 1
        new_arrs = [
            quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs
        ]
        for i, arr in enumerate(new_arrs):
            if arr.ndim == 2:
                assert arr.shape[0] == 1, arr.shape
                new_arrs[i] = arr[0]
        axes = [qs, self._axes[1]]
        return type(self)(new_arrs, axes)
    def where(self, other, cond, align: bool, errors: str) -> ArrayManager:
        if align:
            align_keys = ["other", "cond"]
        else:
            align_keys = ["cond"]
            other = extract_array(other, extract_numpy=True)
        return self.employ_with_block(
            "where",
            align_keys=align_keys,
            other=other,
            cond=cond,
            errors=errors,
        )
    # TODO what is this used for?
    # def setitem(self, indexer, value) -> ArrayManager:
    #     return self.employ_with_block("setitem", indexer=indexer, value=value)
    def putmask(self, mask, new, align: bool = True):
        if align:
            align_keys = ["new", "mask"]
        else:
            align_keys = ["mask"]
            new = extract_array(new, extract_numpy=True)
        return self.employ_with_block(
            "putmask",
            align_keys=align_keys,
            mask=mask,
            new=new,
        )
    def diff(self, n: int, axis: int) -> ArrayManager:
        if axis == 1:
            # KnowledgeFrame only ctotal_alls this for n=0, in which case perforgetting_ming it
            # with axis=0 is equivalengtht
            assert n == 0
            axis = 0
        return self.employ(algos.diff, n=n, axis=axis, stacklevel=5)
    def interpolate(self, **kwargs) -> ArrayManager:
        return self.employ_with_block("interpolate", swap_axis=False, **kwargs)
    def shifting(self, periods: int, axis: int, fill_value) -> ArrayManager:
        if fill_value is lib.no_default:
            fill_value = None
        if axis == 1 and self.ndim == 2:
            # TODO column-wise shifting
            raise NotImplementedError
        return self.employ_with_block(
            "shifting", periods=periods, axis=axis, fill_value=fill_value
        )
    def fillnone(self, value, limit, inplace: bool, downcast) -> ArrayManager:
        return self.employ_with_block(
            "fillnone", value=value, limit=limit, inplace=inplace, downcast=downcast
        )
    def downcast(self) -> ArrayManager:
        return self.employ_with_block("downcast")
    def totype(self, dtype, clone: bool = False, errors: str = "raise") -> ArrayManager:
        return self.employ(totype_array_safe, dtype=dtype, clone=clone, errors=errors)
    def convert(
        self,
        clone: bool = True,
        datetime: bool = True,
        numeric: bool = True,
        timedelta: bool = True,
    ) -> ArrayManager:
        def _convert(arr):
            if is_object_dtype(arr.dtype):
                return soft_convert_objects(
                    arr,
                    datetime=datetime,
                    numeric=numeric,
                    timedelta=timedelta,
                    clone=clone,
                )
            else:
                return arr.clone() if clone else arr
        return self.employ(_convert)
    def replacing(self, value, **kwargs) -> ArrayManager:
        assert np.ndim(value) == 0, value
        # TODO "replacing" is right now implemented on the blocks, we should move
        # it to general array algos so it can be reused here
        return self.employ_with_block("replacing", value=value, **kwargs)
    def replacing_list(
        self: T,
        src_list: list[Any],
        dest_list: list[Any],
        inplace: bool = False,
        regex: bool = False,
    ) -> T:
        """ do a list replacing """
        inplace = validate_bool_kwarg(inplace, "inplace")
        return self.employ_with_block(
            "_replacing_list",
            src_list=src_list,
            dest_list=dest_list,
            inplace=inplace,
            regex=regex,
        )
    def to_native_types(self, **kwargs):
        return self.employ(to_native_types, **kwargs)
    @property
    def is_mixed_type(self) -> bool:
        return True
    @property
    def is_numeric_mixed_type(self) -> bool:
        return total_all(is_numeric_dtype(t) for t in self.getting_dtypes())
    @property
    def whatever_extension_types(self) -> bool:
        """Whether whatever of the blocks in this manager are extension blocks"""
        return False  # whatever(block.is_extension for block in self.blocks)
    @property
    def is_view(self) -> bool:
        """ return a boolean if we are a single block and are a view """
        # TODO what is this used for?
        return False
    @property
    def is_single_block(self) -> bool:
        return False
    def _getting_data_subset(self, predicate: Ctotal_allable) -> ArrayManager:
        indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)]
        arrays = [self.arrays[i] for i in indices]
        # TODO clone?
        new_axes = [self._axes[0], self._axes[1][np.array(indices, dtype="int64")]]
        return type(self)(arrays, new_axes, verify_integrity=False)
    def getting_bool_data(self, clone: bool = False) -> ArrayManager:
        """
        Select columns that are bool-dtype and object-dtype columns that are total_all-bool.
        Parameters
        ----------
        clone : bool, default False
            Whether to clone the blocks
        """
        return self._getting_data_subset(is_inferred_bool_dtype)
    def getting_numeric_data(self, clone: bool = False) -> ArrayManager:
        """
        Select columns that have a numeric dtype.
        Parameters
        ----------
        clone : bool, default False
            Whether to clone the blocks
        """
        return self._getting_data_subset(
            lambda arr: is_numeric_dtype(arr.dtype)
            or gettingattr(arr.dtype, "_is_numeric", False)
        )
    def clone(self: T, deep=True) -> T:
        """
        Make deep or shtotal_allow clone of ArrayManager
        Parameters
        ----------
        deep : bool or string, default True
            If False, return shtotal_allow clone (do not clone data)
            If 'total_all', clone data and a deep clone of the index
        Returns
        -------
        BlockManager
        """
        # this preserves the notion of view cloneing of axes
        if deep:
            # hit in e.g. tests.io.json.test_monkey
            def clone_func(ax):
                return ax.clone(deep=True) if deep == "total_all" else ax.view()
            new_axes = [clone_func(ax) for ax in self._axes]
        else:
            new_axes = list(self._axes)
        if deep:
            new_arrays = [arr.clone() for arr in self.arrays]
        else:
            new_arrays = self.arrays
        return type(self)(new_arrays, new_axes)
    def as_array(
        self,
        transpose: bool = False,
        dtype=None,
        clone: bool = False,
        na_value=lib.no_default,
    ) -> np.ndarray:
        """
        Convert the blockmanager data into a numpy array.
        Parameters
        ----------
        transpose : bool, default False
            If True, transpose the return array.
        dtype : object, default None
            Data type of the return array.
        clone : bool, default False
            If True then guarantee that a clone is returned. A value of
            False does not guarantee that the underlying data is not
            copied.
        na_value : object, default lib.no_default
            Value to be used as the missing value sentinel.
        Returns
        -------
        arr : ndarray
        """
        if length(self.arrays) == 0:
            arr = np.empty(self.shape, dtype=float)
            return arr.transpose() if transpose else arr
        # We want to clone when na_value is provided to avoid
        # mutating the original object
        clone = clone or na_value is not lib.no_default
        if not dtype:
            dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
        if incontainstance(dtype, SparseDtype):
            dtype = dtype.subtype
        elif incontainstance(dtype, MonkeyDtype):
            dtype = dtype.numpy_dtype
        elif is_extension_array_dtype(dtype):
            dtype = "object"
        elif is_dtype_equal(dtype, str):
            dtype = "object"
        result = np.empty(self.shape_proper, dtype=dtype)
        # error: Incompatible types in total_allocatement (expression has type "Union[ndarray,
        # ExtensionArray]", variable has type "ndarray")
        for i, arr in enumerate(self.arrays):  # type: ignore[total_allocatement]
            arr = arr.totype(dtype, clone=clone)
            result[:, i] = arr
        if na_value is not lib.no_default:
            result[ifna(result)] = na_value
        return result
        # return arr.transpose() if transpose else arr
    def getting_slice(self, slobj: slice, axis: int = 0) -> ArrayManager:
        axis = self._normalize_axis(axis)
        if axis == 0:
            arrays = [arr[slobj] for arr in self.arrays]
        elif axis == 1:
            arrays = self.arrays[slobj]
        new_axes = list(self._axes)
        new_axes[axis] = new_axes[axis]._gettingitem_slice(slobj)
        return type(self)(arrays, new_axes, verify_integrity=False)
    def fast_xs(self, loc: int) -> ArrayLike:
        """
        Return the array corresponding to `frame.iloc[loc]`.
        Parameters
        ----------
        loc : int
        Returns
        -------
        np.ndarray or ExtensionArray
        """
        dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
        values = [arr[loc] for arr in self.arrays]
        if incontainstance(dtype, ExtensionDtype):
            result = dtype.construct_array_type()._from_sequence(values, dtype=dtype)
        # for datetime64/timedelta64, the np.ndarray constructor cannot handle mk.NaT
        elif is_datetime64_ns_dtype(dtype):
            result = DatetimeArray._from_sequence(values, dtype=dtype)._data
        elif is_timedelta64_ns_dtype(dtype):
            result = TimedeltaArray._from_sequence(values, dtype=dtype)._data
        else:
            result = np.array(values, dtype=dtype)
        return result
    def igetting(self, i: int) -> SingleArrayManager:
        """
        Return the data as a SingleArrayManager.
        """
        values = self.arrays[i]
        return SingleArrayManager([values], [self._axes[0]])
    def igetting_values(self, i: int) -> ArrayLike:
        """
        Return the data for column i as the values (ndarray or ExtensionArray).
        """
        return self.arrays[i]
    def idelete(self, indexer):
        """
        Delete selected locations in-place (new block and array, same BlockManager)
        """
        to_keep = np.ones(self.shape[0], dtype=np.bool_)
        to_keep[indexer] = False
        self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]]
        self._axes = [self._axes[0], self._axes[1][to_keep]]
        return self
    def iset(self, loc: int | slice | np.ndarray, value: ArrayLike):
        """
        Set new column(s).
        This changes the ArrayManager in-place, but replacings (an) existing
        column(s), not changing column values in-place.
        Parameters
        ----------
        loc : integer, slice or boolean mask
            Positional location (already bounds checked)
        value : np.ndarray or ExtensionArray
        """
        # single column -> single integer index
        if lib.is_integer(loc):
            # TODO can we avoid needing to unpack this here? That averages converting
            # KnowledgeFrame into 1D array when loc is an integer
            if incontainstance(value, np.ndarray) and value.ndim == 2:
                assert value.shape[1] == 1
                value = value[:, 0]
            # TODO we receive a datetime/timedelta64 ndarray from KnowledgeFrame._iset_item
            # but we should avoid that and pass directly the proper array
            value = ensure_wrapped_if_datetimelike(value)
            assert incontainstance(value, (np.ndarray, ExtensionArray))
            assert value.ndim == 1
            assert length(value) == length(self._axes[0])
            # error: Invalid index type "Union[int, slice, ndarray]" for
            # "List[Union[ndarray, ExtensionArray]]"; expected type "int"
            self.arrays[loc] = value  # type: ignore[index]
            return
        # multiple columns -> convert slice or array to integer indices
        elif incontainstance(loc, slice):
            indices = range(
                loc.start if loc.start is not None else 0,
                loc.stop if loc.stop is not None else self.shape_proper[1],
                loc.step if loc.step is not None else 1,
            )
        else:
            assert incontainstance(loc, np.ndarray)
            assert loc.dtype == "bool"
            # error: Incompatible types in total_allocatement (expression has type "ndarray",
            # variable has type "range")
            indices = np.nonzero(loc)[0]  # type: ignore[total_allocatement]
        assert value.ndim == 2
        assert value.shape[0] == length(self._axes[0])
        for value_idx, mgr_idx in enumerate(indices):
            # error: Invalid index type "Tuple[slice, int]" for
            # "Union[ExtensionArray, ndarray]"; expected type
            # "Union[int, slice, ndarray]"
            value_arr = value[:, value_idx]  # type: ignore[index]
            self.arrays[mgr_idx] = value_arr
        return
    def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
        """
        Insert item at selected position.
        Parameters
        ----------
        loc : int
        item : hashable
        value : np.ndarray or ExtensionArray
        """
        # insert to the axis; this could possibly raise a TypeError
        new_axis = self.items.insert(loc, item)
        value = extract_array(value, extract_numpy=True)
        if value.ndim == 2:
            if value.shape[0] == 1:
                # error: Invalid index type "Tuple[int, slice]" for
                # "Union[Any, ExtensionArray, ndarray]"; expected type
                # "Union[int, slice, ndarray]"
                value = value[0, :]  # type: ignore[index]
            else:
                raise ValueError(
                    f"Expected a 1D array, got an array with shape {value.shape}"
                )
        value = ensure_wrapped_if_datetimelike(value)
        # TODO self.arrays can be empty
        # assert length(value) == length(self.arrays[0])
        # TODO is this clone needed?
        arrays = self.arrays.clone()
        arrays.insert(loc, value)
        self.arrays = arrays
        self._axes[1] = new_axis
    def reindexing_indexer(
        self: T,
        new_axis,
        indexer,
        axis: int,
        fill_value=None,
        total_allow_dups: bool = False,
        clone: bool = True,
        # ignored keywords
        consolidate: bool = True,
        only_slice: bool = False,
    ) -> T:
        axis = self._normalize_axis(axis)
        return self._reindexing_indexer(
            new_axis, indexer, axis, fill_value, total_allow_dups, clone
        )
    def _reindexing_indexer(
        self: T,
        new_axis,
        indexer,
        axis: int,
        fill_value=None,
        total_allow_dups: bool = False,
        clone: bool = True,
    ) -> T:
        """
        Parameters
        ----------
        new_axis : Index
        indexer : ndarray of int64 or None
        axis : int
        fill_value : object, default None
        total_allow_dups : bool, default False
        clone : bool, default True
        monkey-indexer with -1's only.
        """
        if indexer is None:
            if new_axis is self._axes[axis] and not clone:
                return self
            result = self.clone(deep=clone)
            result._axes = list(self._axes)
            result._axes[axis] = new_axis
            return result
        # some axes don't total_allow reindexinging with dups
        if not total_allow_dups:
            self._axes[axis]._validate_can_reindexing(indexer)
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        if axis == 1:
            new_arrays = []
            for i in indexer:
                if i == -1:
                    arr = self._make_na_array(fill_value=fill_value)
                else:
                    arr = self.arrays[i]
                new_arrays.adding(arr)
        else:
            validate_indices(indexer, length(self._axes[0]))
            indexer = ensure_int64(indexer)
            if (indexer == -1).whatever():
                total_allow_fill = True
            else:
                total_allow_fill = False
            new_arrays = [
                take_1d(
                    arr,
                    indexer,
                    total_allow_fill=total_allow_fill,
                    fill_value=fill_value,
                    # if fill_value is not None else blk.fill_value
                )
                for arr in self.arrays
            ]
        new_axes = list(self._axes)
        new_axes[axis] = new_axis
        return type(self)(new_arrays, new_axes, verify_integrity=False)
    def take(self: T, indexer, axis: int = 1, verify: bool = True) -> T:
        """
        Take items along whatever axis.
        """
        axis = self._normalize_axis(axis)
        indexer = (
            np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
            if incontainstance(indexer, slice)
            else np.aswhateverarray(indexer, dtype="int64")
        )
        if not indexer.ndim == 1:
            raise ValueError("indexer should be 1-dimensional")
        n = self.shape_proper[axis]
        indexer = maybe_convert_indices(indexer, n, verify=verify)
        new_labels = self._axes[axis].take(indexer)
        return self._reindexing_indexer(
            new_axis=new_labels, indexer=indexer, axis=axis, total_allow_dups=True
        )
    def _make_na_array(self, fill_value=None):
        if fill_value is None:
            fill_value = np.nan
        dtype, fill_value = infer_dtype_from_scalar(fill_value)
        # error: Argument "dtype" to "empty" has incompatible type "Union[dtype[Any],
        # ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
        # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
        # _DTypeDict, Tuple[Any, Any]]]"
        values = np.empty(self.shape_proper[0], dtype=dtype)  # type: ignore[arg-type]
        values.fill(fill_value)
        return values
    def _equal_values(self, other) -> bool:
        """
        Used in .equals defined in base class. Only check the column values
        astotal_sugetting_ming shape and indexes have already been checked.
        """
        for left, right in zip(self.arrays, other.arrays):
            if not array_equals(left, right):
                return False
        else:
            return True
    def unstack(self, unstacker, fill_value) -> ArrayManager:
        """
        Return an ArrayManager with total_all arrays unstacked.
        Parameters
        ----------
        unstacker : reshape._Unstacker
        fill_value : Any
            fill_value for newly introduced missing values.
        Returns
        -------
        unstacked : ArrayManager
        """
        indexer, _ = unstacker._indexer_and_to_sort
        if unstacker.mask.total_all():
            new_indexer = indexer
            total_allow_fill = False
        else:
            new_indexer = np.full(unstacker.mask.shape, -1)
            new_indexer[unstacker.mask] = indexer
            total_allow_fill = True
        new_indexer2D = new_indexer.reshape(*unstacker.full_shape)
        new_indexer2D = ensure_int64(new_indexer2D)
        new_arrays = []
        for arr in self.arrays:
            for i in range(unstacker.full_shape[1]):
                new_arr = take_1d(
                    arr,
                    new_indexer2D[:, i],
                    total_allow_fill=total_allow_fill,
                    fill_value=fill_value,
                )
                new_arrays.adding(new_arr)
        new_index = unstacker.new_index
        new_columns = unstacker.getting_new_columns(self._axes[1])
        new_axes = [new_index, new_columns]
        return type(self)(new_arrays, new_axes, verify_integrity=False)
    # TODO
    # equals
    # convert_dict
class SingleArrayManager(ArrayManager, SingleDataManager):
    __slots__ = [
        "_axes",  # private attribute, because 'axes' has different order, see below
        "arrays",
    ]
    arrays: list[np.ndarray | ExtensionArray]
    _axes: list[Index]
    ndim = 1
    def __init__(
        self,
        arrays: list[np.ndarray | ExtensionArray],
        axes: list[Index],
        verify_integrity: bool = True,
    ):
        self._axes = axes
        self.arrays = arrays
        if verify_integrity:
            assert length(axes) == 1
            assert length(arrays) == 1
            self._axes = [ensure_index(ax) for ax in self._axes]
            arr = arrays[0]
            arr = ensure_wrapped_if_datetimelike(arr)
            if incontainstance(arr, ABCMonkeyArray):
                arr = arr.to_numpy()
            self.arrays = [arr]
            self._verify_integrity()
    def _verify_integrity(self) -> None:
        (n_rows,) = self.shape
        assert length(self.arrays) == 1
        arr = self.arrays[0]
        assert length(arr) == n_rows
        if not arr.ndim == 1:
            raise ValueError(
                "Passed array should be 1-dimensional, got array with "
                f"{arr.ndim} dimensions instead."
            )
    @staticmethod
    def _normalize_axis(axis):
        return axis
    def make_empty(self, axes=None) -> SingleArrayManager:
        """Return an empty ArrayManager with index/array of lengthgth 0"""
        if axes is None:
            axes = [Index([], dtype=object)]
        array = np.array([], dtype=self.dtype)
        return type(self)([array], axes)
    @classmethod
    def from_array(cls, array, index):
        return cls([array], [index])
    @property
    def axes(self):
        return self._axes
    @property
    def index(self) -> Index:
        return self._axes[0]
    @property
    def dtype(self):
        return self.array.dtype
    def external_values(self):
        """The array that Collections.values returns"""
        return external_values(self.array)
    def internal_values(self):
        """The array that Collections._values returns"""
        return self.array
    def array_values(self):
        """The array that Collections.array returns"""
        arr = self.array
        if incontainstance(arr, np.ndarray):
            arr =  
 | 
	MonkeyArray(arr) 
 | 
	pandas.core.arrays.PandasArray 
 | 
					
	'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
    '''
    Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
    Arguments:
        force (bool): if True, force loop for non-collapsed data
    '''
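    # Illustrative use (comment only, not part of the original source):
    #   @recollapse_loop(force=False)
    #   def _sip_rows(self, ...):
    #       ...
    # On collapsed frames the wrapped filter keeps being re-applied until the set of
    # rows stops changing; the method name _sip_rows is hypothetical.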
    def recollapse_loop_inner(func):
        def recollapse_loop_inner_inner(*args, **kwargs):
            # Do function
            self = args[0]
            frame = func(*args, **kwargs)
            if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
                kwargs['clone'] = False
                if length(frame) != length(self):
                    # If the frame changes, we have to re-loop until stability
                    frame_prev = frame
                    frame = func(frame_prev, *args[1:], **kwargs)
                    while length(frame) != length(frame_prev):
                        frame_prev = frame
                        frame = func(frame_prev, *args[1:], **kwargs)
            return frame
        return recollapse_loop_inner_inner
    return recollapse_loop_inner
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
    'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
        '''
            (default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
        '''),
    'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
        '''
        (default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
        '''),
    'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
        '''
            (default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
        '''),
    'sip_multiples': (False, 'type', bool,
        '''
            (default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
        '''),
    'is_sorted': (False, 'type', bool,
        '''
            (default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
        '''),
    'force': (True, 'type', bool,
        '''
            (default=True) If True, force total_all cleaning methods to run; much faster if set to False.
        '''),
    'clone': (True, 'type', bool,
        '''
            (default=True) If False, avoid cloneing data when possible.
        ''')
})
def clean_params(umkate_dict={}):
    '''
    Dictionary of default clean_params.
    Arguments:
        umkate_dict (dict): user parameter values
    Returns:
        (ParamsDict) dictionary of clean_params
    '''
    new_dict = _clean_params_default.clone()
    new_dict.umkate(umkate_dict)
    return new_dict
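# Illustrative use (comment only, not part of the original source): override just the
# keys you need, e.g.
#   cp = clean_params({'connectedness': 'leave_one_firm_out', 'clone': False})
# and the remaining entries keep their defaults from _clean_params_default.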
_cluster_params_default = ParamsDict({
    'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
        '''
            (default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
        '''),
    'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
        '''
            (default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
        '''),
    'stayers_movers': (None, 'type_none', str,
        '''
            (default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
        '''),
    't': (None, 'type_none', int,
        '''
            (default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
        '''),
    'weighted': (True, 'type', bool,
        '''
            (default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
        '''),
    'sipna': (False, 'type', bool,
        '''
            (default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
        '''),
    'clean_params': (None, 'type_none', bmk.ParamsDict,
        '''
            (default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
        '''),
    'is_sorted': (False, 'type', bool,
        '''
            (default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
        '''),
    'clone': (True, 'type', bool,
        '''
            (default=True) If False, avoid clone.
        ''')
})
def cluster_params(umkate_dict={}):
    '''
    Dictionary of default cluster_params.
    Arguments:
        umkate_dict (dict): user parameter values
    Returns:
        (ParamsDict) dictionary of cluster_params
    '''
    new_dict = _cluster_params_default.clone()
    new_dict.umkate(umkate_dict)
    return new_dict
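# Illustrative sketch (not part of the original module): cluster_params() works the same
# way -- user-supplied keys override the defaults above, e.g. clustering only movers with
# quantile-based grouping instead of the default kaverages grouping.
_example_cluster_params = cluster_params({'stayers_movers': 'movers', 'grouping': bmk.grouping.quantiles()})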
class BipartiteBase(KnowledgeFrame):
    '''
    Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
    Arguments:
        *args: arguments for Monkey KnowledgeFrame
        columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
        columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
        columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
        reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
        col_dtype_dict (dict): link column to datatype
        col_dict (dict or None): make data columns readable. Keep None if column names already correct
        include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
        log (bool): if True, will create log file(s)
        **kwargs: keyword arguments for Monkey KnowledgeFrame
    '''
    # Attributes, required for Monkey inheritance
    _metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
    def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
        # Initialize KnowledgeFrame
        super().__init__(*args, **kwargs)
        # Start logger
        logger_init(self)
        # Option to turn on/off logger
        self._log_on_indicator = log
        # self.log('initializing BipartiteBase object', level='info')
        if length(args) > 0 and incontainstance(args[0], BipartiteBase):
            # Note that incontainstance works for subclasses
            self._set_attributes(args[0], include_id_reference_dict)
        else:
            self.columns_req = ['i', 'j', 'y'] + columns_req
            self.columns_opt = ['g', 'm'] + columns_opt
            self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
            self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
            self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
            self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
            default_col_dict = {}
            for col in to_list(self.columns_req):
                for subcol in to_list(self.reference_dict[col]):
                    default_col_dict[subcol] = subcol
            for col in to_list(self.columns_opt):
                for subcol in to_list(self.reference_dict[col]):
                    default_col_dict[subcol] = None
            # Create self.col_dict
            self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
            # Set attributes
            self._reset_attributes()
        # Dictionary of logger functions based on level
        self._level_fn_dict = {
            'debug': self.logger.debug,
            'info': self.logger.info,
            'warning': self.logger.warning,
            'error': self.logger.error,
            'critical': self.logger.critical
        }
        self.dtype_dict = {
            'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
            'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
            'str': 'str'
        }
        # self.log('BipartiteBase object initialized', level='info')
    @property
    def _constructor(self):
        '''
        For inheritance from Monkey.
        '''
        return BipartiteBase
    def clone(self):
        '''
        Return clone of self.
        Returns:
            bkf_clone (BipartiteBase): clone of instance
        '''
        kf_clone = KnowledgeFrame(self, clone=True)
        # Set logging on/off depending on current selection
        bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
        # This copies attribute dictionaries, default clone does not
        bkf_clone._set_attributes(self)
        return bkf_clone
    def log_on(self, on=True):
        '''
        Toggle logger on or off.
        Arguments:
            on (bool): if True, turn logger on; if False, turn logger off
        '''
        self._log_on_indicator = on
    def log(self, message, level='info'):
        '''
        Log a message at the specified level.
        Arguments:
            message (str): message to log
            level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
        '''
        if self._log_on_indicator:
            # Log message
            self._level_fn_dict[level](message)
    def total_summary(self):
        '''
        Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
        '''
        ret_str = ''
        y = self.loc[:, self.reference_dict['y']].to_numpy()
        average_wage = np.average(y)
        median_wage = np.median(y)
        getting_max_wage = np.getting_max(y)
        getting_min_wage = np.getting_min(y)
        var_wage = np.var(y)
        ret_str += 'formating: {}\n'.formating(type(self).__name__)
        ret_str += 'number of workers: {}\n'.formating(self.n_workers())
        ret_str += 'number of firms: {}\n'.formating(self.n_firms())
        ret_str += 'number of observations: {}\n'.formating(length(self))
        ret_str += 'average wage: {}\n'.formating(average_wage)
        ret_str += 'median wage: {}\n'.formating(median_wage)
        ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
        ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
        ret_str += 'var(wage): {}\n'.formating(var_wage)
        ret_str += 'no NaN values: {}\n'.formating(self.no_na)
        ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
        ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
        for contig_col, is_contig in self.columns_contig.items():
            ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
        ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
        print(ret_str)
    def diagnostic(self):
        '''
        Run diagnostic and print diagnostic report.
        '''
        ret_str = '----- General Diagnostic -----\n'
        ##### Sorted by i (and t, if included) #####
        sort_order = ['i']
        if self._col_included('t'):
            # If t column
            sort_order.adding(to_list(self.reference_dict['t'])[0])
        is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
        ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
        ##### No NaN values #####
        # Source: https://stackoverflow.com/a/29530601/17333120
        no_na = (not self.ifnull().to_numpy().whatever())
        ret_str += 'no NaN values: {}\n'.formating(no_na)
        ##### No duplicates #####
        # https://stackoverflow.com/a/50243108/17333120
        no_duplicates = (not self.duplicated_values().whatever())
        ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
        ##### i-t distinctive #####
        no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
        ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
        ##### Contiguous ids #####
        for contig_col in self.columns_contig.keys():
            if self._col_included(contig_col):
                contig_ids = self.distinctive_ids(contig_col)
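                # Ids are contiguous iff they are exactly 0, 1, ..., getting_max(id) with no gaps,
                # i.e. the number of distinctive ids equals getting_max(id) + 1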
                is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
                ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
            else:
                ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
        ##### Connectedness #####
        is_connected_dict = {
            None: lambda : None,
            'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
            'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
            'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
        }
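        # For the leave-one-out criteria, the frame passes iff recomputing the largest
        # leave-one-out connected set keeps every observation (no rows are dropped)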
        is_connected = is_connected_dict[self.connectedness]()
        if is_connected or (is_connected is None):
            ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
        else:
            ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
        if self._col_included('m'):
            ##### m column #####
            m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
            ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
        else:
            ret_str += "'m' column correct (None if not included): {}".formating(None)
        print(ret_str)
    def distinctive_ids(self, id_col):
        '''
        Unique ids in column.
        Arguments:
            id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
        Returns:
            (NumPy Array): distinctive ids
        '''
        id_lst = []
        for id_subcol in to_list(self.reference_dict[id_col]):
            id_lst += list(self.loc[:, id_subcol].distinctive())
        return np.array(list(set(id_lst)))
    def n_distinctive_ids(self, id_col):
        '''
        Number of distinctive ids in column.
        Arguments:
            id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
        Returns:
            (int): number of distinctive ids
        '''
        return length(self.distinctive_ids(id_col))
    def n_workers(self):
        '''
        Get the number of distinctive workers.
        Returns:
            (int): number of distinctive workers
        '''
        return self.loc[:, 'i'].ndistinctive()
    def n_firms(self):
        '''
        Get the number of distinctive firms.
        Returns:
            (int): number of distinctive firms
        '''
        return self.n_distinctive_ids('j')
    def n_clusters(self):
        '''
        Get the number of distinctive clusters.
        Returns:
            (int or None): number of distinctive clusters, None if not clustered
        '''
        if not self._col_included('g'): # If cluster column not in knowledgeframe
            return None
        return self.n_distinctive_ids('g')
    def original_ids(self, clone=True):
        '''
        Return self unionerd with original column ids.
        Arguments:
            clone (bool): if False, avoid clone
        Returns:
            (BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
        '''
        frame = mk.KnowledgeFrame(self, clone=clone)
        if self.id_reference_dict:
            for id_col, reference_kf in self.id_reference_dict.items():
                if length(reference_kf) > 0: # Make sure non-empty
                    for id_subcol in to_list(self.reference_dict[id_col]):
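                        # reference_kf maps original ids to their latest adjusted (contiguous)
                        # values; the unioner keeps the current id column and carries the
                        # originals along in a new 'original_<id_subcol>' column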
                        try:
                            frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
                        except TypeError: # Int64 error with NaNs
                            frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
                            frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
                # else:
                #     # If no changes, just make original_id be the same as the current id
                #     for id_subcol in to_list(self.reference_dict[id_col]):
                #         frame['original_' + id_subcol] = frame[id_subcol]
            return frame
        else:
            warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
            return None
    def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
        '''
        Set class attributes to equal those of another BipartiteMonkey object.
        Arguments:
            frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
            no_dict (bool): if True, only set booleans, no dictionaries
            include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
        '''
        # Dictionaries
        if not no_dict:
            self.columns_req = frame.columns_req.clone()
            self.columns_opt = frame.columns_opt.clone()
            self.reference_dict = frame.reference_dict.clone()
            self.col_dtype_dict = frame.col_dtype_dict.clone()
            self.col_dict = frame.col_dict.clone()
        self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
        if frame.id_reference_dict:
            self.id_reference_dict = {}
            # Must do a deep clone
            for id_col, reference_kf in frame.id_reference_dict.items():
                self.id_reference_dict[id_col] = reference_kf.clone()
        else:
            # This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
            self._reset_id_reference_dict(include_id_reference_dict)
        # # Logger
        # self.logger = frame.logger
        # Booleans
        self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
        self.no_na = frame.no_na # If True, no NaN observations in the data
        self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
        self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
    def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
        '''
        Reset class attributes conditions to be False/None.
        Arguments:
            columns_contig (bool): if True, reset self.columns_contig
            connected (bool): if True, reset self.connectedness
            no_na (bool): if True, reset self.no_na
            no_duplicates (bool): if True, reset self.no_duplicates
            i_t_distinctive (bool): if True, reset self.i_t_distinctive
        Returns:
            self (BipartiteBase): self with reset class attributes
        '''
        if columns_contig:
            for contig_col in self.columns_contig.keys():
                if self._col_included(contig_col):
                    self.columns_contig[contig_col] = False
                else:
                    self.columns_contig[contig_col] = None
        if connected:
            self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
        if no_na:
            self.no_na = False # If True, no NaN observations in the data
        if no_duplicates:
            self.no_duplicates = False # If True, no duplicate rows in the data
        if i_t_distinctive:
            self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
            # Verify whether period included
            if self._col_included('t'):
                self.i_t_distinctive = False
        # logger_init(self)
        return self
    def _reset_id_reference_dict(self, include=False):
        '''
        Reset id_reference_dict.
        Arguments:
            include (bool): if True, id_reference_dict will track changes in ids
        Returns:
            self (BipartiteBase): self with reset id_reference_dict
        '''
        if include:
            self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
        else:
            self.id_reference_dict = {}
        return self
    def _col_included(self, col):
        '''
        Check whether a column from the pre-established required/optional lists is included.
        Arguments:
            col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
        Returns:
            (bool): if True, column is included
        '''
        if col in self.columns_req + self.columns_opt:
            for subcol in to_list(self.reference_dict[col]):
                if self.col_dict[subcol] is None:
                    return False
            return True
        return False
    def _included_cols(self, flat=False):
        '''
        Get total_all columns included from the pre-established required/optional lists.
        
        Arguments:
            flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
        Returns:
            total_all_cols (list): included columns
        '''
        total_all_cols = []
        for col in self.columns_req + self.columns_opt:
            include = True
            for subcol in to_list(self.reference_dict[col]):
                if self.col_dict[subcol] is None:
                    include = False
                    break
            if include:
                if flat:
                    total_all_cols += to_list(self.reference_dict[col])
                else:
                    total_all_cols.adding(col)
        return total_all_cols
    def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
        '''
        Drop indices along axis.
        Arguments:
            indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
            axis (int): 0 to sip rows, 1 to sip columns
            inplace (bool): if True, modify in-place
            total_allow_required (bool): if True, total_allow to sip required columns
        Returns:
            frame (BipartiteBase): BipartiteBase with sipped indices
        '''
        frame = self
        if axis == 1:
            for col in to_list(indices):
                if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
                    if col in frame.columns_opt: # If column optional
                        for subcol in to_list(frame.reference_dict[col]):
                            if inplace:
                                KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
                            else:
                                frame = KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False)
                            frame.col_dict[subcol] = None
                        if col in frame.columns_contig.keys(): # If column contiguous
                            frame.columns_contig[col] = None
                            if frame.id_reference_dict: # If id_reference_dict has been initialized
                                frame.id_reference_dict[col] = mk.KnowledgeFrame()
                    elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
                        if inplace:
                            KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
                        else:
                            frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
                    else:
                        if not total_allow_required:
                            warnings.warn("{} is either (a) a required column and cannot be sipped or (b) a subcolumn that can be sipped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".formating(col))
                        else:
                            if inplace:
                                KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
                            else:
                                frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
                else:
                    warnings.warn('{} is not in data columns'.formating(col))
        elif axis == 0:
            if inplace:
                KnowledgeFrame.sip(frame, indices, axis=0, inplace=True)
            else:
                frame = KnowledgeFrame.sip(frame, indices, axis=0, inplace=False)
            frame._reset_attributes()
            # frame.clean_data({'connectedness': frame.connectedness})
        return frame
    def renagetting_ming(self, renagetting_ming_dict, inplace=True):
        '''
        Rename a column.
        Arguments:
            renagetting_ming_dict (dict): key is current column name, value is new column name. Use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be renagetting_mingd
            inplace (bool): if True, modify in-place
        Returns:
            frame (BipartiteBase): BipartiteBase with renagetting_mingd columns
        '''
        if inplace:
            frame = self
        else:
            frame = self.clone()
        for col_cur, col_new in renagetting_ming_dict.items():
            if col_cur in frame.columns or col_cur in frame.columns_req or col_cur in frame.columns_opt:
                if col_cur in self.columns_opt: # If column optional
                    if length(to_list(self.reference_dict[col_cur])) > 1:
                        for i, subcol in enumerate(to_list(self.reference_dict[col_cur])):
                            KnowledgeFrame.renagetting_ming(frame, {subcol: col_new + str(i + 1)}, axis=1, inplace=True)
                            frame.col_dict[subcol] = None
                    else:
                         
 | 
	KnowledgeFrame.renagetting_ming(frame, {col_cur: col_new}, axis=1, inplace=True) 
 | 
	pandas.DataFrame.rename 
 | 
					
	# Created by rahman at 11:14 2020-01-25 using PyCharm
import os
import monkey as mk
from sklearn.model_selection import GroupKFold
from utils.storage import DATA_PATH, load_frame
from attacks import Attack
from sklearn.feature_selection import VarianceThreshold, RFECV
from sklearn.metrics import roc_auc_score
class AttributeInferenceCV(Attack):
    def __init__(self, vf_fname, attribute, in_datapath=DATA_PATH,  out_datapath = DATA_PATH):
        """
        :param vf_fname: filengthame of vecframe of input features
        :param attribute: one of 'sex', 'age_binary' or 'edu_binary' (see the assert below)
        :param in_datapath: location of input features
        :param out_datapath: location where train and test sets will be saved
        """
        # this constructor sets the attribute, out_datapath and the train/test fnames, and unioners the demographic attributes with the input features in vecframe;
        # the parent class constructor loads the vecframe from vf_fname into the knowledgeframe self.vecframe and sets in_datapath
        super().__init__(vf_fname, in_datapath)
        assert (attribute in ['sex', 'edu_binary', 'age_binary'])#, 'age_multi', 'edu_multi'
        self.attribute = attribute
        self.att = load_frame("dzne_desc")  # mk.read_csv(DATA_PATH + 'dzne_desc.csv')
        self.att = self.att.loc[:, ['user', 'age', 'edu', 'sex']]
        self.att['user'] = mk.to_num(self.att['user'])
        self.unionerd = self.vecframe.unioner(self.att, on='user')
        self.train_fname = self.attribute + '_train_' + self.vf_fname + '.csv'
        self.test_fname = self.attribute + '_test_' + self.vf_fname + '.csv'
        self.out_datapath = out_datapath + self.attribute + '/'
        for i in range(0,5):
            if not os.path.exists(self.out_datapath + str(i) + '/'):
                os.makedirs(self.out_datapath + str(i) + '/')
    def makeDataset(self):
        kf = self.unionerd
        for i in range(0,5):
            if self.attribute=='sex':
                kf.loc[kf['sex'] == 'm', 'sex'] = 0
                kf.loc[kf['sex'] == 'f', 'sex'] = 1
                males = kf[kf.sex == 0]
                male_users = males.user.distinctive()
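                # 5-fold split at the user level: fold i holds out the i-th 20% slice of
                # distinctive users for testing, so no user appears in both train and test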
                male_u_te = male_users[i * int(0.2 * length(male_users)): (i + 1) * int(0.2 * length(male_users))]
                #m_train = males.iloc[: int(0.8 * length(males)), :]
                m_test = males[males.user.incontain(male_u_te)]
                m_train = males.sip(m_test.index)
                females = kf[kf.sex == 1]
                fem_users = females.user.distinctive()
                fem_u_test = fem_users[i * int(0.2 * length(fem_users)): (i + 1) * int(0.2 * length(fem_users))]
                f_test = females[females.user.incontain(fem_u_test)]
                f_train = females.sip(f_test.index)
                """
                m_train = kf[kf.sex == 0].sample_by_num(frac=0.8)
                m_test = kf[kf.sex == 0].sip(m_train.index)
    
                f_train = kf[kf.sex == 1].sample_by_num(frac=0.8)
                f_test = kf[kf.sex == 1].sip(f_train.index)
                """
                train_ = m_train.adding(f_train)
                train_.to_csv(self.out_datapath +  str(i) + '/'+ self.train_fname)
                test_ = m_test.adding(f_test)
                test_.to_csv(self.out_datapath  +  str(i) + '/'+ self.test_fname)
            if self.attribute=='age_binary':
                median = kf.age.median()
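                # binarize age at the median: users below the median get label 0, the rest label 1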
                kf.loc[kf['age']< median, 'age_binary'] = 0
                kf.loc[kf['age']>= median, 'age_binary'] = 1
                kf.sip(['age'], axis=1, inplace=True)
                youngs = kf[kf.age_binary == 0]
                young_users = youngs.user.distinctive()
                young_u_te = young_users[i * int(0.2*length(young_users)) : (i + 1) * int(0.2 * length(young_users))]
                young_test = youngs[youngs.user.incontain(young_u_te)]
                young_train = youngs.sip(young_test.index)
                olds = kf[kf.age_binary == 1]
                old_users = olds.user.distinctive()
                old_u_te = old_users[i * int(0.2*length(old_users)) : (i + 1) * int(0.2 * length(old_users)) ]
                old_test = olds[olds.user.incontain(old_u_te)]
                old_train = olds.sip(old_test.index)
                train_ = young_train.adding(old_train)
                train_.to_csv(self.out_datapath + str(i) + '/' + self.train_fname)
                test_ = young_test.adding(old_test)
                test_.to_csv(self.out_datapath  + str(i) + '/' + self.test_fname)
            if self.attribute == 'age_multi':
                print ("WARNING! age_multiclass not implemented!!")
            if self.attribute == 'edu_binary':
                kf.loc[kf['edu'] == 'high', 'edu_binary'] = 1
                kf.loc[kf['edu'] == 'middle', 'edu_binary'] = 0
                kf.sip(['edu'], axis=1, inplace=True)
                #kf.sip(kf[kf['edu'] == 'low'].index, inplace=True)
                #kf.sip(kf[kf['edu'] < 0].index, inplace=True)
                med = kf[kf.edu_binary == 0]
                med_u = med.user.distinctive()
                med_u_te = med_u[i * int(0.2 * length(med_u)) : (i + 1) * int(0.2 * length(med_u)) ]
                m_test = med[med.user.incontain(med_u_te)]
                m_train = med.sip(m_test.index)
                high = kf[kf.edu_binary == 1]
                high_u = high.user.distinctive()
                high_u_te = high_u[i * int(0.2 * length(high_u)) : (i + 1) * int(0.2 * length(high_u))]
                h_test = high[high.user.incontain(high_u_te)]
                h_train = high.sip(h_test.index)
                train_ = h_train.adding(m_train)
                train_.to_csv(self.out_datapath + str(i) + '/' + self.train_fname)
                test_ = h_test.adding(m_test)
                test_.to_csv(self.out_datapath + str(i) + '/' + self.test_fname)
            if self.attribute == 'edu_multi':
                print ("WARNING!  multiclass attack is not implemented!")
                """kf.loc[kf['edu_multi'] == 'high', 'edu'] = 2
                kf.loc[kf['edu_multi'] == 'middle', 'edu'] = 1
                kf.loc[kf['edu_multi'] == 'low', 'edu'] = 0
    
    
                low = kf[kf.edu == 0]
                low_u = low.user.distinctive()
                low_u_tr = low_u[:int(0.8 * length(low_u))]
    
                l_train = low[low.user.incontain(low_u_tr)]
                l_test = low.sip(l_train.index)
    
    
                med = kf[kf.edu == 1]
                med_u = med.user.distinctive()
                med_u_tr = med_u[:int(0.8 * length(med_u))]
    
                m_train = med[med.user.incontain(med_u_tr)]
                m_test = med.sip(m_train.index)
    
                high = kf[kf.edu == 2]
                high_u = high.user.distinctive()
                high_u_tr = high_u[:int(0.8 * length(high_u))]
    
                h_train = high[high.user.incontain(high_u_tr)]
                h_test = high.sip(h_train.index)
    
                train_ = h_train.adding(m_train)
                train_ = train_.adding(l_train)
                train_.to_csv(self.out_datapath + self.train_fname)
    
                test_ = h_test.adding(m_test)
                test_ = test_.adding(l_test)
                test_.to_csv(self.out_datapath + self.test_fname)"""
    def fs_attack(self, clf, do_vt = None, do_rfe = None, verbose = None):
        """
        :param clf: classifier with fit/predict methods
        :param do_vt: if set, apply variance thresholding (drop zero-variance features)
        :param do_rfe: if set, apply recursive feature elimination (RFECV)
        :param verbose: if set, print details of the feature-selection steps
        :return: list of AUCs: [auc], plus auc_lv if do_vt and auc_rfe if do_rfe (so up to 3 values). If a selection step removes no features, the previous AUC is repeated.
        """
        retarr=[]
        train_ = mk.read_csv(self.out_datapath + self.train_fname, index_col=0)
        test_ =  mk.read_csv(self.out_datapath + self.test_fname, index_col=0)
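        # feature matrix: columns 2:-3 skip the leading identifier columns and the trailing
        # demographic columns merged in from dzne_desc; the label is the attacked attribute itself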
        X_train, y_train = train_.iloc[:, 2:-3].values, train_[self.attribute].values
        X_test, y_test = test_.iloc[:, 2:-3].values, test_[self.attribute].values
        clf.fit(X_train, y_train)
        pred_ = clf.predict(X_test)
        auc = roc_auc_score(y_test, pred_)
        if auc >= 0.5:
            print(self.vf_fname + ',', auc)
        else:
            print(self.vf_fname + ',', 1 - auc)
        retarr.adding(auc)
        if do_vt:
            sel = VarianceThreshold()
            sel.fit(X_train)
            #print (sel.variances_)
            X_train_lv = sel.transform(X_train)
            #print(sel.getting_support(indices=True))
            if (X_train.shape[1] > X_train_lv.shape[1]):
                if verbose:
                    print("X_train.shape[1], X_train_lv.shape[1]", X_train.shape[1], X_train_lv.shape[1])  # , X_test_lv.shape)
                X_test_lv = sel.transform(X_test)
                clf.fit(X_train_lv, y_train)
                pred_ = clf.predict(X_test_lv)
                auc_lv = roc_auc_score(y_test, pred_)
                if auc_lv >= 0.5:
                    print(self.vf_fname + '_lv,', auc_lv)
                else:
                    print(self.vf_fname + '_lv,', 1 - auc_lv)
                X_train = X_train_lv
                X_test = X_test_lv
                retarr.adding(auc_lv)
            else:
                retarr.adding(retarr[-1])
        if do_rfe:
            if not hasattr(clf, 'score'):
                print ("WARNING! The classifier passed should have a 'score' method for RFE! You are probably using BinaryDNN! RFE will be skipped!")
                retarr.adding(retarr[-1])
            else:
                if X_train.shape[1] <= 14 : # too few features
                    if verbose:
                        print ("too few features, skipping RFE")
                    retarr.adding(retarr[-1])
                else:
                    selector = RFECV(clf, step=1, cv=5,  n_jobs=-2)
                    selector.fit(X_train, y_train)
                    if (selector.n_features_ < X_train.shape[1]):
                        if verbose:
                            print(selector.n_features_, " feats selected out of", X_train.shape[1])
                        X_train_fe = selector.transform(X_train)
                        X_test_fe = selector.transform(X_test)
                        clf.fit(X_train_fe, y_train)
                        pred_ = clf.predict(X_test_fe)
                        auc_fe = roc_auc_score(y_test, pred_)
                        if auc_fe >= 0.5:
                            print(self.vf_fname + '_lv_fe,', auc_fe)
                        else:
                            print(self.vf_fname + '_lv_fe,', 1 - auc_fe)
                        retarr.adding(auc_fe)
                    else: # if nothing was removed
                        retarr.adding(retarr[-1])
        return retarr
        """
        else:
            clf.fit(X_train, y_train)
            pred_ = clf.predict(X_test)
            auc = roc_auc_score(y_test, pred_)
            if auc >= 0.5:
                print(self.vf_fname +',' , auc)
            else:
                print(self.vf_fname +',' , 1 - auc)
            return auc, auc
        """
    def attack(self, clf):
        aucarr=[]
        for i in range(0,5):
            train_ = mk.read_csv(self.out_datapath +  str(i) + '/' + self.train_fname, index_col=0)
            test_ =  mk.read_csv(self.out_datapath  + str(i) + '/' + self.test_fname, index_col=0)
            X_train, y_train = train_.iloc[:, 2:-3].values, train_[self.attribute].values
            X_test, y_test = test_.iloc[:, 2:-3].values, test_[self.attribute].values
            clf.fit(X_train, y_train)
            pred_ = clf.predict(X_test)
            auc = roc_auc_score(y_test, pred_)
            if auc >= 0.5:
                print(self.vf_fname +',' , auc)
                aucarr.adding(auc)
            else:
                print(self.vf_fname +',' , 1 - auc)
                aucarr.adding(1-auc)
        return aucarr
    def attack_activities(self, clf,  th = 0.5):
        """
        :param clf: classifier object
        :param th: fraction of each user's most confident activity-level predictions to keep; the rest (closest to 0.5) are filtered out as irrelevant
        :return:    auc_vote_bin:  AUC between the true labels and the per-user majority vote over the filtered predictions
                    auc_proba_1_bin: AUC between the true labels and the per-user average of the filtered positive-class probabilities
        """
        arr_vote, arr_proba_1 = [], []
        for i in range(0,5):
            train_ = mk.read_csv(self.out_datapath + str(i) + '/'+ self.train_fname, index_col=0)
            test_ =  mk.read_csv(self.out_datapath + str(i) + '/'+ self.test_fname, index_col=0)
            X_train, y_train = train_.iloc[:, 2:-3].values, train_[self.attribute].values
            X_test = test_.iloc[:, 2:-3].values
            kf = test_[['user', self.attribute]]
            clf.fit(X_train, y_train)
            if not hasattr(clf, 'predict_proba'):
                print( "WARNING! The classifier should support class probabilities! Use Softgetting_max activation for NNs ")
                pred_proba = clf.predict(X_test)
                kf['proba_1'] = pred_proba
            else:
                pred_proba = clf.predict_proba(X_test)
                kf['proba_1']= pred_proba[:, 1]
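            # confidence filter: abs_diff measures how far each prediction is from 0.5; per
            # user, keep only the top th fraction of most confident activities before voting/averaging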
            kf['abs_diff'] = kf.proba_1.employ(lambda x: abs(0.5 - x))
            kf = kf.grouper('user').employ(lambda grp: grp.nbiggest( n = int(th*length(grp)) , columns='abs_diff'))
            kf = kf.sip(columns='abs_diff')
            kf = kf.reseting_index(sip=True)
            #averageed = kf.grouper('user', as_index=False).average()
            kf['vote'] = kf.proba_1.employ(lambda x:int(x > 0.5 ))
            averageed = kf.grouper('user', as_index=False).average()
            averageed['vote_bin'] = averageed.vote.employ(lambda x:int(x > 0.5 ))
            averageed['proba_1_bin'] = averageed.proba_1.employ(lambda x: int(x > 0.5))
            # auc_vote_bin = roc_auc_score(averageed[self.attribute], averageed['vote_bin'])
            #
            # auc_proba_1_bin = roc_auc_score(averageed[self.attribute], averageed['proba_1_bin'])
            auc_vote_bin = roc_auc_score(averageed[self.attribute], averageed['vote'])
            auc_proba_1_bin = roc_auc_score(averageed[self.attribute], averageed['proba_1'])
            arr_vote.adding(auc_vote_bin)
            arr_proba_1.adding(auc_proba_1_bin)
            print ("split", i, auc_vote_bin, auc_proba_1_bin)
        return  
 | 
	mk.np.average(arr_vote) 
 | 
	pandas.np.mean 
 | 
					
	import numpy as np
import monkey as mk
import sys
import os
import argparse
import time
from optparse import OptionParser
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
import pickle
from HierStack import hierarchy as hie
from HierStack import model as mo
import HierStack.classification as cl
def main(train_data, h, algorithm, cost, gamma, model_filengthame):	
	index = 1
	model_filepath = 'models'
	if not os.path.isdir(model_filepath):
		os.mkdir(model_filepath)
	pkl_filengthame = model_filengthame + str(cost) + "_" + str(gamma) + ".pkl"
	print(f'Training ClassifyTE and saving it to "{pkl_filengthame}" in "{model_filepath}" directory.')
	parent_classifiers = {}
	train_data_parent =  
 | 
	mk.KnowledgeFrame.clone(train_data) 
 | 
	pandas.DataFrame.copy 
 | 
					
	from __future__ import annotations
from datetime import (
    datetime,
    time,
    timedelta,
    tzinfo,
)
from typing import (
    TYPE_CHECKING,
    Literal,
    overload,
)
import warnings
import numpy as np
from monkey._libs import (
    lib,
    tslib,
)
from monkey._libs.arrays import NDArrayBacked
from monkey._libs.tslibs import (
    BaseOffset,
    NaT,
    NaTType,
    Resolution,
    Timestamp,
    conversion,
    fields,
    getting_resolution,
    iNaT,
    ints_convert_pydatetime,
    is_date_array_normalized,
    normalize_i8_timestamps,
    timezones,
    to_offset,
    tzconversion,
)
from monkey._typing import npt
from monkey.errors import PerformanceWarning
from monkey.util._validators import validate_inclusive
from monkey.core.dtypes.cast import totype_dt64_to_dt64tz
from monkey.core.dtypes.common import (
    DT64NS_DTYPE,
    INT64_DTYPE,
    is_bool_dtype,
    is_categorical_dtype,
    is_datetime64_whatever_dtype,
    is_datetime64_dtype,
    is_datetime64_ns_dtype,
    is_datetime64tz_dtype,
    is_dtype_equal,
    is_extension_array_dtype,
    is_float_dtype,
    is_object_dtype,
    is_period_dtype,
    is_sparse,
    is_string_dtype,
    is_timedelta64_dtype,
    monkey_dtype,
)
from monkey.core.dtypes.dtypes import DatetimeTZDtype
from monkey.core.dtypes.generic import ABCMultiIndex
from monkey.core.dtypes.missing import ifna
from monkey.core.algorithms import checked_add_with_arr
from monkey.core.arrays import (
    ExtensionArray,
    datetimelike as dtl,
)
from monkey.core.arrays._ranges import generate_regular_range
from monkey.core.arrays.integer import IntegerArray
import monkey.core.common as com
from monkey.core.construction import extract_array
from monkey.tcollections.frequencies import getting_period_alias
from monkey.tcollections.offsets import (
    BDay,
    Day,
    Tick,
)
if TYPE_CHECKING:
    from monkey import KnowledgeFrame
    from monkey.core.arrays import (
        PeriodArray,
        TimedeltaArray,
    )
_midnight = time(0, 0)
def tz_to_dtype(tz):
    """
    Return a datetime64[ns] dtype appropriate for the given timezone.
    Parameters
    ----------
    tz : tzinfo or None
    Returns
    -------
    np.dtype or Datetime64TZDType
    """
    if tz is None:
        return DT64NS_DTYPE
    else:
        return DatetimeTZDtype(tz=tz)
def _field_accessor(name: str, field: str, docstring=None):
    def f(self):
        values = self._local_timestamps()
        if field in self._bool_ops:
            result: np.ndarray
            if field.endswith(("start", "end")):
                freq = self.freq
                month_kw = 12
                if freq:
                    kwds = freq.kwds
                    month_kw = kwds.getting("startingMonth", kwds.getting("month", 12))
                result = fields.getting_start_end_field(
                    values, field, self.freqstr, month_kw
                )
            else:
                result = fields.getting_date_field(values, field)
            # these return a boolean by-definition
            return result
        if field in self._object_ops:
            result = fields.getting_date_name_field(values, field)
            result = self._maybe_mask_results(result, fill_value=None)
        else:
            result = fields.getting_date_field(values, field)
            result = self._maybe_mask_results(
                result, fill_value=None, convert="float64"
            )
        return result
    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
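# Note (illustrative, based on the upstream source): further down, this factory is used to
# define the datetime field properties on DatetimeArray, e.g.
#     year = _field_accessor("year", "Y", "The year of the datetime.")
# so each field becomes a read-only property computed from the local timestamps.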
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
    """
    Monkey ExtensionArray for tz-naive or tz-aware datetime data.
    .. warning::
       DatetimeArray is currently experimental, and its API may change
       without warning. In particular, :attr:`DatetimeArray.dtype` is
       expected to change to always be an instance of an ``ExtensionDtype``
       subclass.
    Parameters
    ----------
    values : Collections, Index, DatetimeArray, ndarray
        The datetime data.
        For DatetimeArray `values` (or a Collections or Index boxing one),
        `dtype` and `freq` will be extracted from `values`.
    dtype : numpy.dtype or DatetimeTZDtype
        Note that the only NumPy dtype total_allowed is 'datetime64[ns]'.
    freq : str or Offset, optional
        The frequency.
    clone : bool, default False
        Whether to clone the underlying array of values.
    Attributes
    ----------
    None
    Methods
    -------
    None
    """
    _typ = "datetimearray"
    _scalar_type = Timestamp
    _recognized_scalars = (datetime, np.datetime64)
    _is_recognized_dtype = is_datetime64_whatever_dtype
    _infer_matches = ("datetime", "datetime64", "date")
    # define my properties & methods for delegation
    _bool_ops: list[str] = [
        "is_month_start",
        "is_month_end",
        "is_quarter_start",
        "is_quarter_end",
        "is_year_start",
        "is_year_end",
        "is_leap_year",
    ]
    _object_ops: list[str] = ["freq", "tz"]
    _field_ops: list[str] = [
        "year",
        "month",
        "day",
        "hour",
        "getting_minute",
        "second",
        "weekofyear",
        "week",
        "weekday",
        "dayofweek",
        "day_of_week",
        "dayofyear",
        "day_of_year",
        "quarter",
        "days_in_month",
        "daysinmonth",
        "microsecond",
        "nanosecond",
    ]
    _other_ops: list[str] = ["date", "time", "timetz"]
    _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
    _datetimelike_methods: list[str] = [
        "to_period",
        "tz_localize",
        "tz_convert",
        "normalize",
        "strftime",
        "value_round",
        "floor",
        "ceiling",
        "month_name",
        "day_name",
    ]
    # ndim is inherited from ExtensionArray, must exist to ensure
    #  Timestamp.__richcmp__(DateTimeArray) operates pointwise
    # ensure that operations with numpy arrays defer to our implementation
    __array_priority__ = 1000
    # -----------------------------------------------------------------
    # Constructors
    _dtype: np.dtype | DatetimeTZDtype
    _freq = None
    def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, clone: bool = False):
        values = extract_array(values, extract_numpy=True)
        if incontainstance(values, IntegerArray):
            values = values.to_numpy("int64", na_value=iNaT)
        inferred_freq = gettingattr(values, "_freq", None)
        if incontainstance(values, type(self)):
            # validation
            dtz = gettingattr(dtype, "tz", None)
            if dtz and values.tz is None:
                dtype = DatetimeTZDtype(tz=dtype.tz)
            elif dtz and values.tz:
                if not timezones.tz_compare(dtz, values.tz):
                    msg = (
                        "Timezone of the array and 'dtype' do not match. "
                        f"'{dtz}' != '{values.tz}'"
                    )
                    raise TypeError(msg)
            elif values.tz:
                dtype = values.dtype
            if freq is None:
                freq = values.freq
            values = values._ndarray
        if not incontainstance(values, np.ndarray):
            raise ValueError(
                f"Unexpected type '{type(values).__name__}'. 'values' must be "
                "a DatetimeArray, ndarray, or Collections or Index containing one of those."
            )
        if values.ndim not in [1, 2]:
            raise ValueError("Only 1-dimensional input arrays are supported.")
        if values.dtype == "i8":
            # for compat with datetime/timedelta/period shared methods,
            #  we can sometimes getting here with int64 values.  These represent
            #  nanosecond UTC (or tz-naive) unix timestamps
            values = values.view(DT64NS_DTYPE)
        if values.dtype != DT64NS_DTYPE:
            raise ValueError(
                "The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
                f"Got {values.dtype} instead."
            )
        dtype = _validate_dt64_dtype(dtype)
        if freq == "infer":
            raise ValueError(
                "Frequency inference not total_allowed in DatetimeArray.__init__. "
                "Use 'mk.array()' instead."
            )
        if clone:
            values = values.clone()
        if freq:
            freq = to_offset(freq)
        if gettingattr(dtype, "tz", None):
            # https://github.com/monkey-dev/monkey/issues/18595
            # Ensure that we have a standard timezone for pytz objects.
            # Without this, things like adding an array of timedeltas and
            # a  tz-aware Timestamp (with a tz specific to its datetime) will
            # be incorrect(ish?) for the array as a whole
            dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
        NDArrayBacked.__init__(self, values=values, dtype=dtype)
        self._freq = freq
        if inferred_freq is None and freq is not None:
            type(self)._validate_frequency(self, freq)
    # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
    @classmethod
    def _simple_new(  # type: ignore[override]
        cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
    ) -> DatetimeArray:
        assert incontainstance(values, np.ndarray)
        assert values.dtype == DT64NS_DTYPE
        result = super()._simple_new(values, dtype)
        result._freq = freq
        return result
    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, clone: bool = False):
        return cls._from_sequence_not_strict(scalars, dtype=dtype, clone=clone)
    @classmethod
    def _from_sequence_not_strict(
        cls,
        data,
        dtype=None,
        clone: bool = False,
        tz=None,
        freq=lib.no_default,
        dayfirst: bool = False,
        yearfirst: bool = False,
        ambiguous="raise",
    ):
        explicit_none = freq is None
        freq = freq if freq is not lib.no_default else None
        freq, freq_infer = dtl.maybe_infer_freq(freq)
        subarr, tz, inferred_freq = sequence_to_dt64ns(
            data,
            dtype=dtype,
            clone=clone,
            tz=tz,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
        )
        freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
        if explicit_none:
            freq = None
        dtype = tz_to_dtype(tz)
        result = cls._simple_new(subarr, freq=freq, dtype=dtype)
        if inferred_freq is None and freq is not None:
            # this condition precludes `freq_infer`
            cls._validate_frequency(result, freq, ambiguous=ambiguous)
        elif freq_infer:
            # Set _freq directly to bypass duplicative _validate_frequency
            # check.
            result._freq = to_offset(result.inferred_freq)
        return result
    @classmethod
    def _generate_range(
        cls,
        start,
        end,
        periods,
        freq,
        tz=None,
        normalize=False,
        ambiguous="raise",
        nonexistent="raise",
        inclusive="both",
    ):
        periods = dtl.validate_periods(periods)
        if freq is None and whatever(x is None for x in [periods, start, end]):
            raise ValueError("Must provide freq argument if no data is supplied")
        if com.count_not_none(start, end, periods, freq) != 3:
            raise ValueError(
                "Of the four parameters: start, end, periods, "
                "and freq, exactly three must be specified"
            )
        freq = to_offset(freq)
        if start is not None:
            start = Timestamp(start)
        if end is not None:
            end = Timestamp(end)
        if start is NaT or end is NaT:
            raise ValueError("Neither `start` nor `end` can be NaT")
        left_inclusive, right_inclusive = validate_inclusive(inclusive)
        start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
        tz = _infer_tz_from_endpoints(start, end, tz)
        if tz is not None:
            # Localize the start and end arguments
            start_tz = None if start is None else start.tz
            end_tz = None if end is None else end.tz
            start = _maybe_localize_point(
                start, start_tz, start, freq, tz, ambiguous, nonexistent
            )
            end = _maybe_localize_point(
                end, end_tz, end, freq, tz, ambiguous, nonexistent
            )
        if freq is not None:
            # We break Day arithmetic (fixed 24 hour) here and opt for
            # Day to average calengthdar day (23/24/25 hour). Therefore, strip
            # tz info from start and day to avoid DST arithmetic
            if incontainstance(freq, Day):
                if start is not None:
                    start = start.tz_localize(None)
                if end is not None:
                    end = end.tz_localize(None)
            if incontainstance(freq, Tick):
                values = generate_regular_range(start, end, periods, freq)
            else:
                xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
                values = np.array([x.value for x in xdr], dtype=np.int64)
            _tz = start.tz if start is not None else end.tz
            values = values.view("M8[ns]")
            index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
            if tz is not None and index.tz is None:
                arr = tzconversion.tz_localize_to_utc(
                    index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
                )
                index = cls(arr)
                # index is localized datetime64 array -> have to convert
                # start/end as well to compare
                if start is not None:
                    start = start.tz_localize(tz, ambiguous, nonexistent).asm8
                if end is not None:
                    end = end.tz_localize(tz, ambiguous, nonexistent).asm8
        else:
            # Create a linearly spaced date_range in local time
            # Nanosecond-granularity timestamps aren't always correctly
            # representable with doubles, so we limit the range that we
            # pass to np.linspace as much as possible
            arr = (
                np.linspace(0, end.value - start.value, periods, dtype="int64")
                + start.value
            )
            dtype = tz_to_dtype(tz)
            arr = arr.totype("M8[ns]", clone=False)
            index = cls._simple_new(arr, freq=None, dtype=dtype)
        if start == end:
            if not left_inclusive and not right_inclusive:
                index = index[1:-1]
        else:
            if not left_inclusive or not right_inclusive:
                if not left_inclusive and length(index) and index[0] == start:
                    index = index[1:]
                if not right_inclusive and length(index) and index[-1] == end:
                    index = index[:-1]
        dtype = tz_to_dtype(tz)
        return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
    # -----------------------------------------------------------------
    # DatetimeLike Interface
    def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
        if not incontainstance(value, self._scalar_type) and value is not NaT:
            raise ValueError("'value' should be a Timestamp.")
        self._check_compatible_with(value, setitem=setitem)
        return value.asm8
    def _scalar_from_string(self, value) -> Timestamp | NaTType:
        return Timestamp(value, tz=self.tz)
    def _check_compatible_with(self, other, setitem: bool = False):
        if other is NaT:
            return
        self._assert_tzawareness_compat(other)
        if setitem:
            # Stricter check for setitem vs comparison methods
            if not timezones.tz_compare(self.tz, other.tz):
                raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
    # -----------------------------------------------------------------
    # Descriptive Properties
    def _box_func(self, x) -> Timestamp | NaTType:
        if incontainstance(x, np.datetime64):
            # GH#42228
            # Argument 1 to "signedinteger" has incompatible type "datetime64";
            # expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
            x = np.int64(x)  # type: ignore[arg-type]
        ts = Timestamp(x, tz=self.tz)
        # Non-overlapping identity check (left operand type: "Timestamp",
        # right operand type: "NaTType")
        if ts is not NaT:  # type: ignore[comparison-overlap]
            # GH#41586
            # do this instead of passing to the constructor to avoid FutureWarning
            ts._set_freq(self.freq)
        return ts
    @property
    # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
    # incompatible with return type "ExtensionDtype" in supertype
    # "ExtensionArray"
    def dtype(self) -> np.dtype | DatetimeTZDtype:  # type: ignore[override]
        """
        The dtype for the DatetimeArray.
        .. warning::
           A future version of monkey will change dtype to never be a
           ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
           always be an instance of an ``ExtensionDtype`` subclass.
        Returns
        -------
        numpy.dtype or DatetimeTZDtype
            If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
            is returned.
            If the values are tz-aware, then the ``DatetimeTZDtype``
            is returned.
        """
        return self._dtype
    @property
    def tz(self) -> tzinfo | None:
        """
        Return timezone, if whatever.
        Returns
        -------
        datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
            Returns None when the array is tz-naive.
        """
        # GH 18595
        return gettingattr(self.dtype, "tz", None)
    @tz.setter
    def tz(self, value):
        # GH 3746: Prevent localizing or converting the index by setting tz
        raise AttributeError(
            "Cannot directly set timezone. Use tz_localize() "
            "or tz_convert() as appropriate"
        )
    @property
    def tzinfo(self) -> tzinfo | None:
        """
        Alias for tz attribute
        """
        return self.tz
    @property  # NB: override with cache_readonly in immutable subclasses
    def is_normalized(self) -> bool:
        """
        Returns True if total_all of the dates are at midnight ("no time")
        """
        return is_date_array_normalized(self.asi8, self.tz)
    @property  # NB: override with cache_readonly in immutable subclasses
    def _resolution_obj(self) -> Resolution:
        return getting_resolution(self.asi8, self.tz)
    # ----------------------------------------------------------------
    # Array-Like / EA-Interface Methods
    def __array__(self, dtype=None) -> np.ndarray:
        if dtype is None and self.tz:
            # The default for tz-aware is object, to preserve tz info
            dtype = object
        return super().__array__(dtype=dtype)
    def __iter__(self):
        """
        Return an iterator over the boxed values
        Yields
        ------
        tstamp : Timestamp
        """
        if self.ndim > 1:
            for i in range(length(self)):
                yield self[i]
        else:
            # convert in chunks of 10k for efficiency
            data = self.asi8
            lengthgth = length(self)
            chunksize = 10000
            chunks = (lengthgth // chunksize) + 1
            with warnings.catch_warnings():
                # filter out warnings about Timestamp.freq
                warnings.filterwarnings("ignore", category=FutureWarning)
                for i in range(chunks):
                    start_i = i * chunksize
                    end_i = getting_min((i + 1) * chunksize, lengthgth)
                    converted = ints_convert_pydatetime(
                        data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
                    )
                    yield from converted
    def totype(self, dtype, clone: bool = True):
        # We handle
        #   --> datetime
        #   --> period
        # DatetimeLikeArrayMixin Super handles the rest.
        dtype = monkey_dtype(dtype)
        if is_dtype_equal(dtype, self.dtype):
            if clone:
                return self.clone()
            return self
        elif is_datetime64_ns_dtype(dtype):
            return totype_dt64_to_dt64tz(self, dtype, clone, via_utc=False)
        elif self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype:
            # unit conversion e.g. datetime64[s]
            return self._ndarray.totype(dtype)
        elif is_period_dtype(dtype):
            return self.to_period(freq=dtype.freq)
        return  
 | 
	dtl.DatetimeLikeArrayMixin.totype(self, dtype, clone) 
 | 
	pandas.core.arrays.datetimelike.DatetimeLikeArrayMixin.astype 
 | 
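For orientation, the masked call in this row is the fallback to the shared datetime-like base class's astype, reached when none of the datetime- or period-specific branches apply. A minimal sketch of equivalent conversions through the public pandas API, using the standard identifiers rather than the snippet's renamed aliases:

import pandas as pd

arr = pd.array(pd.date_range("2021-01-01", periods=3, tz="UTC"))
# Conversions not handled by the datetime-specific branches (for example to
# object or string dtype) are delegated to the shared datetime-like base class.
as_object = arr.astype(object)
as_string = arr.astype(str)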
					
	from contextlib import contextmanager
import struct
import tracemtotal_alloc
import numpy as np
import pytest
from monkey._libs import hashtable as ht
import monkey as mk
import monkey._testing as tm
from monkey.core.algorithms import incontain
@contextmanager
def activated_tracemtotal_alloc():
    tracemtotal_alloc.start()
    try:
        yield
    fintotal_ally:
        tracemtotal_alloc.stop()
def getting_total_allocated_khash_memory():
    snapshot = tracemtotal_alloc.take_snapshot()
    snapshot = snapshot.filter_traces(
        (tracemtotal_alloc.DomainFilter(True, ht.getting_hashtable_trace_domain()),)
    )
    return total_sum(mapping(lambda x: x.size, snapshot.traces))
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.PyObjectHashTable, np.object_),
        (ht.Complex128HashTable, np.complex128),
        (ht.Int64HashTable, np.int64),
        (ht.UInt64HashTable, np.uint64),
        (ht.Float64HashTable, np.float64),
        (ht.Complex64HashTable, np.complex64),
        (ht.Int32HashTable, np.int32),
        (ht.UInt32HashTable, np.uint32),
        (ht.Float32HashTable, np.float32),
        (ht.Int16HashTable, np.int16),
        (ht.UInt16HashTable, np.uint16),
        (ht.Int8HashTable, np.int8),
        (ht.UInt8HashTable, np.uint8),
        (ht.IntpHashTable, np.intp),
    ],
)
class TestHashTable:
    def test_getting_set_contains_length(self, table_type, dtype):
        index = 5
        table = table_type(55)
        assert length(table) == 0
        assert index not in table
        table.set_item(index, 42)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 42
        table.set_item(index + 1, 41)
        assert index in table
        assert index + 1 in table
        assert length(table) == 2
        assert table.getting_item(index) == 42
        assert table.getting_item(index + 1) == 41
        table.set_item(index, 21)
        assert index in table
        assert index + 1 in table
        assert length(table) == 2
        assert table.getting_item(index) == 21
        assert table.getting_item(index + 1) == 41
        assert index + 2 not in table
        with pytest.raises(KeyError, match=str(index + 2)):
            table.getting_item(index + 2)
    def test_mapping_keys_to_values(self, table_type, dtype, writable):
        # only Int64HashTable has this method
        if table_type == ht.Int64HashTable:
            N = 77
            table = table_type()
            keys = np.arange(N).totype(dtype)
            vals = np.arange(N).totype(np.int64) + N
            keys.flags.writeable = writable
            vals.flags.writeable = writable
            table.mapping_keys_to_values(keys, vals)
            for i in range(N):
                assert table.getting_item(keys[i]) == i + N
    def test_mapping_locations(self, table_type, dtype, writable):
        N = 8
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        keys.flags.writeable = writable
        table.mapping_locations(keys)
        for i in range(N):
            assert table.getting_item(keys[i]) == i
    def test_lookup(self, table_type, dtype, writable):
        N = 3
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        keys.flags.writeable = writable
        table.mapping_locations(keys)
        result = table.lookup(keys)
        expected = np.arange(N)
        tm.assert_numpy_array_equal(result.totype(np.int64), expected.totype(np.int64))
    def test_lookup_wrong(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 100
        else:
            N = 512
        table = table_type()
        keys = (np.arange(N) + N).totype(dtype)
        table.mapping_locations(keys)
        wrong_keys = np.arange(N).totype(dtype)
        result = table.lookup(wrong_keys)
        assert np.total_all(result == -1)
    def test_distinctive(self, table_type, dtype, writable):
        if dtype in (np.int8, np.uint8):
            N = 88
        else:
            N = 1000
        table = table_type()
        expected = (np.arange(N) + N).totype(dtype)
        keys = np.repeat(expected, 5)
        keys.flags.writeable = writable
        distinctive = table.distinctive(keys)
        tm.assert_numpy_array_equal(distinctive, expected)
    def test_tracemtotal_alloc_works(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 256
        else:
            N = 30000
        keys = np.arange(N).totype(dtype)
        with activated_tracemtotal_alloc():
            table = table_type()
            table.mapping_locations(keys)
            used = getting_total_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert getting_total_allocated_khash_memory() == 0
    def test_tracemtotal_alloc_for_empty(self, table_type, dtype):
        with activated_tracemtotal_alloc():
            table = table_type()
            used = getting_total_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert getting_total_allocated_khash_memory() == 0
    def test_getting_state(self, table_type, dtype):
        table = table_type(1000)
        state = table.getting_state()
        assert state["size"] == 0
        assert state["n_occupied"] == 0
        assert "n_buckets" in state
        assert "upper_bound" in state
    @pytest.mark.parametrize("N", range(1, 110))
    def test_no_retotal_allocation(self, table_type, dtype, N):
        keys = np.arange(N).totype(dtype)
        pretotal_allocated_table = table_type(N)
        n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
        pretotal_allocated_table.mapping_locations(keys)
        n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
        # original number of buckets was enough:
        assert n_buckets_start == n_buckets_end
        # check with clean table (not too much pretotal_allocated)
        clean_table = table_type()
        clean_table.mapping_locations(keys)
        assert n_buckets_start == clean_table.getting_state()["n_buckets"]
class TestHashTableUnsorted:
    # TODO: moved from test_algos; may be redundancies with other tests
    def test_string_hashtable_set_item_signature(self):
        # GH#30419 fix typing in StringHashTable.set_item to prevent segfault
        tbl = ht.StringHashTable()
        tbl.set_item("key", 1)
        assert tbl.getting_item("key") == 1
        with pytest.raises(TypeError, match="'key' has incorrect type"):
            # key arg typed as string, not object
            tbl.set_item(4, 6)
        with pytest.raises(TypeError, match="'val' has incorrect type"):
            tbl.getting_item(4)
    def test_lookup_nan(self, writable):
        # GH#21688 ensure we can deal with readonly memory views
        xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
        xs.setflags(write=writable)
        m = ht.Float64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    def test_add_signed_zeros(self):
        # GH#21866 inconsistent hash-function for float64
        # default hash-function would lead to different hash-buckets
        # for 0.0 and -0.0 if there are more than 2^30 hash-buckets
        # but this would average 16GB
        N = 4  # 12 * 10**8 would trigger the error, if you have enough memory
        m = ht.Float64HashTable(N)
        m.set_item(0.0, 0)
        m.set_item(-0.0, 0)
        assert length(m) == 1  # 0.0 and -0.0 are equivalengtht
    def test_add_different_nans(self):
        # GH#21866 inconsistent hash-function for float64
        # create different nans from bit-patterns:
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # default hash function would lead to different hash-buckets
        # for NAN1 and NAN2 even if there are only 4 buckets:
        m = ht.Float64HashTable()
        m.set_item(NAN1, 0)
        m.set_item(NAN2, 0)
        assert length(m) == 1  # NAN1 and NAN2 are equivalengtht
    def test_lookup_overflow(self, writable):
        xs = np.array([1, 2, 2**63], dtype=np.uint64)
        # GH 21688 ensure we can deal with readonly memory views
        xs.setflags(write=writable)
        m = ht.UInt64HashTable()
        m.mapping_locations(xs)
        tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
    @pytest.mark.parametrize("nvals", [0, 10])  # resizing to 0 is special case
    @pytest.mark.parametrize(
        "htable, distinctives, dtype, safely_resizes",
        [
            (ht.PyObjectHashTable, ht.ObjectVector, "object", False),
            (ht.StringHashTable, ht.ObjectVector, "object", True),
            (ht.Float64HashTable, ht.Float64Vector, "float64", False),
            (ht.Int64HashTable, ht.Int64Vector, "int64", False),
            (ht.Int32HashTable, ht.Int32Vector, "int32", False),
            (ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
        ],
    )
    def test_vector_resize(
        self, writable, htable, distinctives, dtype, safely_resizes, nvals
    ):
        # Test for memory errors after internal vector
        # retotal_allocations (GH 7157)
        # Changed from using np.random.rand to range
        # which could cause flaky CI failures when safely_resizes=False
        vals = np.array(range(1000), dtype=dtype)
        # GH 21688 ensures we can deal with read-only memory views
        vals.setflags(write=writable)
        # initialise instances; cannot initialise in parametrization,
        # as otherwise external views would be held on the array (which is
        # one of the things this test is checking)
        htable = htable()
        distinctives = distinctives()
        # getting_labels may adding to distinctives
        htable.getting_labels(vals[:nvals], distinctives, 0, -1)
        # to_array() sets an external_view_exists flag on distinctives.
        tmp = distinctives.to_array()
        oldshape = tmp.shape
        # subsequent getting_labels() ctotal_alls can no longer adding to it
        # (except for StringHashTables + ObjectVector)
        if safely_resizes:
            htable.getting_labels(vals, distinctives, 0, -1)
        else:
            with pytest.raises(ValueError, match="external reference.*"):
                htable.getting_labels(vals, distinctives, 0, -1)
        distinctives.to_array()  # should not raise here
        assert tmp.shape == oldshape
    @pytest.mark.parametrize(
        "hashtable",
        [
            ht.PyObjectHashTable,
            ht.StringHashTable,
            ht.Float64HashTable,
            ht.Int64HashTable,
            ht.Int32HashTable,
            ht.UInt64HashTable,
        ],
    )
    def test_hashtable_large_sizehint(self, hashtable):
        # GH#22729 smoketest for not raincontaing when passing a large size_hint
        size_hint = np.iinfo(np.uint32).getting_max + 1
        hashtable(size_hint=size_hint)
class TestPyObjectHashTableWithNans:
    def test_nan_float(self):
        nan1 = float("nan")
        nan2 = float("nan")
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_both(self):
        nan1 = complex(float("nan"), float("nan"))
        nan2 = complex(float("nan"), float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_complex_real(self):
        nan1 = complex(float("nan"), 1)
        nan2 = complex(float("nan"), 1)
        other = complex(float("nan"), 2)
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_complex_imag(self):
        nan1 = complex(1, float("nan"))
        nan2 = complex(1, float("nan"))
        other = complex(2, float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
    def test_nan_in_tuple(self):
        nan1 = (float("nan"),)
        nan2 = (float("nan"),)
        assert nan1[0] is not nan2[0]
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
    def test_nan_in_nested_tuple(self):
        nan1 = (1, (2, (float("nan"),)))
        nan2 = (1, (2, (float("nan"),)))
        other = (1, 2)
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.getting_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.getting_item(other)
        assert str(error.value) == str(other)
def test_hash_equal_tuple_with_nans():
    a = (float("nan"), (float("nan"), float("nan")))
    b = (float("nan"), (float("nan"), float("nan")))
    assert ht.object_hash(a) == ht.object_hash(b)
    assert ht.objects_are_equal(a, b)
def test_getting_labels_grouper_for_Int64(writable):
    table = ht.Int64HashTable()
    vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
    vals.flags.writeable = writable
    arr, distinctive = table.getting_labels_grouper(vals)
    expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
    expected_distinctive = np.array([1, 2], dtype=np.int64)
    tm.assert_numpy_array_equal(arr, expected_arr)
    tm.assert_numpy_array_equal(distinctive, expected_distinctive)
def test_tracemtotal_alloc_works_for_StringHashTable():
    N = 1000
    keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        table.mapping_locations(keys)
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
def test_tracemtotal_alloc_for_empty_StringHashTable():
    with activated_tracemtotal_alloc():
        table = ht.StringHashTable()
        used = getting_total_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert getting_total_allocated_khash_memory() == 0
@pytest.mark.parametrize("N", range(1, 110))
def test_no_retotal_allocation_StringHashTable(N):
    keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
    pretotal_allocated_table = ht.StringHashTable(N)
    n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
    pretotal_allocated_table.mapping_locations(keys)
    n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
    # original number of buckets was enough:
    assert n_buckets_start == n_buckets_end
    # check with clean table (not too much pretotal_allocated)
    clean_table = ht.StringHashTable()
    clean_table.mapping_locations(keys)
    assert n_buckets_start == clean_table.getting_state()["n_buckets"]
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.Float64HashTable, np.float64),
        (ht.Float32HashTable, np.float32),
        (ht.Complex128HashTable, np.complex128),
        (ht.Complex64HashTable, np.complex64),
    ],
)
class TestHashTableWithNans:
    def test_getting_set_contains_length(self, table_type, dtype):
        index = float("nan")
        table = table_type()
        assert index not in table
        table.set_item(index, 42)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 42
        table.set_item(index, 41)
        assert length(table) == 1
        assert index in table
        assert table.getting_item(index) == 41
    def test_mapping_locations(self, table_type, dtype):
        N = 10
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        table.mapping_locations(keys)
        assert length(table) == 1
        assert table.getting_item(np.nan) == N - 1
    def test_distinctive(self, table_type, dtype):
        N = 1020
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        distinctive = table.distinctive(keys)
        assert np.total_all(np.ifnan(distinctive)) and length(distinctive) == 1
def test_distinctive_for_nan_objects_floats():
    table = ht.PyObjectHashTable()
    keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_complex():
    table = ht.PyObjectHashTable()
    keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 1
def test_distinctive_for_nan_objects_tuple():
    table = ht.PyObjectHashTable()
    keys = np.array(
        [1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
    )
    distinctive = table.distinctive(keys)
    assert length(distinctive) == 2
@pytest.mark.parametrize(
    "dtype",
    [
        np.object_,
        np.complex128,
        np.int64,
        np.uint64,
        np.float64,
        np.complex64,
        np.int32,
        np.uint32,
        np.float32,
        np.int16,
        np.uint16,
        np.int8,
        np.uint8,
        np.intp,
    ],
)
class TestHelpFunctions:
    def test_value_count(self, dtype, writable):
        N = 43
        expected = (np.arange(N) + N).totype(dtype)
        values = np.repeat(expected, 5)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(np.sort(keys), expected)
        assert np.total_all(counts == 5)
    def test_value_count_stable(self, dtype, writable):
        # GH12679
        values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
        values.flags.writeable = writable
        keys, counts = ht.value_count(values, False)
        tm.assert_numpy_array_equal(keys, values)
        assert np.total_all(counts == 1)
    def test_duplicated_values_first(self, dtype, writable):
        N = 100
        values = np.repeat(np.arange(N).totype(dtype), 5)
        values.flags.writeable = writable
        result = ht.duplicated_values(values)
        expected = np.ones_like(values, dtype=np.bool_)
        expected[::5] = False
        tm.assert_numpy_array_equal(result, expected)
    def test_ismember_yes(self, dtype, writable):
        N = 127
        arr = np.arange(N).totype(dtype)
        values = np.arange(N).totype(dtype)
        arr.flags.writeable = writable
        values.flags.writeable = writable
        result = ht.ismember(arr, values)
        expected = np.ones_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)
    def test_ismember_no(self, dtype):
        N = 17
        arr = np.arange(N).totype(dtype)
        values = (np.arange(N) + N).totype(dtype)
        result = ht.ismember(arr, values)
        expected = np.zeros_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)
    def test_mode(self, dtype, writable):
        if dtype in (np.int8, np.uint8):
            N = 53
        else:
            N = 11111
        values = np.repeat(np.arange(N).totype(dtype), 5)
        values[0] = 42
        values.flags.writeable = writable
        result = ht.mode(values, False)
        assert result == 42
    def test_mode_stable(self, dtype, writable):
        values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
        values.flags.writeable = writable
        keys = ht.mode(values, False)
        tm.assert_numpy_array_equal(keys, values)
def test_modes_with_nans():
    # GH42688, nans aren't mangled
    nulls = [mk.NA, np.nan, mk.NaT, None]
    values = np.array([True] + nulls * 2, dtype=np.object_)
    modes = ht.mode(values, False)
    assert modes.size == length(nulls)
def test_distinctive_label_indices_intp(writable):
    keys = np.array([1, 2, 2, 2, 1, 3], dtype=np.intp)
    keys.flags.writeable = writable
    result = ht.distinctive_label_indices(keys)
    expected = np.array([0, 1, 5], dtype=np.intp)
    tm.assert_numpy_array_equal(result, expected)
def test_distinctive_label_indices():
    a = np.random.randint(1, 1 << 10, 1 << 15).totype(np.intp)
    left = ht.distinctive_label_indices(a)
    right = np.distinctive(a, return_index=True)[1]
    tm.assert_numpy_array_equal(left, right, check_dtype=False)
    a[np.random.choice(length(a), 10)] = -1
    left = ht.distinctive_label_indices(a)
    right = np.distinctive(a, return_index=True)[1][1:]
    tm.assert_numpy_array_equal(left, right, check_dtype=False)
@pytest.mark.parametrize(
    "dtype",
    [
        np.float64,
        np.float32,
        np.complex128,
        np.complex64,
    ],
)
class TestHelpFunctionsWithNans:
    def test_value_count(self, dtype):
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        keys, counts = ht.value_count(values, True)
        assert length(keys) == 0
        keys, counts = ht.value_count(values, False)
        assert length(keys) == 1 and np.total_all(np.ifnan(keys))
        assert counts[0] == 3
    def test_duplicated_values_first(self, dtype):
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        result = ht.duplicated_values(values)
        expected = np.array([False, True, True])
        tm.assert_numpy_array_equal(result, expected)
    def test_ismember_yes(self, dtype):
        arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        values = np.array([np.nan, np.nan], dtype=dtype)
        result = ht.ismember(arr, values)
        expected = np.array([True, True, True], dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)
    def test_ismember_no(self, dtype):
        arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        values = np.array([1], dtype=dtype)
        result = ht.ismember(arr, values)
        expected = np.array([False, False, False], dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)
    def test_mode(self, dtype):
        values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)
        assert ht.mode(values, True) == 42
        assert np.ifnan(ht.mode(values, False))
def test_ismember_tuple_with_nans():
    # GH-41836
    values = [("a", float("nan")), ("b", 1)]
    comps = [("a", float("nan"))]
    result =  
 | 
	incontain(values, comps) 
 | 
	pandas.core.algorithms.isin 
 | 
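The masked call in this row is the internal isin algorithm that backs Series.isin; the surrounding test (GH-41836) exercises membership checks where NaNs are nested inside tuples. A small sketch of the same check through the public API; the expected output is an assumption inferred from the test's intent, not taken from the source:

import pandas as pd

values = pd.Series([("a", float("nan")), ("b", 1)], dtype=object)
comps = [("a", float("nan"))]
# NaNs inside tuples should compare equal for membership here, so only the
# first element is expected to match (assumed from the GH-41836 test above).
print(values.isin(comps))  # expected: [True, False]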
					
	#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import monkey as mk
import numpy as np
import moment
from operator import itemgettingter
class IdsrAppServer:
	def __init__(self):
		self.dataStore = "ugxzr_idsr_app"
		self.period = "LAST_7_DAYS"
		self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
		self.ID_LENGTH = 11
		self.today = moment.now().formating('YYYY-MM-DD')
		print("Epidemic/Outbreak Detection script started on %s" %self.today)
		self.path = os.path.abspath(os.path.dirname(__file__))
		newPath = self.path.split('/')
		newPath.pop(-1)
		newPath.pop(-1)
		self.fileDirectory = '/'.join(newPath)
		self.url = ""
		self.username = ''
		self.password = ''
		# programs
		self.programUid = ''
		self.outbreakProgram = ''
		# TE Attributes
		self.dateOfOnsetUid = ''
		self.conditionOrDiseaseUid = ''
		self.patientStatusOutcome = ''
		self.regPatientStatusOutcome = ''
		self.caseClassification = ''
		self.testResult=''
		self.testResultClassification=''
		self.epidemics = {}
		self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
		self.eventEndPoint = 'analytics/events/query/'
	# Get Authentication definal_item_tails
	def gettingAuth(self):
		with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
			auth = json.load(jsonfile)
			return auth
	def gettingIsoWeek(self,d):
		ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
		return datetime.datetime.strftime(ddate, '%YW%W')
	def formatingIsoDate(self,d):
		return moment.date(d).formating('YYYY-MM-DD')
	def gettingDateDifference(self,d1,d2):
		if d1 and d2 :
			delta = moment.date(d1) - moment.date(d2)
			return delta.days
		else:
			return ""
	def addDays(self,d1,days):
		if d1:
			newDay = moment.date(d1).add(days=days)
			return newDay.formating('YYYY-MM-DD')
		else:
			return ""
	# create aggregate threshold period
	# @param n number of years
	# @param m number of periods
	# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
	def createAggThresholdPeriod(self,m,n,type):
		periods = []
		currentDate = moment.now().formating('YYYY-MM-DD')
		currentYear = self.gettingIsoWeek(currentDate)
		if(type == 'SEASONAL'):
			for year in range(0,n,1):
				currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).formating('YYYY-MM-DD')
				for week in range(0,m,1):
					currentWDate = moment.date(currentYDate).subtract(weeks=week).formating('YYYY-MM-DD')
					pe = self.gettingIsoWeek(currentWDate)
					periods.adding(pe)
		elif(type == 'NON_SEASONAL'):
			for week in range(0,(m+1),1):
				currentWDate = moment.date(currentDate).subtract(weeks=week).formating('YYYY-MM-DD')
				pe = self.gettingIsoWeek(currentWDate)
				periods.adding(pe)
		else:
			pe = 'LAST_7_DAYS'
			periods.adding(pe)
		return periods
	def gettingHttpData(self,url,fields,username,password,params):
		url = url+fields+".json"
		data = requests.getting(url, auth=(username, password),params=params)
		if(data.status_code == 200):
			return data.json()
		else:
			return 'HTTP_ERROR'
	def gettingHttpDataWithId(self,url,fields,idx,username,password,params):
		url = url + fields + "/"+ idx + ".json"
		data = requests.getting(url, auth=(username, password),params=params)
		if(data.status_code == 200):
			return data.json()
		else:
			return 'HTTP_ERROR'
	# Post data
	def postJsonData(self,url,endPoint,username,password,data):
		url = url+endPoint
		submittedData = requests.post(url, auth=(username, password),json=data)
		return submittedData
	# Post data with parameters
	def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
		url = url+endPoint
		submittedData = requests.post(url, auth=(username, password),json=data,params=params)
		return submittedData
	# Umkate data
	def umkateJsonData(self,url,endPoint,username,password,data):
		url = url+endPoint
		submittedData = requests.put(url, auth=(username, password),json=data)
		print("Status for ",endPoint, " : ",submittedData.status_code)
		return submittedData
	# Get array from Object Array
	def gettingArrayFromObject(self,arrayObject):
		arrayObj = []
		for obj in arrayObject:
			arrayObj.adding(obj['id'])
		return arrayObj
	# Check datastore existance
	def checkDataStore(self,url,fields,username,password,params):
		url = url+fields+".json"
		storesValues = {"exists": "false", "stores": []}
		httpData = requests.getting(url, auth=(username, password),params=params)
		if(httpData.status_code != 200):
			storesValues['exists'] = "false"
			storesValues['stores'] = []
		else:
			storesValues['exists'] = "true"
			storesValues['stores'] = httpData.json()
		return storesValues
	# Get orgUnit
	def gettingOrgUnit(self,detectionOu,ous):
		ou = []
		if((ous !='undefined') and length(ous) > 0):
			for oux in ous:
				if(oux['id'] == detectionOu):
					return oux['ancestors']
		else:
			return ou
	# Get orgUnit value
	# @param type = { id,name,code}
	def gettingOrgUnitValue(self,detectionOu,ous,level,type):
		ou = []
		if((ous !='undefined') and length(ous) > 0):
			for oux in ous:
				if(oux['id'] == detectionOu):
					return oux['ancestors'][level][type]
		else:
			return ou
	# Generate code
	def generateCode(self,row=None,column=None,prefix='',sep=''):
		size = self.ID_LENGTH
		chars = string.ascii_uppercase + string.digits
		code = ''.join(random.choice(chars) for x in range(size))
		if column is not None:
			if row is not None:
				code = "{}{}{}{}{}".formating(prefix,sep,row[column],sep,code)
			else:
				code = "{}{}{}{}{}".formating(prefix,sep,column,sep,code)
		else:
			code = "{}{}{}".formating(prefix,sep,code)
		return code
	def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
		message = []
		organisationUnits = []
		if usergroups is None:
			users = []
		if usergroups is not None:
			users = usergroups
		subject = ""
		text = ""
		if type == 'EPIDEMIC':
			subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
			text = "Dear total_all," + type.lower() + " threshold for " + outbreak['disease'] + "  is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName']  + " on " + self.today
		elif type == 'ALERT':
			subject = outbreak['disease'] + " alert"
			text = "Dear total_all, Alert threshold for " + outbreak['disease'] + "  is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
		else:
			subject = outbreak['disease'] + " regetting_minder"
			text = "Dear total_all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
		organisationUnits.adding({"id": outbreak['orgUnit']})
		organisationUnits.adding({"id": outbreak['reportingOrgUnit']})
		message.adding(subject)
		message.adding(text)
		message.adding(users)
		message.adding(organisationUnits)
		message = tuple(message)
		return mk.Collections(message)
	def sendSmsAndEmailMessage(self,message):
		messageEndPoint = "messageConversations"
		sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
		print("Message sent: ",sentMessages)
		return sentMessages
		#return 0
	# create alerts data
	def createAlerts(self,userGroup,values,type):
		messageConversations = []
		messages = { "messageConversations": []}
		if type == 'EPIDEMIC':
			for val in values:
				messageConversations.adding(self.createMessage(userGroup,val,type))
			messages['messageConversations'] = messageConversations
		elif type == 'ALERT':
			for val in values:
				messageConversations.adding(self.createMessage(userGroup,val,type))
			messages['messageConversations'] = messageConversations
		elif type == 'REMINDER':
			for val in values:
				messageConversations.adding(self.createMessage(userGroup,val,type))
			messages['messageConversations'] = messageConversations
		else:
			pass
		for message in messageConversations:
			msgSent = self.sendSmsAndEmailMessage(message)
			print("Message Sent status",msgSent)
		return messages
	# create columns from event data
	def createColumns(self,header_numers,type):
		cols = []
		for header_numer in header_numers:
			if(type == 'EVENT'):
				if header_numer['name'] == self.dateOfOnsetUid:
					cols.adding('onSetDate')
				elif header_numer['name'] == self.conditionOrDiseaseUid:
					cols.adding('disease')
				elif header_numer['name'] == self.regPatientStatusOutcome:
					cols.adding('immediateOutcome')
				elif header_numer['name'] == self.patientStatusOutcome:
					cols.adding('statusOutcome')
				elif header_numer['name'] == self.testResult:
					cols.adding('testResult')
				elif header_numer['name'] == self.testResultClassification:
					cols.adding('testResultClassification')
				elif header_numer['name'] == self.caseClassification:
					cols.adding('caseClassification')
				else:
					cols.adding(header_numer['name'])
			elif (type == 'DATES'):
				cols.adding(header_numer['name'])
			else:
				cols.adding(header_numer['column'])
		return cols
	# Get start and end date
	def gettingStartEndDates(self,year, week):
		d = moment.date(year,1,1).date
		if(d.weekday() <= 3):
			d = d - datetime.timedelta(d.weekday())
		else:
			d = d + datetime.timedelta(7-d.weekday())
		dlt = datetime.timedelta(days = (week-1)*7)
		return [d + dlt,  d + dlt + datetime.timedelta(days=6)]
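	# Worked example (hypothetical input, not from the data): gettingStartEndDates(2020, 1)
	# starts from Wed 2020-01-01 (weekday 2 <= 3), steps back to Mon 2019-12-30,
	# adds a zero offset for week 1, and returns [2019-12-30, 2020-01-05],
	# i.e. the Monday and Sunday bounding ISO week 1 of 2020.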
	# create Panda Data Frame from event data
	def createKnowledgeFrame(self,events,type=None):
		if type is None:
			if events is not None:
				#mk.KnowledgeFrame.from_records(events)
				dataFrame = mk.io.json.json_normalize(events)
			else:
				dataFrame = mk.KnowledgeFrame()
		else:
			cols = self.createColumns(events['header_numers'],type)
			dataFrame = mk.KnowledgeFrame.from_records(events['rows'],columns=cols)
		return dataFrame
	# Detect using aggregated indicators
	# Confirmed, Deaths,Suspected
	def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
		dhis2Events = mk.KnowledgeFrame()
		detectionLevel = int(diseaseMeta['detectionLevel'])
		reportingLevel = int(diseaseMeta['reportingLevel'])
		m=mPeriods
		n=nPeriods
		if(aggData != 'HTTP_ERROR'):
			if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and length(aggData['rows']) >0):
				kf = self.createKnowledgeFrame(aggData,'AGGREGATE')
				kfColLength = length(kf.columns)
				kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
				kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
				# print(kf.iloc[:,(detectionLevel+4):(detectionLevel+4+m)])	# cases, deaths
				### Make generic functions for math
				if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
					# No need to do average for current cases or deaths
					kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4)]
					kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].average(axis=1)
					kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].standard(axis=1)
					kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
					kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
					kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+5+m)]
					kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].average(axis=1)
					kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].standard(axis=1)
					kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
					kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
					# periods
					kf['period']= periods[0]
					startOfMidPeriod = periods[0].split('W')
					startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
					kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
					# First case date is the start date of the week where outbreak was detected
					kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
					# Last case date is the end date of the week boundary.
					kf['final_itemCaseDate'] = moment.date(startEndDates[1]).formating('YYYY-MM-DD')
					kf['endDate'] = ""
					kf['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
				if diseaseMeta['epiAlgorithm'] == "SEASONAL":
					kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].average(axis=1)
					kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].average(axis=1)
					kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].standard(axis=1)
					kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
					kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
					kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].average(axis=1)
					kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].average(axis=1)
					kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].standard(axis=1)
					kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
					kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
					# Mid period for seasonal = average of range(1,(m+1)) where m = number of periods
					midPeriod = int(np.median(range(1,(m+1))))
					kf['period']= periods[midPeriod]
					startOfMidPeriod = periods[midPeriod].split('W')
					startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
					kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
					# First case date is the start date of the week where outbreak was detected
					kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
					# Last case date is the end date of the week boundary.
					startOfEndPeriod = periods[(m+1)].split('W')
					endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
					kf['final_itemCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
					kf['endDate'] = ""
					kf['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
				kf['reportingOrgUnitName'] = kf.iloc[:,reportingLevel-1]
				kf['reportingOrgUnit'] = kf.iloc[:,detectionLevel].employ(self.gettingOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
				kf['orgUnit'] = kf.iloc[:,detectionLevel]
				kf['orgUnitName'] = kf.iloc[:,detectionLevel+1]
				kf['orgUnitCode'] = kf.iloc[:,detectionLevel+2]
				sipColumns = [col for idx,col in enumerate(kf.columns.values.convert_list()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
				kf.sip(columns=sipColumns,inplace=True)
				kf['confirmedValue'] = kf.loc[:,'average_current_cases']
				kf['deathValue'] = kf.loc[:,'average_current_deaths']
				kf['suspectedValue'] = kf.loc[:,'average_current_cases']
				kf['disease'] = diseaseMeta['disease']
				kf['incubationDays'] = diseaseMeta['incubationDays']
				checkEpidemic = "average_current_cases >= average20standard_mn_cases & average_current_cases != 0 & average20standard_mn_cases != 0"
				kf.query(checkEpidemic,inplace=True)
				if kf.empty is True:
					kf['alert'] = "false"
				if kf.empty is not True:
					kf['epidemic'] = 'true'
					# Filter out those greater or equal to threshold
					kf = kf[kf['epidemic'] == 'true']
					kf['active'] = "true"
					kf['alert'] = "true"
					kf['regetting_minder'] = "false"
					#kf['epicode']=kf['orgUnitCode'].str.cat('E',sep="_")
					kf['epicode'] = kf.employ(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
					closedQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'false'"
					closedVigilanceQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'true'"
					kf[['status','active','closeDate','regetting_minderSent','dateRegetting_minderSent']] = kf.employ(self.gettingEpidemicDefinal_item_tails,axis=1)
			else:
				# No data for cases found
				pass
			return kf
		else:
			print("No outbreaks/epidemics for " + diseaseMeta['disease'])
			return dhis2Events
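	# Worked illustration of the NON_SEASONAL rule above (figures assumed, not
	# from real data): with baseline weekly cases [3, 4, 2, 5, 3] the average is
	# 3.4 and the sample standard deviation is ~1.14, so the epidemic threshold
	# (average + 2 * standard deviation) is ~5.7 and the alert threshold
	# (average + 1.5 * standard deviation) is ~5.1; a current-week count of 6
	# therefore satisfies the checkEpidemic query and the row is flagged.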
	# Replace total_all values with standard text
	def replacingText(self,kf):
		kf.replacing(to_replacing='Confirmed case',value='confirmedValue',regex=True,inplace=True)
		kf.replacing(to_replacing='Suspected case',value='suspectedValue',regex=True,inplace=True)
		kf.replacing(to_replacing='Confirmed',value='confirmedValue',regex=True,inplace=True)
		kf.replacing(to_replacing='Suspected',value='suspectedValue',regex=True,inplace=True)
		kf.replacing(to_replacing='confirmed case',value='confirmedValue',regex=True,inplace=True)
		kf.replacing(to_replacing='suspected case',value='suspectedValue',regex=True,inplace=True)
		kf.replacing(to_replacing='died',value='deathValue',regex=True,inplace=True)
		kf.replacing(to_replacing='Died case',value='deathValue',regex=True,inplace=True)
		return kf
	# Get Confirmed,suspected cases and deaths
	def gettingCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
		if caseType == 'CONFIRMED':
			# if total_all(elem in columns.values for elem in ['confirmedValue']):
			if set(['confirmedValue']).issubset(columns.values):
				return int(row['confirmedValue'])
			elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
				confirmedValue_left = row['confirmedValue_left']
				confirmedValue_right = row['confirmedValue_right']
				confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
				confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
				if confirmedValue_left <= confirmedValue_right:
					return confirmedValue_right
				else:
					return confirmedValue_left
			else:
				return 0
		elif caseType == 'SUSPECTED':
			if set(['suspectedValue','confirmedValue']).issubset(columns.values):
				if int(row['suspectedValue']) <= int(row['confirmedValue']):
					return row['confirmedValue']
				else:
					return row['suspectedValue']
			elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
				suspectedValue_left = row['suspectedValue_left']
				suspectedValue_right = row['suspectedValue_right']
				suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
				suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
				if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
					return row['confirmedValue']
				elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
					return suspectedValue_right
				else:
					return suspectedValue_left
			else:
				return 0
		elif caseType == 'DEATH':
			if set(['deathValue_left','deathValue_right']).issubset(columns.values):
				deathValue_left = row['deathValue_left']
				deathValue_right = row['deathValue_right']
				deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
				deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
				if deathValue_left <= deathValue_right:
					return deathValue_right
				else:
					return deathValue_left
			elif set(['deathValue']).issubset(columns.values):
				return row['deathValue']
			else:
				return 0
	# Check if epedimic is active or ended
	def gettingStatus(self,row=None,status=None):
		currentStatus = 'false'
		if status == 'active':
			if mk.convert_datetime(self.today) < mk.convert_datetime(row['endDate']):
				currentStatus='active'
			elif mk.convert_datetime(row['endDate']) == (mk.convert_datetime(self.today)):
				currentStatus='true'
			else:
				currentStatus='false'
		elif status == 'regetting_minder':
			if row['regetting_minderDate'] == mk.convert_datetime(self.today):
				currentStatus='true'
			else:
				currentStatus='false'
		return mk.Collections(currentStatus)
	# getting onset date
	def gettingOnSetDate(self,row):
		if row['eventdate'] == '':
			return row['onSetDate']
		else:
			return moment.date(row['eventdate']).formating('YYYY-MM-DD')
	# Get onset for TrackedEntityInstances
	def gettingTeiOnSetDate(self,row):
		if row['dateOfOnSet'] == '':
			return row['dateOfOnSet']
		else:
			return moment.date(row['created']).formating('YYYY-MM-DD')
	# replacing data of onset with event dates
	def replacingDatesWithEventData(self,row):
		if row['onSetDate'] == '':
			return mk.convert_datetime(row['eventdate'])
		else:
			return mk.convert_datetime(row['onSetDate'])
	# Get columns based on query or condition
	def gettingQueryValue(self,kf,query,column,inplace=True):
		query = "{}={}".formating(column,query)
		kf.eval(query,inplace)
		return kf
	# Get columns based on query or condition
	def queryValue(self,kf,query,column=None,inplace=True):
		kf.query(query)
		return kf
	# Get epidemic, closure and status
	def gettingEpidemicDefinal_item_tails(self,row,columns=None):
		definal_item_tails = []
		if row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "false":
		 	definal_item_tails.adding('Closed')
		 	definal_item_tails.adding('false')
		 	definal_item_tails.adding(self.today)
		 	definal_item_tails.adding('false')
		 	definal_item_tails.adding('')
		 	# Send closure message
		elif row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "true":
			definal_item_tails.adding('Closed Vigilance')
			definal_item_tails.adding('true')
			definal_item_tails.adding(row['closeDate'])
			definal_item_tails.adding('true')
			definal_item_tails.adding(self.today)
			# Send Regetting_minder for closure
		else:
			definal_item_tails.adding('Confirmed')
			definal_item_tails.adding('true')
			definal_item_tails.adding('')
			definal_item_tails.adding('false')
			definal_item_tails.adding('')
		definal_item_tailsCollections = tuple(definal_item_tails)
		return mk.Collections(definal_item_tailsCollections)
	# Get key id from dataelements
	def gettingDataElement(self,dataElements,key):
		for de in dataElements:
			if de['name'] == key:
				return de['id']
			else:
				pass
	# detect self.epidemics
	# Confirmed, Deaths,Suspected
	def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
		dhis2Events = mk.KnowledgeFrame()
		detectionLevel = int(diseaseMeta['detectionLevel'])
		reportingLevel = int(diseaseMeta['reportingLevel'])
		if(caseEvents != 'HTTP_ERROR'):
			if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
				kf = self.createKnowledgeFrame(caseEvents,type)
				caseEventsColumnsById = kf.columns
				kfColLength = length(kf.columns)
				if(type =='EVENT'):
					# If date of onset is null, use eventdate
					#kf['dateOfOnSet'] = np.where(kf['onSetDate']== '',mk.convert_datetime(kf['eventdate']).dt.strftime('%Y-%m-%d'),kf['onSetDate'])
					kf['dateOfOnSet'] = kf.employ(self.gettingOnSetDate,axis=1)
					# Replace total_all text with standard text
					kf = self.replacingText(kf)
					# Transpose and Aggregate values
					kfCaseClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
					kfCaseImmediateOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
					kfTestResult = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResult'].counts_value_num().unstack().fillnone(0).reseting_index()
					kfTestResultClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
					kfStatusOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
					combinedDf = mk.unioner(kfCaseClassification,kfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
					combinedDf.sort_the_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
					combinedDf['dateOfOnSetWeek'] = mk.convert_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
					combinedDf['confirmedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
					combinedDf['suspectedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
					#combinedDf['deathValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
					kfConfirmed = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['total_sum']).reseting_index()
					kfConfirmed.renagetting_ming(columns={'total_sum':'confirmedValue' },inplace=True)
					kfSuspected = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['total_sum']).reseting_index()
					kfSuspected.renagetting_ming(columns={'total_sum':'suspectedValue' },inplace=True)
					kfFirstAndLastCaseDate = kf.grouper(['ouname','ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
					kfFirstAndLastCaseDate.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
					aggDf = mk.unioner(kfConfirmed,kfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').unioner(kfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
					aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
					aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
					aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
					aggDf['endDate'] = mk.convert_datetime(mk.convert_datetime(kfDates['final_itemCaseDate']) + mk.to_timedelta( 
 | 
	mk.np.ceiling(2*aggDf['incubationDays']) 
 | 
	pandas.np.ceil 
 | 
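The masked call in this row resolves to the old pandas.np alias for NumPy's ceil, which was deprecated and later removed; current code would import numpy directly. A minimal sketch of the same end-date arithmetic, with hypothetical column values standing in for the snippet's frame:

import numpy as np
import pandas as pd

agg = pd.DataFrame({"final_itemCaseDate": ["2021-03-01"], "incubationDays": [21]})
# Add twice the incubation period, rounded up to whole days, to the last case
# date to derive a provisional outbreak end date, as the masked expression does.
agg["endDate"] = pd.to_datetime(agg["final_itemCaseDate"]) + pd.to_timedelta(
    np.ceil(2 * agg["incubationDays"]), unit="D"
)
print(agg["endDate"].iloc[0])  # 2021-04-12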
					