Datasets:
				
			
			
	
			
	
		
			
	
		| text
				 stringlengths 12 1.05M | repo_name
				 stringlengths 5 86 | path
				 stringlengths 4 191 | language
				 stringclasses 1
				value | license
				 stringclasses 15
				values | size
				 int32 12 1.05M | keyword
				 listlengths 1 23 | text_hash
				 stringlengths 64 64 | 
|---|---|---|---|---|---|---|---|
| 
	########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under 
# the provisions of the GPL.
# This programme illustrates building a panel of multiscale models to
# test neuronal plasticity in different contexts.
########################################################################
try:
    import moogli
except Exception as e:
    print( "[INFO ] Could not import moogli. Quitting..." )
    quit()
import numpy
import time
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('../../../Demos/util')
import rdesigneur as rd
# Global simulation parameters for the multiscale plasticity demo.
PI = 3.14159265359
useGssa = True          # use the Gillespie stochastic solver for chemistry
combineSegments = True  # merge morphology segments when loading the cell
# Pick your favourite cell here.
#elecFileName = "ca1_minimal.p"
## Cell morphology from Bannister and Larkman J Neurophys 2015/NeuroMorpho
elecFileName = "h10.CNG.swc"
#elecFileName = "CA1.morph.xml"
#elecFileName = "VHC-neuron.CNG.swc"
# Synapse lists are populated in main() after the model is built.
synSpineList = []
synDendList = []
probeInterval = 0.1     # seconds between probe stimuli
probeAmplitude = 1.0    # activation strength of the probe stimulus
tetanusFrequency = 100.0            # Hz
tetanusAmplitude = 1000             # activation strength on dendritic synapses
tetanusAmplitudeForSpines = 1000    # activation strength on spine synapses
frameRunTime = 1e-3 # 1 ms
# Protocol timing (seconds): baseline, then tetanus, then post-tetanus.
baselineTime = 0.05
tetTime = 0.01
postTetTime = 0.01
runtime = baselineTime + tetTime + postTetTime
def buildRdesigneur():
    '''
    ##################################################################
    # Here we define which prototypes are to be loaded in to the system.
    # Each specification has the format
    # source [localName]
    # source can be any of
    # filename.extension,   # Identify type of file by extension, load it.
    # function(),           # func( name ) builds object of specified name
    # file.py:function() ,  # load Python file, run function(name) in it.
    # moose.Classname       # Make obj moose.Classname, assign to name.
    # path                  # Already loaded into library or on path.
    # After loading the prototypes, there should be an object called 'name'
    # in the library.
    #
    # Returns the configured (but not yet built) rdesigneur instance.
    ##################################################################
    '''
    cellProto = [["./cells/" + elecFileName, "elec"]]
    # Channel kinetics loaded from ChannelML definitions.
    chanProto = [
        ['./chans/hd.xml'],
        ['./chans/kap.xml'],
        ['./chans/kad.xml'],
        ['./chans/kdr.xml'],
        ['./chans/na3.xml'],
        ['./chans/nax.xml'],
        ['./chans/CaConc.xml'],
        ['./chans/Ca.xml'],
        ['./chans/NMDA.xml'],
        ['./chans/Glu.xml']
    ]
    chemProto = []
    ##################################################################
    # Here we define what goes where, and any parameters. Each distribution
    # has the format
    # protoName, path, field, expr, [field, expr]...
    # where
    #   protoName identifies the prototype to be placed on the cell
    #   path is a MOOSE wildcard path specifying where to put things
    #   field is the field to assign.
    #   expr is a math expression to define field value. This uses the
    #     muParser. Built-in variables are:
    #       p, g, L, len, dia, maxP, maxG, maxL.
    #     where
    #       p = path distance from soma, threaded along dendrite
    #       g = geometrical distance from soma (shortest distance)
    #       L = electrotonic distance from soma: number of length constants
    #       len = length of dendritic compartment
    #       dia = diameter of dendritic compartment
    #       maxP = maximal value of 'p' for the cell
    #       maxG = maximal value of 'g' for the cell
    #       maxL = maximal value of 'L' for the cell
    #
    #     The muParser provides most math functions, and the Heaviside
    #     function H(x) = 1 for x > 0 is also provided.
    ##################################################################
    passiveDistrib = [
        [".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5",
            "Em", "-58e-3", "initVm", "-65e-3"],
        [".", "#axon#", "RA", "0.5"]
    ]
    chanDistrib = [
        ["hd", "#dend#,#apical#", "Gbar", "5e-2*(1+(p*3e4))"],
        ["kdr", "#", "Gbar", "p < 50e-6 ? 500 : 100"],
        ["na3", "#soma#,#dend#,#apical#", "Gbar", "250"],
        ["nax", "#soma#,#axon#", "Gbar", "1250"],
        ["kap", "#axon#,#soma#", "Gbar", "300"],
        ["kap", "#dend#,#apical#", "Gbar",
            "300*(H(100-p*1e6)) * (1+(p*1e4))"],
        ["Ca_conc", "#soma#,#dend#,#apical#", "tau", "0.0133"],
        ["kad", "#soma#,#dend#,#apical#", "Gbar",
            "300*H(p - 100e-6)*(1+p*1e4)"],
        ["Ca", "#dend#,#apical#", "Gbar", "p<160e-6? 10+ p*0.25e-6 : 50"],
        ["Ca", "#soma#", "Gbar", "10"],
        ["glu", "#dend#,#apical#", "Gbar", "200*H(p-200e-6)"],
        ["NMDA", "#dend#,#apical#", "Gbar", "2*H(p-200e-6)"]
    ]
    # Spine placement on apical dendrites. Positional form:
    # [proto, path, spacing, spacingDistrib, size, sizeDistrib,
    #  angle, angleDistrib]
    spineDistrib = [
        ["spine", '#apical#',
            "20e-6", "2e-6",
            "1", "0.5",
            "0", str(2 * PI)]
    ]
    chemDistrib = []
    # NOTE: an earlier 'makeSpineProto()' assignment was dead code that this
    # line silently overwrote; only the active-spine prototype is used.
    spineProto = [['makeActiveSpine()', 'spine']]
    ######################################################################
    # Here we define the mappings across scales. Format:
    # sourceObj sourceField destObj destField offset scale
    # where the coupling expression is anything a muParser can evaluate,
    # using the input variable x. For example: 8e-5 + 300*x
    # For now, let's use existing adaptors which take an offset and scale.
    ######################################################################
    adaptorList = []
    ######################################################################
    # Having defined everything, now to create the rdesigneur and proceed
    # with creating the model.
    ######################################################################
    rdes = rd.rdesigneur(
        useGssa = useGssa,
        combineSegments = combineSegments,
        stealCellFromLibrary = True,
        passiveDistrib = passiveDistrib,
        spineDistrib = spineDistrib,
        chanDistrib = chanDistrib,
        chemDistrib = chemDistrib,
        cellProto = cellProto,
        chanProto = chanProto,
        chemProto = chemProto,
        spineProto = spineProto,
        adaptorList = adaptorList
    )
    return rdes
def buildPlots( rdes ):
    """Create a /graphs container and a table recording somatic Vm."""
    moose.Neutral( '/graphs' )
    vmTable = moose.Table( '/graphs/VmTab' )
    moose.connect( vmTable, 'requestOut', rdes.soma, 'getVm' )
def displayPlots():
    """Show two matplotlib figures: recorded Vm traces, and channel
    density (Gbar per membrane area) vs. path distance from the soma.

    Blocks on pylab.show(); called once the simulation has finished.
    """
    # Figure 1: all Vm tables recorded under /graphs.
    pylab.figure( 1, figsize = (8, 10) )
    pylab.subplot( 1, 1, 1 )
    for i in moose.wildcardFind( "/graphs/#VmTab" ):
        t = numpy.arange( 0, i.vector.size, 1 ) * i.dt
        pylab.plot( t, i.vector, label = i.name )
    pylab.xlabel( "Time (s)" )
    pylab.legend()
    pylab.title( 'Vm' )
    # Figure 2: spatial distribution of each channel's surface density.
    pylab.figure( 2, figsize = (8, 10) )
    ax = pylab.subplot( 1, 1, 1 )
    neuron = moose.element( '/model/elec' )
    comptDistance = dict( zip( neuron.compartments, neuron.pathDistanceFromSoma ) )
    for i in moose.wildcardFind( '/library/#[ISA=ChanBase]' ):
        chans = moose.wildcardFind( '/model/elec/#/' + i.name )
        print( i.name, len( chans ) )
        # Path distance in microns; density = Gbar / membrane area.
        p = [ 1e6 * comptDistance.get( j.parent, 0 ) for j in chans ]
        Gbar = [ j.Gbar / (j.parent.length * j.parent.diameter * PI) for j in chans ]
        if len( p ) > 2:
            pylab.plot( p, Gbar, linestyle = 'None', marker = ".", label = i.name )
    ax.set_yscale( 'log' )
    pylab.xlabel( "Distance from soma (microns)" )
    pylab.ylabel( "Channel density (Siemens/sq mtr)" )
    pylab.legend()
    pylab.title( 'Channel distribution' )
    pylab.show()
def create_vm_viewer(rdes):
    """Build a moogli viewer that colours compartments by membrane potential.

    The viewer's interlude callback advances the simulation one frame at a
    time and delivers stimuli; its postlude shows the summary plots.
    """
    network = moogli.extensions.moose.read(rdes.elecid.path)
    # Map Vm in [-80 mV, +20 mV] onto the colormap; values outside are clipped.
    normalizer = moogli.utilities.normalizer(-0.08,
                                             0.02,
                                             clipleft=True,
                                             clipright=True)
    # Blue (opaque) at the low end, yellow (mostly transparent) at the high end.
    colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
                                                                  0.0,
                                                                  1.0,
                                                                  1.0),
                                              moogli.colors.Color(1.0,
                                                                  1.0,
                                                                  0.0,
                                                                  0.1)])
    mapper = moogli.utilities.mapper(colormap, normalizer)
    vms = [moose.element(x).Vm for x in list(network.shapes.keys())]
    network.set("color", vms, mapper)
    def prelude(view):
        # One-time camera setup before the animation starts.
        view.pitch(PI/2)
        view.zoom(0.4)
    def interlude(view):
        # Runs once per frame: advance the simulation, recolour, rotate.
        moose.start(frameRunTime)
        vms = [moose.element(x).Vm for x in list(network.shapes.keys())]
        network.set("color", vms, mapper)
        view.yaw(0.01)
        currTime = moose.element('/clock').currentTime
        if currTime < runtime:
            deliverStim(currTime)
        else:
            view.stop()
    def postlude(view):
        # After the run completes, show the matplotlib summary figures.
        displayPlots()
    viewer = moogli.Viewer("vm-viewer")
    viewer.attach_shapes(list(network.shapes.values()))
    view = moogli.View("vm-view",
                       prelude=prelude,
                       interlude=interlude,
                       postlude=postlude)
    viewer.attach_view(view)
    return viewer
def create_ca_viewer(rdes):
    """Build a moogli viewer that colours compartments by Ca concentration.

    Compartments without their own Ca_conc element fall back to the library
    prototype so every shape has something to display.
    """
    network = moogli.extensions.moose.read(rdes.elecid.path)
    ca_elements = []
    for compartment_path in list(network.shapes.keys()):
        if moose.exists(compartment_path + '/Ca_conc'):
            ca_elements.append(moose.element(compartment_path + '/Ca_conc'))
        else:
            # Fallback: the prototype in /library (constant baseline value).
            ca_elements.append(moose.element('/library/Ca_conc'))
    # Map [Ca] in [0, 2e-3] onto the colormap; values outside are clipped.
    normalizer = moogli.utilities.normalizer(0.0,
                                             0.002,
                                             clipleft=True,
                                             clipright=True)
    # Red (opaque) at the low end, cyan (mostly transparent) at the high end.
    colormap = moogli.colors.UniformColorMap([moogli.colors.Color(1.0,
                                                                  0.0,
                                                                  0.0,
                                                                  1.0),
                                              moogli.colors.Color(0.0,
                                                                  1.0,
                                                                  1.0,
                                                                  0.1)])
    mapper = moogli.utilities.mapper(colormap, normalizer)
    cas = [element.Ca for element in ca_elements]
    network.set("color", cas, mapper)
    def prelude(view):
        # One-time camera setup before the animation starts.
        view.pitch(PI/2)
        view.zoom(0.4)
    def interlude(view):
        # Runs once per frame: advance the simulation, recolour, rotate.
        moose.start(frameRunTime)
        cas = [element.Ca for element in ca_elements]
        network.set("color", cas, mapper)
        view.yaw(0.01)
        currTime = moose.element('/clock').currentTime
        if currTime < runtime:
            deliverStim(currTime)
        else:
            view.stop()
    viewer = moogli.Viewer("ca-viewer")
    viewer.attach_shapes(list(network.shapes.values()))
    view = moogli.View("ca-view",
                       prelude=prelude,
                       interlude=interlude)
    viewer.attach_view(view)
    return viewer
def build3dDisplay(rdes):
    print (("building 3d Display"))
    app = QtGui.QApplication(sys.argv)
    vm_viewer = create_vm_viewer(rdes)
    vm_viewer.resize(700, 900)
    vm_viewer.show()
    vm_viewer.start()
    ca_viewer = create_ca_viewer(rdes)
    ca_viewer.resize(700, 900)
    ca_viewer.show()
    ca_viewer.start()
    return app.exec_()
def deliverStim( currTime ):
    """Deliver either the tetanic or the probe stimulus for this frame.

    During the tetanus window, both dendritic and spine synapses fire at
    tetanusFrequency; otherwise spine synapses get a periodic probe pulse.
    """
    if baselineTime < currTime < baselineTime + tetTime:
        # Tetanus window: fire every tetStep-th frame.
        frameIdx = int( (currTime - baselineTime) / frameRunTime )
        tetStep = int( 1.0 / (tetanusFrequency * frameRunTime) )
        if frameIdx % tetStep == 0:
            for syn in synDendList:
                syn.activation( tetanusAmplitude )
            for syn in synSpineList:
                syn.activation( tetanusAmplitudeForSpines )
    else:
        # Baseline / post-tetanus: periodic probe on spine synapses only.
        frameIdx = int( currTime / frameRunTime )
        probeStep = int( probeInterval / frameRunTime )
        if frameIdx % probeStep == 0:
            print (("Doing probe Stim at ", currTime))
            for syn in synSpineList:
                syn.activation( probeAmplitude )
def main():
    """Build the multiscale model, identify synapses, and run the display."""
    global synSpineList 
    global synDendList
    # Fixed seed so spine placement (which uses random distributions) repeats.
    numpy.random.seed( 1234 )
    rdes = buildRdesigneur()
    rdes.buildModel( '/model' )
    assert( moose.exists( '/model' ) )
    # Synapses on spine heads get the probe/tetanus-for-spines stimuli;
    # all remaining glu/NMDA synapses are treated as dendritic.
    synSpineList = moose.wildcardFind( "/model/elec/#head#/glu,/model/elec/#head#/NMDA" )
    temp = set( moose.wildcardFind( "/model/elec/#/glu,/model/elec/#/NMDA" ) )
    synDendList = list( temp - set( synSpineList ) )
    print (("num spine, dend syns = ", len( synSpineList ), len( synDendList )))
    moose.reinit()
    #for i in moose.wildcardFind( '/model/elec/#apical#/#[ISA=CaConcBase]' ):
        #print i.path, i.length, i.diameter, i.parent.length, i.parent.diameter
    buildPlots(rdes)
    # Run for baseline, tetanus, and post-tetanic settling time 
    t1 = time.time()
    build3dDisplay(rdes)
    print (('real time = ', time.time() - t1))
if __name__ == '__main__':
    main()
 | 
	BhallaLab/moose | 
	moose-examples/paper-2015/Fig2_elecModels/Fig2C.py | 
	Python | 
	gpl-3.0 | 14,223 | 
	[
  "MOOSE",
  "NEURON"
] | 
	5eb6a5a439a675762a02c12cdff996e6a0d98f6ee874773cba2951727562aac5 | 
| 
	# creates: N.LDA
import os
from gpaw.test import gen
gen('N')
os.system('cp N.LDA ../_build')
 | 
	qsnake/gpaw | 
	doc/setups/N.py | 
	Python | 
	gpl-3.0 | 94 | 
	[
  "GPAW"
] | 
	ad7d53917d97406476db3321deeeb0fb89711b3341fa301373e89d7cf3800a42 | 
| 
	# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import os
from neon.backends import gen_backend
from neon.data import DataIterator, load_mnist, load_text, Text
from neon.initializers import Gaussian, Constant
from neon.layers import GeneralizedCost, Affine, BatchNorm
from neon.layers import Dropout, Conv, Pooling, MergeConcat, Recurrent
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.util.persist import save_obj
def test_model_get_outputs_rnn(backend):
    """Check Model.get_outputs on a recurrent network returns one
    prediction per (example, timestep, class) over the PTB text set."""
    data_path = load_text('ptb-valid')
    data_set = Text(time_steps=50, path=data_path)
    # weight initialization
    init = Constant(0.08)
    # model initialization
    layers = [
        Recurrent(150, init, Logistic()),
        Affine(len(data_set.vocab), init, bias=init, activation=Rectlin())
    ]
    model = Model(layers=layers)
    output = model.get_outputs(data_set)
    # Shape contract: (num examples, sequence length, num classes).
    assert output.shape == (data_set.ndata, data_set.seq_length, data_set.nclass)
def test_model_get_outputs(backend):
    """Check Model.get_outputs matches manual fprop over the same batches."""
    (X_train, y_train), (X_test, y_test), nclass = load_mnist()
    # Keep the test fast: only three batches of MNIST.
    train_set = DataIterator(X_train[:backend.bsz * 3])
    init_norm = Gaussian(loc=0.0, scale=0.1)
    layers = [Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
    mlp = Model(layers=layers)
    out_list = []
    # Reference: run fprop by hand and collect host-side copies.
    for x, t in train_set:
        x = mlp.fprop(x)
        out_list.append(x.get().T.copy())
    ref_output = np.vstack(out_list)
    # Reset so get_outputs sees the same batches from the start.
    train_set.reset()
    output = mlp.get_outputs(train_set)
    assert np.allclose(output, ref_output)
def test_model_serialize(backend):
    """Train briefly, serialize the model with its states, reload it, and
    verify outputs, optimizer states, and params all round-trip exactly."""
    (X_train, y_train), (X_test, y_test), nclass = load_mnist()
    train_set = DataIterator([X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))
    init_norm = Gaussian(loc=0.0, scale=0.01)
    # initialize model: two parallel paths merged by concatenation, so the
    # serialized graph covers conv, pooling, dropout, and batch norm layers.
    path1 = [Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
             Pooling(2),
             Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
    path2 = [Dropout(keep=0.5),
             Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
    layers = [MergeConcat([path1, path2]),
              Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
              BatchNorm(),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    n_test = 3
    num_epochs = 3
    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break
    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        # inference=True so Dropout/BatchNorm behave deterministically.
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break
    # Serialize model
    save_obj(mlp.serialize(keep_states=True), tmp_save)
    # Load model
    mlp = Model(layers=layers)
    mlp.load_weights(tmp_save)
    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break
    # Check outputs, states, and params are the same
    for output, output_exp in zip(outputs, outputs_exp):
        assert np.allclose(output.get(), output_exp.get())
    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):  # this is the batch norm case
                for _s, _s_e in zip(s, s_e):
                    assert np.allclose(_s, _s_e)
            else:
                assert np.allclose(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            if isinstance(p, list):  # this is the batch norm case
                for _p, _p_e in zip(p, p_e):
                    assert np.allclose(_p, _p_e)
            else:
                assert np.allclose(p, p_e)
    # Clean up the temporary checkpoint file.
    os.remove(tmp_save)
if __name__ == '__main__':
    # Smoke-run the RNN output test directly on a GPU backend.
    backend = gen_backend(backend='gpu', batch_size=50)
    test_model_get_outputs_rnn(backend)
 | 
	misko/neon | 
	tests/test_model.py | 
	Python | 
	apache-2.0 | 5,468 | 
	[
  "Gaussian"
] | 
	3bfd6fb19f3b714563f6e85de7e32ae6cf3194700cb2bc8edfd82d289f9d24bc | 
| 
	#!/usr/bin/env python
"""Extract read start from BAM files to Wig format for PAUSE.
Usage:
    bam_to_wiggle.py <BAM file>
"""
import os
import tempfile
from contextlib import contextmanager
import pysam
import subprocess
import argparse
@contextmanager
def indexed_bam(bam_file):
    """Yield an open pysam reader for *bam_file* (an open file object),
    creating the .bai index first if it does not already exist.

    The reader is closed when the with-block exits normally.
    """
    if not os.path.exists(bam_file.name + ".bai"):
        pysam.index(bam_file.name)
    sam_reader = pysam.Samfile(bam_file.name, "rb")
    yield sam_reader
    sam_reader.close()
def gen_header(bam_file, suffix):
    """Return a UCSC wiggle track header naming the track after the BAM
    file's basename plus the given strand suffix."""
    base = os.path.splitext(os.path.basename(bam_file))[0]
    return "track type=wiggle_0 name=%s_%s visibility=full\n" % (base, suffix)
def convert_to_bigwig(wig_file, chr_sizes, bw_file):
    """Convert a wiggle file to BigWig via the UCSC wigToBigWig tool.

    Writes a temporary chromosome-sizes file next to *bw_file*, invokes
    wigToBigWig, and removes the sizes file even on failure.
    """
    # This will be fine under Galaxy, but could use temp folder?
    sizes_path = "%s-sizes.txt" % (os.path.splitext(bw_file)[0])
    with open(sizes_path, "w") as handle:
        for name, length in chr_sizes:
            handle.write("%s\t%s\n" % (name, length))
    try:
        subprocess.check_call(["wigToBigWig", wig_file, sizes_path, bw_file])
    finally:
        os.unlink(sizes_path)
    return bw_file
def start_data(bam_file, starts_f=None, starts_r=None):
    """Count read starts per base on each strand and emit BigWig files.

    bam_file : open BAM file object
    starts_f / starts_r : open file objects naming the forward / reverse
        BigWig outputs.
    """
    with indexed_bam(bam_file) as work_bam:
        # Text mode: we write str wiggle lines below. The default binary
        # mode would raise TypeError on Python 3.
        starts_f_wig = tempfile.NamedTemporaryFile(mode="w", delete=False)
        starts_r_wig = tempfile.NamedTemporaryFile(mode="w", delete=False)
        # Materialize: zip() is a one-shot iterator on Python 3, and sizes
        # is consumed again by convert_to_bigwig at the end.
        sizes = list(zip(work_bam.references, work_bam.lengths))
        regions = [(name, 0, length) for name, length in sizes]
        for chrom, start, end in regions:
            if end is None and chrom in work_bam.references:
                end = work_bam.lengths[work_bam.references.index(chrom)]
            assert end is not None, "Could not find %s in header" % chrom
            # Since the file is sorted, we could actually optimise this bit
            # out...currently fails cost benefit analysis so will wait until
            # memory issues are reported.
            start_map_f = {}
            start_map_r = {}
            for col in work_bam.fetch(chrom, start, end):
                # For a reverse-strand read the biological start is the
                # alignment end (aend); for forward it is pos + 1 (1-based).
                if col.is_reverse:
                    rstart = col.aend
                    if rstart in start_map_r:
                        start_map_r[rstart] += 1
                    else:
                        start_map_r[rstart] = 1
                else:
                    rstart = col.pos + 1
                    if rstart in start_map_f:
                        start_map_f[rstart] += 1
                    else:
                        start_map_f[rstart] = 1
            # Write per-chromosome variableStep sections, one line per base.
            starts_f_wig.write(gen_header(bam_file.name, "f"))
            starts_f_wig.write("variableStep chrom=%s\n" % chrom)
            for i in range(start + 1, end + 1):
                if i in start_map_f:
                    starts_f_wig.write("%s %.1f\n" % (i, start_map_f[i]))
                else:
                    starts_f_wig.write("%s 0.0\n" % i)
            starts_r_wig.write(gen_header(bam_file.name, "r"))
            starts_r_wig.write("variableStep chrom=%s\n" % chrom)
            for i in range(start + 1, end + 1):
                if i in start_map_r:
                    starts_r_wig.write("%s %.1f\n" % (i, start_map_r[i]))
                else:
                    starts_r_wig.write("%s 0.0\n" % i)
        starts_f_wig.close()
        starts_r_wig.close()
        try:
            convert_to_bigwig(starts_f_wig.name, sizes, starts_f.name)
            convert_to_bigwig(starts_r_wig.name, sizes, starts_r.name)
        finally:
            # Temporary wiggle files are only needed for the conversion.
            os.unlink(starts_f_wig.name)
            os.unlink(starts_r_wig.name)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and hand them to start_data.
    arg_parser = argparse.ArgumentParser(description="Extract starts from BAM as BigWig")
    arg_parser.add_argument("bam_file", type=argparse.FileType("r"), help="Bam file")
    arg_parser.add_argument(
        "--starts_f",
        type=argparse.FileType("wb"),
        default="starts.f.bw",
        help="Sense Starts File",
    )
    arg_parser.add_argument(
        "--starts_r",
        type=argparse.FileType("wb"),
        default="starts.r.bw",
        help="Antisense Starts File",
    )
    arg_parser.add_argument("--version", action="version", version="0.1")
    opts = arg_parser.parse_args()
    start_data(**vars(opts))
 | 
	TAMU-CPT/galaxy-tools | 
	tools/pause/pause_starts_to_wiggle.py | 
	Python | 
	gpl-3.0 | 4,610 | 
	[
  "Galaxy",
  "pysam"
] | 
	7a17a731153d43766a00672d66cbc22da6041df4aad39283a34c65b81a35440d | 
| 
	#!/usr/bin/env python
""" check_pseudo.py calculates energy for 7 alat points near SIESTA equilibrium to fine tune the delta-factor. 
"""
import os
import sys
import uuid
import glob
import numpy as np
import shutil
import matplotlib.pyplot as plt
from generate import PGInputFile, PTInputFile
from get_energies import read_energy
from calc_delta import BM, read_ref_data, calcDelta, get_alats, get_volumes
def check_pseudo(settings, data_dir):
    """ Checks pseudopotential for delta factor calculation

    Collects SIESTA total energies from per-alat subdirectories of
    *data_dir*, fits them, evaluates a Birch-Murnaghan equation of state,
    and writes the delta factor vs. WIEN2k reference data into
    data_dir/check/BP.dat.

    Arguments:
        settings -- object with a ``calc`` dict (keys used here:
            "element", "nat"); presumably a PG/PT input wrapper -- TODO confirm
        data_dir -- directory containing the per-alat calculation folders
            and a single .psf pseudopotential file
    """
    cwd = os.getcwd()
    element = settings.calc["element"]
    x, y = [], []
    os.chdir(data_dir)
    pseudo_file = glob.glob("*.psf")[0]
    
    # Each numerically-named subdirectory is one lattice-constant point.
    for root, dirs, _ in os.walk(os.getcwd()):
        if "check" in root: continue
        for dir_i in dirs:
            try:
                alat = float(dir_i)
            except:
                continue
            energies = read_energy(element, alat)
            if energies is not None:
                x_i, y_i = energies
                x.append(x_i)
                y.append(y_i)
    # Normalise volume and energy per atom.
    x = np.array(x) / settings.calc["nat"]
    y = np.array(y) / settings.calc["nat"]
    p = np.polyfit(x, y, 2)
    # make 7 points out of existing data
    if len(x) == 7:
        x_p = x
        y_p = y
    else:
        # Resample 7 volumes and evaluate the quadratic fit at them.
        x_p = get_volumes(7, settings.calc) / settings.calc["nat"]
        y_p = np.poly1d(p)(x_p)
    # get check directory
    if not os.path.exists("check"):
        os.makedirs("check")
    shutil.copy(pseudo_file, "check")
    os.chdir("check")
    # write original data
    np.savetxt("energies_original.txt", np.vstack((x, y)).T)
    # Birch-Murnaghan fit: equilibrium volume, bulk modulus, its derivative.
    vol, bulk_mod, bulk_deriv, _ = BM(np.vstack((x_p, y_p)).T)
    np.savetxt("energies_BM.txt", np.vstack((x_p, y_p)).T)
    
    our_data = np.core.records.fromrecords([(element, vol, bulk_mod, bulk_deriv), ], names=('element', 'V0', 'B0', 'BP'))
    ref_data = read_ref_data(os.path.join(cwd, "delta", "WIEN2k.txt"))
    ref_data_el = ref_data[ref_data['element'] == element]
    delta, delta_rel, _ = calcDelta(our_data, ref_data_el, useasymm=False)
    with open("BP.dat", "w") as f:
        f.write("Our data: {}\n".format(our_data))
        f.write("Reference data: {}\n".format(ref_data_el))
        f.write("Delta factor: {} {}\n".format(delta, delta_rel))
 | 
	ansobolev/PseudoGenerator | 
	pseudogen/check_pseudo.py | 
	Python | 
	mit | 2,386 | 
	[
  "SIESTA",
  "WIEN2k"
] | 
	2286a65136ae498e930e31d1f7c6bfcf92c0cc82d6b4540635ee0de03e12cad9 | 
| 
	from copy import deepcopy as dc
from itertools import combinations
import ase.io as aseio
import numpy as np
from ase.atoms import Atoms as AAtoms
from pyiid.asa import calculate_asa, get_neighbor_list, get_coordination
__author__ = 'christopher'
def convert_stru_to_atoms(stru):
    """Convert a diffpy-style structure into an ASE Atoms object, carrying
    each site's label over as the atom tag."""
    symbols = [site.element for site in stru]
    positions = [site.xyz for site in stru]
    labels = [site.label for site in stru]
    return AAtoms(symbols, np.array(positions), tags=labels)
def build_sphere_np(file_name, radius):
    """
    Build a spherical nanoparticle by tiling a unit cell and carving a sphere
    :param file_name: ASE loadable atomic positions
    :param radius: Radius of particle in Angstroms
    :return: ase.Atoms object with periodic boundary conditions disabled
    """
    atoms = aseio.read(file_name)
    cell_dist = atoms.get_cell()
    # Repeat the cell enough times along each axis to contain the sphere.
    multiple = np.ceil(2 * radius / cell_dist.diagonal()).astype(int)
    atoms = atoms.repeat(multiple)
    # Centre the block on its centre of mass, then drop atoms outside radius.
    com = atoms.get_center_of_mass()
    atoms.translate(-com)
    del atoms[[atom.index for atom in atoms
               if np.sqrt(np.dot(atom.position, atom.position)) >=
               np.sqrt(radius ** 2)]]
    atoms.center()
    atoms.set_pbc((False, False, False))
    return atoms
def tag_surface_atoms(atoms, tag=1, probe=1.4, cutoff=None):
    """
    Find which are the surface atoms in a nanoparticle.
    Parameters
    ----------
    atoms: ase.Atoms object
        The atomic configuration, tagged in place
    tag: int
        The number with which to tag the surface atoms
    probe: float, optional
        Radius of the probe molecule, default is 1.4 A the radius of water
    cutoff: float
        Bond cutoff, defaults to van der Waals radius
    """
    # Delegates entirely to the accessible-surface-area calculation.
    calculate_asa(atoms, probe, tag=tag, cutoff=cutoff)
def add_ligands(ligand, surface, distance, coverage, head, tail):
    """Attach copies of a ligand to a fraction of the surface atoms.

    Parameters
    ----------
    ligand: ase.Atoms object
        The molecule to attach
    surface: ase.Atoms object
        The nanoparticle; not modified (a copy is decorated)
    distance: float
        Offset of the ligand head from the surface atom, along the
        outward radial direction
    coverage: float
        Probability in [0, 1] that any given surface atom gets a ligand
    head, tail: int
        Indices into *ligand* defining its attachment axis

    Returns
    -------
    ase.Atoms:
        The surface with ligands added
    """
    atoms = dc(surface)
    tag_surface_atoms(atoms)
    for atom in atoms:
        # Decorate only tagged (surface) atoms, with probability `coverage`.
        if atom.tag == 1 and np.random.random() < coverage:
            pos = atom.position
            com = surface.get_center_of_mass()
            # Outward unit vector from the particle centre through this atom.
            disp = pos - com
            norm_disp = disp / np.sqrt(np.dot(disp, disp))
            # Unit vector along the ligand's head-to-tail axis.
            l_length = ligand[tail].position - ligand[head].position
            norm_l_length = l_length / np.sqrt(np.dot(l_length, l_length))
            # Rotate the ligand axis onto the outward normal, anchor its
            # head at the surface atom plus the requested offset.
            ads = dc(ligand)
            ads.rotate(norm_l_length, a=norm_disp)
            ads.translate(-ads[head].position)
            ads.translate(pos + distance * norm_disp)
            atoms += ads
    return atoms
def get_angle_list(atoms, cutoff, element=None, tag=None):
    """
    Get all the angles in the NP
    Parameters
    ----------
    atoms: ase.Atoms object
        The atomic configuration
    cutoff: float
        Bond length cutoff
    element: str, optional
        Limit the list to only this element
    tag: int
        Limit the list to only this tag
    Returns
    -------
    ndarray:
        The list of bond angles in degrees
    """
    n_list = list(get_neighbor_list(cutoff, atoms))
    angles = []
    for i in range(len(atoms)):
        # All unordered pairs of neighbours of atom i form an angle at i.
        z = list(combinations(n_list[i], 2))
        for a in z:
            # Filter on the central atom; break skips the rest of this
            # atom's pairs since the condition depends only on i.
            if (element is not None and atoms[i].symbol != element) or \
                    (tag is not None and atoms[i].tag != tag):
                break
            angles.append(np.rad2deg(atoms.get_angle([a[0], i, a[1]])))
    return np.nan_to_num(np.asarray(angles))
def get_coord_list(atoms, cutoff, element=None, tag=None):
    """
    Get the coordination numbers in the NP
    Parameters
    ----------
    atoms: ase.Atoms object or list of ase.Atoms objects
        The atomic configuration(s); for a list, statistics are taken
        across the configurations
    cutoff: float
        Bond length cutoff
    element: str, optional
        Limit the list to only this element
    tag: int
        Limit the list to only this tag
    Returns
    -------
    ndarray:
        The list of coordination numbers; for a list input, a tuple of
        (mean, std) across configurations
    """
    if isinstance(atoms, list):
        coord_l = []
        for atms in atoms:
            a = get_coordination(cutoff, atms)
            # BUGFIX: the filters below previously called methods on
            # `atoms` (the list), which has no get_chemical_symbols /
            # get_tags; they must query the individual configuration.
            if element is not None and tag is not None:
                coord_l.append(
                    a[(np.asarray(atms.get_chemical_symbols()) == element) &
                      (atms.get_tags() == tag)])
            elif element is not None:
                coord_l.append(
                    a[np.asarray(atms.get_chemical_symbols()) == element])
            elif tag is not None:
                coord_l.append(a[atms.get_tags() == tag])
            else:
                coord_l.append(a)
        c = np.asarray(coord_l)
        return np.average(c, axis=0), np.std(c, axis=0)
    else:
        a = get_coordination(cutoff, atoms)
        if element is not None and tag is not None:
            return a[(np.asarray(atoms.get_chemical_symbols()) == element) &
                     (atoms.get_tags() == tag)]
        elif element is not None:
            return a[np.asarray(atoms.get_chemical_symbols()) == element]
        elif tag is not None:
            return a[atoms.get_tags() == tag]
        else:
            return a
def get_bond_dist_list(atoms, cutoff, element=None, tag=None):
    """
    Get all the bond distances in the NP

    Parameters
    ----------
    atoms: ase.Atoms object
        The atomic configuration
    cutoff: float
        Bond length cutoff
    element: str, optional
        Limit the list to only this element
    tag: int, optional
        Limit the list to only this tag

    Returns
    -------
    ndarray:
        The list of bond distances
    """
    n_list = list(get_neighbor_list(cutoff, atoms))
    bonds = []
    for i in range(len(atoms)):
        # The element/tag test depends only on atom i, so hoist it out of
        # the neighbor loop.  (The original ``break`` on the first neighbor
        # is equivalent to skipping the atom entirely.)
        if (element is not None and atoms[i].symbol != element) or \
                (tag is not None and atoms[i].tag != tag):
            continue
        for a in n_list[i]:
            bonds.append(atoms.get_distance(i, a))
    # Neighborless results may yield NaNs; clamp them to zero.
    return np.nan_to_num(np.asarray(bonds))
 | 
	CJ-Wright/pyIID | 
	pyiid/utils.py | 
	Python | 
	bsd-3-clause | 5,803 | 
	[
  "ASE"
] | 
	5e32988f1ea4991d436343938a03c8967054e4336fc3660a3273e5bdda9ddf19 | 
| 
	#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A app configuration defines the user-tunable parameters of the application and also the quality evaluation such as the:
* Amazon Mechanical Turk HIT description, pricing, keywords, etc.
* The description and instructions of the task
* The configuration of the type of test (e.g 'mushra' or 'pairwise')
* The definition of the quality scales
* The paths to the audio stimuli
* Which components of the evaluation are active (e.g. pre-test survey, post-test survey, hearing screening, etc.)
This subpackage contains a base configuration which contains overridable defaults, as well as pre-defined testing
configurations for common audio quality evaluation scenarios. Make sure that before you run a test that you at least
change the stimuli and the ``SERVER_ADDRESS`` variable.
.. seealso:: :doc:`../test_configurations`
"""
import os
try:
    from secret_keys import CSRF_SECRET_KEY, SESSION_KEY
except ImportError:
    try:
        CSRF_SECRET_KEY = os.environ['CSRF_SECRET_KEY']
        SESSION_KEY = os.environ['SESSION_KEY']
    except KeyError:
        raise KeyError('No keys found. Either define a secret_keys.py file (using generate_key_files.py) or set the '
                       'keys using environment variables.')
# Get the application mode from the environment variable APP_MODE
APP_MODE = os.getenv('APP_MODE')
# HEARING TEST CONSTANTS
MIN_HEARING_TEST_AUDIO_TONES = 2
MAX_HEARING_TEST_AUDIO_TONES = 8
HEARING_TEST_AUDIO_FILES_PER_TONES = 4
MIN_HEARING_TEST_AUDIO_INDEX = HEARING_TEST_AUDIO_FILES_PER_TONES * MIN_HEARING_TEST_AUDIO_TONES
MAX_HEARING_TEST_AUDIO_INDEX = HEARING_TEST_AUDIO_FILES_PER_TONES * MAX_HEARING_TEST_AUDIO_TONES
# HEARING RESPONSE ESTIMATION CONSTANTS
HEARING_RESPONSE_NFREQS = 8 # number of different frequencies
HEARING_RESPONSE_NADD = 3 # number of max additional tones (60 for 10dB, 3 for 20dB Spacing)
class BaseConfig(object):
    """
    The base application configuration.
    Attributes
    ----------
    DEBUG : bool
        Enable/disable debug mode (see Flask docs) (default is False)
    TESTING : bool
        Enable/disable testing mode (see Flask docs) (default is False)
    SECRET_KEY : str
        If a secret key is set, cryptographic components can use this to sign cookies and other things. Set this to a
        complex random value when you want to use the secure cookie for instance. Set via `generate_key_file.py` or
        using environment variable 'SECRET_KEY'. (see Flask Docs)
    CSRF_SESSION_KEY : str
        A Cross-site Request Forgery (CSRF) secret key for signing data. Set via `generate_key_file.py` or
        using environment variable 'CSRF_SESSION_KEY'. (see Flask docs)
    CSRF_ENABLED : bool
        Enable/disable protection against *Cross-site Request Forgery (CSRF)* (see Flask docs) (default is True)
    SERVER_ADDRESS : str
        The name and port number of the server. Do not include 'http'. (e.g.: 'caqe.local:5000') (see Flask docs)
        Can be set via environment variable 'SERVER_ADDRESS'. (default is 'caqe.local:5000')
    SQLALCHEMY_DATABASE_URI : str
        The database URI that should be used for the connection (see Flask-SQLAlchemy docs). Examples:
        * sqlite:////tmp/test.db
        * mysql://username:password@server/db
        Can be set via environment variable 'DATABASE_URL'. (default is'sqlite:////~/caqe.db')
    PREFERRED_URL_SCHEME : str
        The URL scheme that should be used for URL generation if no URL scheme is available. 'http' or 'https'
        (default is 'https')
    AUDIO_FILE_DIRECTORY : str
        Relative directory path to testing audio stimuli. (default is 'static/audio')
    ENCRYPT_AUDIO_STIMULI_URLS : bool
        Enable/disable encryption of the URLs so that users can't game consistency. (default is True)
    TEST_TYPE : str
        The test type (limited to 'pairwise' or 'mushra' for now). (default is None)
    ANONYMOUS_PARTICIPANTS_ENABLED : bool
        Enable/disable participants to enter through '/anonymous' entry point. (default is False)
    IP_COLLECTION_ENABLED : bool
        Enable/disable collection participants' IP addresses. (default is True)
    OBTAIN_CONSENT : bool
        If True, obtain consent from each participant (see consent.html) (default is False)
    PRE_TEST_SURVEY_ENABLED : bool
        If True, ask participants a survey before evaluation (see pre_test_survey.html). (default is True)
    PRE_TEST_SURVEY_INCLUSION_CRITERIA : list of str
        Pre-test survey inclusion criteria.
        (default is ["int(survey['age']) >= 18", "survey['hearing_disorder'] == 'No'"])
    POST_TEST_SURVEY_ENABLED : bool
        If True, ask participants a survey after evaluation (see post_test_survey.html) (default is True)
    HEARING_RESPONSE_ESTIMATION_ENABLED : bool
        If enabled, ask participants to complete the in-situ hearing response estimation. (default is True)
    CONDITIONS_PER_EVALUATION : int
        The number of conditions to present to a participant in a single visit to '/evaluate'.
        Note that currently evaluation is limited to one condition group. So if this value is more than 1, there must
        be at least as many conditions per group as there are conditions per evaluation for this to have an effect.
        It is also recommended that an integer multiple of `CONDITIONS_PER_EVALUATION` comprise the number of conditions
        per group. For example, if there are 28 conditions in a group, set the number of `CONDITIONS_PER_EVALUATION` to
        14 or 7.
        (default is 1)
    TRIALS_PER_CONDITION : int
        The number of trials we should collect per condition (with distinct participants). (default is 20)
    LIMIT_SUBJECT_TO_ONE_TASK_TYPE : bool
        If True, each subject is limited to one type of Test. (default is True)
    TEST_CONDITION_ORDER_RANDOMIZED : bool
        Randomize the condition order per test for each participant. (default is True)
    TEST_CONDITION_GROUP_ORDER_RANDOMIZED : bool
        Randomize the condition group order for each participant. (default is False)
    STIMULUS_ORDER_RANDOMIZED : bool
        Randomize the stimulus order per for each condition. (default is True)
    HEARING_SCREENING_TEST_ENABLED : bool
        Set to True if you want the participants to be required to take a hearing screening test. (default is True)
    HEARING_TEST_EXPIRATION_HOURS : int
        The number of hours their hearing test is valid for (they must retake after this time has passed)
        (default is 12)
    MAX_HEARING_TEST_ATTEMPTS : int
        The number of attempts one has before they are sent away (they must wait `hearing_test_expiration_hours`
        to take it again) (default is 2)
    HEARING_TEST_REJECTION_ENABLED : bool
        If this is set to True, then we still test the users, but we don't reject them. (default is True)
    HEARING_RESPONSE_NOPTIONS : int
        Max number of frequencies for user to respond with in hearing response estimation. (default is 20)
    MTURK_HOST : str
        Amazon Mechanical Turk host location. By default set it to the sandbox, and configure it via an environment
        variable (so, it can be easily modified when deploying and testing using Heroku).
        Can be set via environment variable 'MTURK_HOST'. (default is 'mechanicalturk.sandbox.amazonaws.com')
    MTURK_QUESTION_URL : str
        Entry point URL. (default is 'https://%s/mturk' % SERVER_ADDRESS)
    MTURK_REWARD : float
        This is the reward given to each worker for an approved assignment (in USD)
        (note that Amazon takes their Mechanical Turk Fee on top of this. See https://requester.mturk.com/pricing)
        (default is 0.50)
    MTURK_FIRST_HIT_BONUS : float
        The default bonus reward in USD that is optionally given (using ``turk_admin_cli.py``) to participants that
        completed the first assignment, which may have additional testing (e.g. survey, hearing tests, etc.)
        (default is 0.30)
    MTURK_MAX_CONSISTENCY_BONUS : float
        The defualt maximum bonus reward in USD for pairwise consistency. This optional bonus is given using
        ``turk_admin_cli.py``. (default is 0.25)
    MTURK_MIN_CONSISTENCY_THRESHOLD_FOR_BONUS : float
        The minimum pairwise consistency required to receive the optional bonus (given through ``turk_admin_cli.py``.)
        (default is 0.7)
    MTURK_NUMBER_HITS_APPROVED_REQUIREMENT : int
        MTurk worker must have this many approved HITs to accept task. (default is 1000)
    MTURK_PERCENT_ASSIGNMENTS_APPROVED_REQUIREMENT : int
        MTurk worker must have this percentage of approved assignments to accept task. (default is 97)
    MTURK_TITLE : str
        Title of MTurk HIT (default is 'Critical audio listening task. Listen to audio recordings and rate them on
        various scales of quality.')
    MTURK_DESCRIPTION : str
        Description of MTurk HIT.
        (default is 'This listening test aims to rate the quality of a set of signals in comparison to a reference
        signal. Note that while the maximum number assignments a worker can do is 10, it's possible that fewer than
        10 may be available to you. \*\*CHROME ONLY\*\* \*\*BONUS AVAILABLE\*\*')
    MTURK_KEYWORDS : str
        Keywords for MTurk HIT. (default is 'audio, sound, music, listening, research')
    MTURK_ASSIGNMENT_DURATION_IN_SECONDS : int
        Accepted MTurk assignments must be completed within this duration or they will be released to other workers
        (default is 60 * 30, i.e. 30 minutes)
    MTURK_LIFETIME_IN_SECONDS : int
        HITs expire (no one can accept them) after this duration since being posted.
        (default is 60 * 60 * 24 * 7, i.e 1 week)
    MTURK_FRAME_HEIGHT : int
        The size of the Mechanical Turk browser frame (default is 1200)
    ACCEPTABLE_BROWSERS : list of str
        The set of acceptable browsers. set as None to disable browser rejection. (default is ['chrome',])
    BEGIN_BUTTON_ENABLED : bool
        If true, participants will have to click a button that launches a new window. This is useful in order to
        delay condition assignment until the user is engaged in the task, and allows a new window to be launched
        that is bigger than the Mechanical Turk frame for instance. (default is True)
    POPUP_WIDTH : int
        The width of the window launched when participants press the "begin button" the task. (default is 1200)
    POPUP_HEIGHT : int
        The height of the window launched when participants press the "begin button" the task. (default is 1200)
    TEST_TIMEOUT_SEC : float
        The participant must spend at least this amount of time on the evaluation task before submission.
        (default is 60.)
    REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS : bool
        If True, the participant must listen to all of the training sounds before proceeding to the evaluation task.
        (default is True)
    PREVIEW_HTML : str
        The HTML content of the preview page. This will be the same for all conditions, regardless of test since
        conditions are assigned on the fly (so we can have complete control over condition assignment).
        (default is None)
    MIN_RATING_VALUE : int
        The minimum rating value on the MUSHRA slider. (default is 0)
    MAX_RATING_VALUE : int
        The maximum rating value on the MUSHRA slider. (default is 99)
    DEFAULT_RATING_VALUE : int
        The default rating value on the MUSHRA slider. (default is 50)
    TESTS : list of dict
        The test and condition-specific configuration variables.
        Note that if 'evaluation_instructions_html' is not None in the condition, it will override the instructions
        defined in the test.
        Note also that reference keys must be alphanumeric and stimulus keys must begin with 'S' followed by a number,
        e.g. 'S29'.
        The dicts are of the form::
            {'test_config_variables':
                {'test_title': '...', # The test title that is displayed on the evaluation page
                 'first_task_introduction_html': '...',  # Content of the intro page the first time they do a task
                 'introduction_html': '...', # Content of the intro page (after the first time they perform the task)
                 'training_instructions_html': '...', # The HTML content of the training instructions
                 'evaluation_instructions_html': '...'}, # The HTML content of the evaluation instructions
                 'references' : (('<reference_name>', '<reference_description>'),), # Reference names and descriptions
                 'reference_example_dict':
                    {'<reference_name}': url_for('static', filename='audio/<reference_filename>.wav'), ... },
                 'quality_example_dict':
                    {'<example_type0>': [url_for('static', filename='audio/<example0_filename>.wav'),
                                         url_for('static', filename='audio/<example1_filename>.wav'),],
                     '<example_type1>': [url_for('static', filename='audio/<example3_filename>),]}},
             'condition_groups' :
                [{'reference_files': {<reference_name>: '<reference_filename>.wav',},
                 {'stimulus_files': {'S1': '<S1_filename>.wav',
                                     'S2': '<S2_filename>,wav',}},
                 {'conditions': [{'reference_keys': [<reference_name>,],
                                  'stimulus_keys': ['S1','S2','S7', ... ],
                                  'evaluation_instructions_html': <condition_specific_evaluation_instructions>},]},]}
        (default is [])
    Note
    ----
    For testing, add: ::
        0.0.0.0     caqe.local
    to /etc/hosts
    We need to set the SERVER_ADDRESS to resolve ``url_for`` definitions when constructing the database, but we can't simply
    use `localhost` because the secure sessions are not compatible with that.
    """
    # ---------------------------------------------------------------------------------------------
    # BACKEND VARIABLES
    TESTING = False
    DEBUG = False
    SECRET_KEY = CSRF_SECRET_KEY
    CSRF_SESSION_KEY = SESSION_KEY
    CSRF_ENABLED = True
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:////%s' % os.path.expanduser('~/caqe.db'))
    SERVER_ADDRESS = os.getenv('SERVER_ADDRESS', 'caqe.local:5000')
    PREFERRED_URL_SCHEME = 'https'
    AUDIO_FILE_DIRECTORY = os.getenv('AUDIO_FILE_DIRECTORY', 'static/audio')
    # Audio container/codec for the stimuli (not described in the class docstring).
    AUDIO_CODEC = 'wav'
    ENCRYPT_AUDIO_STIMULI_URLS = True
    # If True, presumably audio is served from an external host rather than
    # this app's static directory -- TODO confirm against the serving code.
    EXTERNAL_FILE_HOST = False
    # Title shown on the "begin" page/button (not described in the class docstring).
    BEGIN_TITLE = 'Audio Quality Evaluation'
    # ---------------------------------------------------------------------------------------------
    # TESTING VARIABLES
    TEST_TYPE = None
    ANONYMOUS_PARTICIPANTS_ENABLED = False
    IP_COLLECTION_ENABLED = True
    OBTAIN_CONSENT = False
    PRE_TEST_SURVEY_ENABLED = True
    PRE_TEST_SURVEY_INCLUSION_CRITERIA = ["int(survey['age']) >= 18",
                                          "survey['hearing_disorder'] == 'No'"]
    POST_TEST_SURVEY_ENABLED = True
    HEARING_RESPONSE_ESTIMATION_ENABLED = True
    CONDITIONS_PER_EVALUATION = 1
    TRIALS_PER_CONDITION = 20
    LIMIT_SUBJECT_TO_ONE_TASK_TYPE = True
    TEST_CONDITION_ORDER_RANDOMIZED = True
    TEST_CONDITION_GROUP_ORDER_RANDOMIZED = False
    STIMULUS_ORDER_RANDOMIZED = True
    # ---------------------------------------------------------------------------------------------
    # HEARING SCREENING VARIABLES
    HEARING_SCREENING_TEST_ENABLED = True
    HEARING_TEST_EXPIRATION_HOURS = 12
    MAX_HEARING_TEST_ATTEMPTS = 2
    # NOTE(review): the docstring wording for this flag appears inverted
    # relative to its name ("If this is set to True ... we don't reject
    # them") -- confirm the intended semantics against the screening code.
    HEARING_TEST_REJECTION_ENABLED = True
    # ---------------------------------------------------------------------------------------------
    # HEARING RESPONSE ESTIMATION VARIABLES
    HEARING_RESPONSE_NOPTIONS = 20
    # ---------------------------------------------------------------------------------------------
    # MECHANICAL TURK VARIABLES
    MTURK_HOST = os.getenv('MTURK_HOST', 'mechanicalturk.sandbox.amazonaws.com')
    MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
    MTURK_REWARD = 0.50
    MTURK_FIRST_HIT_BONUS = 0.30
    MTURK_MAX_CONSISTENCY_BONUS = 0.25
    MTURK_MIN_CONSISTENCY_THRESHOLD_FOR_BONUS = 0.7
    MTURK_NUMBER_HITS_APPROVED_REQUIREMENT = 1000
    MTURK_PERCENT_ASSIGNMENTS_APPROVED_REQUIREMENT = 97
    MTURK_TITLE = 'Critical audio listening task. Listen to audio recordings and rate them on various ' \
                  'scales of quality.'
    MTURK_DESCRIPTION = 'This listening test aims to rate the quality of a set of signals in comparison to a reference ' \
                        'signal. Note that while the maximum number assignments a worker can do is 10, it\'s possible that ' \
                        'fewer than 10 may be available to you. **CHROME ONLY** **BONUS AVAILABLE**'
    MTURK_KEYWORDS = 'audio, sound, music, listening, research'
    MTURK_ASSIGNMENT_DURATION_IN_SECONDS = 60 * 30
    MTURK_LIFETIME_IN_SECONDS = 60 * 60 * 24 * 7
    # Maximum number of assignments per HIT (not described in the class docstring).
    MTURK_MAX_ASSIGNMENTS = 200
    MTURK_AUTO_APPROVAL_DELAY_IN_SECONDS = 60 * 60 * 24 * 1  # 1 day
    MTURK_FRAME_HEIGHT = 1200
    # ---------------------------------------------------------------------------------------------
    # FRONT-END VARIABLES
    ACCEPTABLE_BROWSERS = ['chrome']
    BEGIN_BUTTON_ENABLED = True
    POPUP_WIDTH = 1200
    POPUP_HEIGHT = 1200
    TEST_TIMEOUT_SEC = 60.
    REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS = True
    PREVIEW_HTML = None
    MIN_RATING_VALUE = 0
    MAX_RATING_VALUE = 99
    DEFAULT_RATING_VALUE = 50
    # ---------------------------------------------------------------------------------------------
    # DEFAULT CONDITION AND TEST-SPECIFIC VARIABLES
    #   (These will be configured for each condition and saved in the database)
    TESTS = []
class TestingOverrideConfig(object):
    """
    Override config for testing.
    Note
    ----
    To enable these parameters set environment variable ``APP_MODE`` to 'TESTING'. In Linux: ::
        $ export APP_MODE=TESTING
    """
    # Flask testing + debug mode against an in-memory SQLite database.
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
    SERVER_ADDRESS = 'caqe.local:5000'
    MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
    # URL generation uses plain http when running locally.
    PREFERRED_URL_SCHEME = 'http'
class DevelopmentOverrideConfig(object):
    """
    Override config for development.
    Note
    ----
    To enable these parameters set environment variable ``APP_MODE`` to 'DEVELOPMENT'. In Linux: ::
        $ export APP_MODE=DEVELOPMENT
    """
    DEBUG = True
    SERVER_ADDRESS = 'caqe.local:5000'
    MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
    # Relax participant gating so developers are not screened out locally.
    HEARING_TEST_REJECTION_ENABLED = False
    PREFERRED_URL_SCHEME = 'http'
    REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS = False
class ProductionOverrideConfig(object):
    """
    Override config for production.
    Note
    ----
    To enable these parameters set environment variable ``APP_MODE`` to 'PRODUCTION'. In Linux: ::
        $ export APP_MODE=PRODUCTION
    """
    # Explicitly disable debug/testing behavior in production.
    TESTING = False
    DEBUG = False
class EvaluationDevOverrideConfig(object):
    """
    Override config for evaluation task development.
    Note
    ----
    To enable these parameters set environment variable ``APP_MODE`` to 'EVALUATION'. In Linux: ::
        $ export APP_MODE=EVALUATION
    """
    DEBUG = True
    SERVER_ADDRESS = 'caqe.local:5000'
    MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
    # Disable every gate before the evaluation task itself, so the
    # evaluation UI can be reached immediately during development.
    HEARING_TEST_REJECTION_ENABLED = False
    HEARING_SCREENING_TEST_ENABLED = False
    HEARING_RESPONSE_ESTIMATION_ENABLED = False
    PREFERRED_URL_SCHEME = 'http'
    REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS = False
    PRE_TEST_SURVEY_ENABLED = False
    POST_TEST_SURVEY_ENABLED = False
 | 
	mcartwright/CAQE | 
	src/caqe/configuration.py | 
	Python | 
	mit | 19,785 | 
	[
  "VisIt"
] | 
	60f1965a4f5b55df7d2bb1ddb9a6d553291e0b68e9e279e55f56f6f2698d3754 | 
| 
	#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime is invoked with the right options.
# Adapted from https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/1.16.16/peptide-shaker.py (accessed June, 21th 2019).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'GeMoMa-1.7.1.jar'
default_jvm_mem_opts = ['-Xms1g', '-Xmx2g']
original_string = "java -jar "+jar_file+" CLI"
wrapper_string = "GeMoMa"
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Resolve any symlinks in *path* and return its directory portion."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the path of the Java interpreter to use.

    Prefers ``$JAVA_HOME/bin/java`` when it exists and is executable,
    otherwise falls back to plain ``'java'`` on the PATH.
    """
    java_home = getenv('JAVA_HOME')
    if not java_home:
        return 'java'
    candidate = os.path.join(java_home, 'bin', 'java')
    if access(candidate, X_OK):
        return candidate
    return 'java'
def jvm_opts(argv):
    """Split an argument list into JVM and passthrough options.

    *argv* must not include the script name.  Returns a 3-tuple of lists:
    ``(memory_options, prop_options, passthrough_options)``.
    """
    mem_opts, prop_opts, pass_args = [], [], []
    for token in argv:
        if token.startswith('-Xm'):
            mem_opts.append(token)
        elif token.startswith(('-D', '-XX')):
            prop_opts.append(token)
        else:
            pass_args.append(token)
    # Mirror the original shell script's behavior: fall back to the default
    # memory options only when no -Xm* flag was given AND _JAVA_OPTIONS is
    # completely unset -- an empty (null) value still counts as set, hence
    # the explicit comparison with None.
    if not mem_opts and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args)
def main():
    """Launch the GeMoMa jar with the computed JVM options, rewriting its
    stderr so usage messages show the wrapper name instead of the raw
    "java -jar ... CLI" invocation, then exit with the child's status."""
    java = java_executable()
    (mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
    jar_dir = real_dirname(sys.argv[0])
    jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    cli = 'CLI'
    # Assembled as: java [mem opts] [prop opts] -jar <dir>/GeMoMa-1.7.1.jar CLI <args>
    cmd = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + [cli] + pass_args
#    print('wrapper script translating:')
#    print(sys.argv)
#    print('to:')
#    print(cmd)
#    print('=======================================================================================================================\n')
#    print(original_string)
#    print(wrapper_string)
    #sys.exit(subprocess.call(cmd))
    # Capture only stderr so each line can be filtered through the
    # original_string -> wrapper_string replacement before re-emission;
    # stdout flows through untouched.
    p = subprocess.Popen(cmd,stderr=subprocess.PIPE);
    for line in iter(p.stderr.readline,b''):
        tomod = line.decode("utf-8")
        tomod = tomod.replace(original_string,wrapper_string)
        print(tomod,end='',file=sys.stderr)
    # Propagate the child process's exit status as our own.
    exit(p.wait())
if __name__ == '__main__':
    main()
 | 
	cokelaer/bioconda-recipes | 
	recipes/gemoma/GeMoMa.py | 
	Python | 
	mit | 3,169 | 
	[
  "Bioconda"
] | 
	018ca2619f82a0002e2334d695e8fe532aec2293d4d5bda0711ecab68d30118d | 
| 
	# sql/elements.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
import re
import operator
def _clone(element, **kw):
    return element._clone()
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``.

    e.g.::

        collate(mycolumn, 'utf8_bin')

    produces::

        mycolumn COLLATE utf8_bin
    """
    # Coerce the left side to a bindable expression; the collation name is
    # rendered as literal SQL text, not as a bound parameter.
    bound = _literal_as_binds(expression)
    collation_clause = _literal_as_text(collation)
    return BinaryExpression(
        bound, collation_clause, operators.collate, type_=bound.type)
def between(ctest, cleft, cright):
    """Return a ``BETWEEN`` predicate clause.

    Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.

    The :func:`between()` method on all :class:`.ColumnElement` subclasses
    provides similar functionality.
    """
    # Coerce the tested expression and delegate to its between() method.
    return _literal_as_binds(ctest).between(cleft, cright)
def literal(value, type_=None):
    """Return a literal clause, bound to a bind parameter.

    Literal clauses are created automatically when non- :class:`.ClauseElement`
    objects (such as strings, ints, dates, etc.) are used in a comparison
    operation with a :class:`.ColumnElement` subclass, such as a
    :class:`~sqlalchemy.schema.Column` object.  Use this function to force
    the generation of a literal clause, which will be created as a
    :class:`BindParameter` with a bound value.

    :param value: the value to be bound. Can be any Python object supported by
        the underlying DB-API, or is translatable via the given type argument.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
        will provide bind-parameter translation for this literal.
    """
    # An anonymous (keyless), unique bind parameter wraps the raw value.
    bind = BindParameter(None, value, type_=type_, unique=True)
    return bind
def type_coerce(expr, type_):
    """Coerce the given expression into the given type,
    on the Python side only.

    :func:`.type_coerce` is roughly similar to :func:`.cast`, except no
    "CAST" expression is rendered - the given type is only applied towards
    expression typing and against received result values.

    e.g.::

        from sqlalchemy.types import TypeDecorator
        import uuid

        class AsGuid(TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                if value is not None:
                    return str(value)
                else:
                    return None

            def process_result_value(self, value, dialect):
                if value is not None:
                    return uuid.UUID(value)
                else:
                    return None

        conn.execute(
            select([type_coerce(mytable.c.ident, AsGuid)]).\\
                    where(
                        type_coerce(mytable.c.ident, AsGuid) ==
                        uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
                    )
        )

    :param expr: the expression or plain value to coerce.
    :param type\_: a :class:`~sqlalchemy.types.TypeEngine` class or instance
        to apply to the expression.
    """
    type_ = type_api.to_instance(type_)
    if hasattr(expr, '__clause_expr__'):
        # BUG FIX: the recursive call previously omitted ``type_``, so
        # objects exposing __clause_expr__ both lost the coercion and
        # raised a TypeError (type_coerce requires two arguments).
        return type_coerce(expr.__clause_expr__(), type_)
    elif isinstance(expr, BindParameter):
        # Re-type a clone of the bound parameter rather than mutating
        # the caller's object.
        bp = expr._clone()
        bp.type = type_
        return bp
    elif not isinstance(expr, Visitable):
        # Plain Python values become typed literals; None becomes NULL.
        if expr is None:
            return Null()
        else:
            return literal(expr, type_=type_)
    else:
        # Any other clause element is wrapped in an anonymous label that
        # carries the target type.
        return Label(None, expr, type_=type_)
def outparam(key, type_=None):
    """Create an 'OUT' parameter for usage in functions (stored procedures),
    for databases which support them.

    The ``outparam`` can be used like a regular function parameter.  The
    "output" value will be available from the
    :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
    attribute, which returns a dictionary containing the values.
    """
    # A named, non-unique bind parameter flagged as an OUT parameter.
    return BindParameter(key, None, type_=type_, unique=False, isoutparam=True)
def and_(*clauses):
    """Join a list of clauses together using the ``AND`` operator.

    The ``&`` operator is also overloaded on all :class:`.ColumnElement`
    subclasses to produce the same result.
    """
    # A single clause needs no AND wrapper; hand it back untouched.
    if len(clauses) != 1:
        return BooleanClauseList(operator=operators.and_, *clauses)
    return clauses[0]
def or_(*clauses):
    """Join a list of clauses together using the ``OR`` operator.

    The ``|`` operator is also overloaded on all :class:`.ColumnElement`
    subclasses to produce the same result.
    """
    # A single clause needs no OR wrapper; hand it back untouched.
    if len(clauses) != 1:
        return BooleanClauseList(operator=operators.or_, *clauses)
    return clauses[0]
def not_(clause):
    """Return a negation of the given clause, i.e. ``NOT(clause)``.

    The ``~`` operator is also overloaded on all :class:`.ColumnElement`
    subclasses to produce the same result.
    """
    bound = _literal_as_binds(clause)
    return operators.inv(bound)
@inspection._self_inspects
class ClauseElement(Visitable):
    """Base class for elements of a programmatically constructed SQL
    expression.
    """
    __visit_name__ = 'clause'

    # Annotation dictionary applied via _annotate(); empty by default.
    _annotations = {}
    # True for executable constructs (statements); this base class
    # represents non-executable fragments.
    supports_execution = False
    # FROM objects this element contributes to an enclosing SELECT.
    _from_objects = []
    # Engine or Connection this element is bound to, if any.
    bind = None
    # Set by _clone() to point back at the element this was cloned from.
    _is_clone_of = None
    # Inspection flags used throughout the compiler.
    is_selectable = False
    is_clause_element = True
    # The ColumnElement to refer to when ordering by a label, if any.
    _order_by_label_element = None
    def _clone(self):
        """Create a shallow copy of this ClauseElement.
        This method may be used by a generative API.  It's also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.
        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # reset memoized attributes so the copy recomputes them rather
        # than sharing the original's cached values
        ClauseElement._cloned_set._reset(c)
        ColumnElement.comparator._reset(c)
        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses.  the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self
        return c
    @property
    def _constructor(self):
        """return the 'constructor' for this ClauseElement.
        This is for the purposes of creating a new object of
        this type.   Usually, it's just the element's __class__.
        However, the "Annotated" version of the object overrides
        to return the class of its proxied element.
        """
        return self.__class__
    @util.memoized_property
    def _cloned_set(self):
        """Return the set consisting of all cloned ancestors of this
        ClauseElement.
        Includes this ClauseElement.  This accessor tends to be used for
        FromClause objects to identify 'equivalent' FROM clauses, regardless
        of transformative operations.
        """
        # walk the _is_clone_of chain back to the original element
        s = util.column_set()
        f = self
        while f is not None:
            s.add(f)
            f = f._is_clone_of
        return s
    def __getstate__(self):
        # _is_clone_of is transient identity state; drop it when pickling.
        d = self.__dict__.copy()
        d.pop('_is_clone_of', None)
        return d
    def _annotate(self, values):
        """return a copy of this ClauseElement with annotations
        updated by the given dictionary.
        """
        return Annotated(self, values)
    def _with_annotations(self, values):
        """return a copy of this ClauseElement with annotations
        replaced by the given dictionary.
        """
        return Annotated(self, values)
    def _deannotate(self, values=None, clone=False):
        """return a copy of this :class:`.ClauseElement` with annotations
        removed.
        :param values: optional tuple of individual values
         to remove.
        """
        if clone:
            # clone is used when we are also copying
            # the expression for a deep deannotation
            return self._clone()
        else:
            # if no clone, since we have no annotations we return
            # self
            return self
    def unique_params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Same functionality as ``params()``, except adds `unique=True`
        to affected bind parameters so that multiple statements can be
        used.
        """
        return self._params(True, optionaldict, kwargs)
    def params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Returns a copy of this ClauseElement with :func:`bindparam()`
        elements replaced with values taken from the given dictionary::
          >>> clause = column('x') + bindparam('foo')
          >>> print clause.compile().params
          {'foo':None}
          >>> print clause.params({'foo':7}).compile().params
          {'foo':7}
        """
        return self._params(False, optionaldict, kwargs)
    def _params(self, unique, optionaldict, kwargs):
        # Shared implementation for params() / unique_params(): values may
        # arrive as a single positional dict and/or as keyword arguments.
        if len(optionaldict) == 1:
            kwargs.update(optionaldict[0])
        elif len(optionaldict) > 1:
            raise exc.ArgumentError(
                "params() takes zero or one positional dictionary argument")
        def visit_bindparam(bind):
            # applied to each BindParameter found during traversal
            if bind.key in kwargs:
                bind.value = kwargs[bind.key]
                bind.required = False
            if unique:
                bind._convert_to_unique()
        # cloned_traverse produces the copy; the original is untouched
        return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
    def compare(self, other, **kw):
        """Compare this ClauseElement to the given ClauseElement.
        Subclasses should override the default behavior, which is a
        straight identity comparison.
        \**kw are arguments consumed by subclass compare() methods and
        may be used to modify the criteria for comparison.
        (see :class:`.ColumnElement`)
        """
        return self is other
    def _copy_internals(self, clone=_clone, **kw):
        """Reassign internal elements to be clones of themselves.
        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.
        The given clone function should be used, which may be applying
        additional transformations to the element (i.e. replacement
        traversal, cloned traversal, annotations).
        """
        pass
    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`.ClauseElement`.
        This is used for visit traversal.
        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).
        """
        return []
    def self_group(self, against=None):
        """Apply a 'grouping' to this :class:`.ClauseElement`.
        This method is overridden by subclasses to return a
        "grouping" construct, i.e. parenthesis.   In particular
        it's used by "binary" expressions to provide a grouping
        around themselves when placed into a larger expression,
        as well as by :func:`.select` constructs when placed into
        the FROM clause of another :func:`.select`.  (Note that
        subqueries should be normally created using the
        :func:`.Select.alias` method, as many platforms require
        nested SELECT statements to be named).
        As expressions are composed together, the application of
        :meth:`self_group` is automatic - end-user code should never
        need to use this method directly.  Note that SQLAlchemy's
        clause constructs take operator precedence into account -
        so parenthesis might not be needed, for example, in
        an expression like ``x OR (y AND z)`` - AND takes precedence
        over OR.
        The base :meth:`self_group` method of :class:`.ClauseElement`
        just returns self.
        """
        return self
    @util.dependencies("sqlalchemy.engine.default")
    def compile(self, default, bind=None, dialect=None, **kw):
        """Compile this SQL expression.
        The return value is a :class:`~.Compiled` object.
        Calling ``str()`` or ``unicode()`` on the returned value will yield a
        string representation of the result. The
        :class:`~.Compiled` object also can return a
        dictionary of bind parameter names and values
        using the ``params`` accessor.
        :param bind: An ``Engine`` or ``Connection`` from which a
            ``Compiled`` will be acquired. This argument takes precedence over
            this :class:`.ClauseElement`'s bound engine, if any.
        :param column_keys: Used for INSERT and UPDATE statements, a list of
            column names which should be present in the VALUES clause of the
            compiled statement. If ``None``, all columns from the target table
            object are rendered.
        :param dialect: A ``Dialect`` instance from which a ``Compiled``
            will be acquired. This argument takes precedence over the `bind`
            argument as well as this :class:`.ClauseElement`'s bound engine, if
            any.
        :param inline: Used for INSERT statements, for a dialect which does
            not support inline retrieval of newly generated primary key
            columns, will force the expression used to create the new primary
            key value to be rendered inline within the INSERT statement's
            VALUES clause. This typically refers to Sequence execution but may
            also refer to any server-side default generation function
            associated with a primary key `Column`.
        """
        # dialect resolution order: explicit dialect, then the bind's
        # dialect, then this element's own bind, then a default dialect
        if not dialect:
            if bind:
                dialect = bind.dialect
            elif self.bind:
                dialect = self.bind.dialect
                bind = self.bind
            else:
                dialect = default.DefaultDialect()
        return self._compiler(dialect, bind=bind, **kw)
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""
        return dialect.statement_compiler(dialect, self, **kw)
    def __str__(self):
        # Python 2 requires an encoded byte string from __str__
        if util.py3k:
            return str(self.compile())
        else:
            return unicode(self.compile()).encode('ascii', 'backslashreplace')
    def __and__(self, other):
        return and_(self, other)
    def __or__(self, other):
        return or_(self, other)
    def __invert__(self):
        return self._negate()
    def __bool__(self):
        # SQL expressions have no Python truth value; force an explicit error
        raise TypeError("Boolean value of this clause is not defined")
    __nonzero__ = __bool__
    def _negate(self):
        # Use a precomputed negation when the element provides one;
        # otherwise wrap in a NOT unary expression.
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return UnaryExpression(
                        self.self_group(against=operators.inv),
                        operator=operators.inv,
                        negate=None)
    def __repr__(self):
        friendly = getattr(self, 'description', None)
        if friendly is None:
            return object.__repr__(self)
        else:
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__, id(self), friendly)
class ColumnElement(ClauseElement, operators.ColumnOperators):
    """Represent a column-oriented SQL expression suitable for usage in the
    "columns" clause, WHERE clause etc. of a statement.
    While the most familiar kind of :class:`.ColumnElement` is the
    :class:`.Column` object, :class:`.ColumnElement` serves as the basis
    for any unit that may be present in a SQL expression, including
    the expressions themselves, SQL functions, bound parameters,
    literal expressions, keywords such as ``NULL``, etc.
    :class:`.ColumnElement` is the ultimate base class for all such elements.
    A :class:`.ColumnElement` provides the ability to generate new
    :class:`.ColumnElement`
    objects using Python expressions.  This means that Python operators
    such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
    and allow the instantiation of further :class:`.ColumnElement` instances
    which are composed from other, more fundamental :class:`.ColumnElement`
    objects.  For example, two :class:`.ColumnClause` objects can be added
    together with the addition operator ``+`` to produce
    a :class:`.BinaryExpression`.
    Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
    of :class:`.ColumnElement`::
        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b
    :class:`.ColumnElement` supports the ability to be a *proxy* element,
    which indicates that the :class:`.ColumnElement` may be associated with
    a :class:`.Selectable` which was derived from another :class:`.Selectable`.
    An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a
    :class:`~sqlalchemy.schema.Table`.  For the ambitious, an in-depth
    discussion of this concept can be found at
    `Expression Transformations <http://techspot.zzzeek.org/2008/01/23/expression-transformations/>`_.
    """
    __visit_name__ = 'column'

    # Defaults overridden by subclasses such as Column.
    primary_key = False
    foreign_keys = []
    # Compiled label / key-label names; populated by subclasses.
    _label = None
    _key_label = None
    # Alternate names usable for result-row targeting.
    _alt_names = ()
    @util.memoized_property
    def type(self):
        # NULLTYPE unless a subclass or constructor assigns a real type
        return type_api.NULLTYPE
    @util.memoized_property
    def comparator(self):
        # operator implementations are delegated to the type's comparator
        return self.type.comparator_factory(self)
    def __getattr__(self, key):
        # fall back to the comparator for unknown attributes, so
        # type-specific operators (e.g. ``.like()``) resolve here
        try:
            return getattr(self.comparator, key)
        except AttributeError:
            raise AttributeError(
                    'Neither %r object nor %r object has an attribute %r' % (
                    type(self).__name__,
                    type(self.comparator).__name__,
                    key)
            )
    def operate(self, op, *other, **kwargs):
        # route operators through the comparator (ColumnOperators hook)
        return op(self.comparator, *other, **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        return op(other, self.comparator, **kwargs)
    def _bind_param(self, operator, obj):
        # create an anonymous bind parameter for a literal compared
        # against this column, coercing via this column's type
        return BindParameter(None, obj,
                                    _compared_to_operator=operator,
                                    _compared_to_type=self.type, unique=True)
    @property
    def expression(self):
        """Return a column expression.
        Part of the inspection interface; returns self.
        """
        return self
    @property
    def _select_iterable(self):
        return (self, )
    @util.memoized_property
    def base_columns(self):
        # members of proxy_set that are not themselves proxies
        return util.column_set(c for c in self.proxy_set
                                     if not hasattr(c, '_proxies'))
    @util.memoized_property
    def proxy_set(self):
        # this element plus, recursively, everything it proxies for
        s = util.column_set([self])
        if hasattr(self, '_proxies'):
            for c in self._proxies:
                s.update(c.proxy_set)
        return s
    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`.ColumnElement`
        has a common ancestor to this :class:`.ColumnElement`."""
        return bool(self.proxy_set.intersection(othercolumn.proxy_set))
    def _compare_name_for_result(self, other):
        """Return True if the given column element compares to this one
        when targeting within a result row."""
        return hasattr(other, 'name') and hasattr(self, 'name') and \
                other.name == self.name
    def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.
        """
        if name is None:
            # unnamed expression: use the anonymous label; the stringified
            # form serves as the collection key when it can be rendered
            name = self.anon_label
            try:
                key = str(self)
            except exc.UnsupportedCompilationError:
                key = self.anon_label
        else:
            key = name
        co = ColumnClause(
                _as_truncated(name) if name_is_truncatable else name,
                type_=getattr(self, 'type', None),
                _selectable=selectable
            )
        # record the proxy relationship and register on the selectable
        co._proxies = [self]
        if selectable._is_clone_of is not None:
            co._is_clone_of = \
                selectable._is_clone_of.columns.get(key)
        selectable._columns[key] = co
        return co
    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.
        Special arguments understood:
        :param use_proxies: when True, consider two columns that
          share a common base column as equivalent (i.e. shares_lineage())
        :param equivalents: a dictionary of columns as keys mapped to sets
          of columns. If the given "other" column is present in this
          dictionary, if any of the columns in the corresponding set() pass the
          comparison test, the result is True. This is used to expand the
          comparison to other columns that may be known to be equivalent to
          this one via foreign key or other criterion.
        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)
        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif hash(oth) == hash(self):
                return True
        # for/else: reached only when the loop finishes without a match
        else:
            return False
    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.
        This is a shortcut to the :func:`~.expression.label` function.
        if 'name' is None, an anonymous label name will be generated.
        """
        return Label(name, self, self.type)
    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.
        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.
        the compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.
        """
        return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
                                'name', 'anon')))
class BindParameter(ColumnElement):
    """Represent a bound parameter value.
    """
    __visit_name__ = 'bindparam'

    # Set True when this parameter represents an INSERT/UPDATE column value.
    _is_crud = False
    def __init__(self, key, value=NO_ARG, type_=None,
                            unique=False, required=NO_ARG,
                            quote=None, callable_=None,
                            isoutparam=False,
                            _compared_to_operator=None,
                            _compared_to_type=None):
        """Construct a new :class:`.BindParameter`.
            :param key:
              the key for this bind param.  Will be used in the generated
              SQL statement for dialects that use named parameters.  This
              value may be modified when part of a compilation operation,
              if other :class:`BindParameter` objects exist with the same
              key, or if its length is too long and truncation is
              required.
            :param value:
              Initial value for this bind param.  This value may be
              overridden by the dictionary of parameters sent to statement
              compilation/execution.
              Defaults to ``None``, however if neither ``value`` nor
              ``callable`` are passed explicitly, the ``required`` flag will be
              set to ``True`` which has the effect of requiring a value be present
              when the statement is actually executed.
              .. versionchanged:: 0.8 The ``required`` flag is set to ``True``
                 automatically if ``value`` or ``callable`` is not passed.
            :param callable\_:
              A callable function that takes the place of "value".  The function
              will be called at statement execution time to determine the
              ultimate value.   Used for scenarios where the actual bind
              value cannot be determined at the point at which the clause
              construct is created, but embedded bind values are still desirable.
            :param type\_:
              A ``TypeEngine`` object that will be used to pre-process the
              value corresponding to this :class:`BindParameter` at
              execution time.
            :param unique:
              if True, the key name of this BindParamClause will be
              modified if another :class:`BindParameter` of the same name
              already has been located within the containing
              :class:`.ClauseElement`.
            :param required:
              If ``True``, a value is required at execution time.  If not passed,
              is set to ``True`` or ``False`` based on whether or not
              one of ``value`` or ``callable`` were passed..
              .. versionchanged:: 0.8 If the ``required`` flag is not specified,
                 it will be set automatically to ``True`` or ``False`` depending
                 on whether or not the ``value`` or ``callable`` parameters
                 were specified.
            :param quote:
              True if this parameter name requires quoting and is not
              currently known as a SQLAlchemy reserved word; this currently
              only applies to the Oracle backend.
            :param isoutparam:
              if True, the parameter should be treated like a stored procedure
              "OUT" parameter.
              .. seealso::
                :func:`.outparam`
        """
        # a Column passed as the key supplies both the name and the type
        if isinstance(key, ColumnClause):
            type_ = key.type
            key = key.name
        # ``required`` defaults to True only when no value source was given
        if required is NO_ARG:
            required = (value is NO_ARG and callable_ is None)
        if value is NO_ARG:
            value = None
        if quote is not None:
            key = quoted_name(key, quote)
        # anonymize the key when uniqueness is requested; the %-template
        # is resolved to a final name at compile time
        if unique:
            self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
                    or 'param'))
        else:
            self.key = key or _anonymous_label('%%(%d param)s'
                    % id(self))
        # identifying key that won't change across
        # clones, used to identify the bind's logical
        # identity
        self._identifying_key = self.key
        # key that was passed in the first place, used to
        # generate new keys
        self._orig_key = key or 'param'
        self.unique = unique
        self.value = value
        self.callable = callable_
        self.isoutparam = isoutparam
        self.required = required
        # type resolution: explicit type_, else coerce from the value
        # being compared against, else look up by the Python type of value
        if type_ is None:
            if _compared_to_type is not None:
                self.type = \
                    _compared_to_type.coerce_compared_value(
                        _compared_to_operator, value)
            else:
                self.type = type_api._type_map.get(type(value),
                        type_api.NULLTYPE)
        elif isinstance(type_, type):
            self.type = type_()
        else:
            self.type = type_
    @property
    def effective_value(self):
        """Return the value of this bound parameter,
        taking into account if the ``callable`` parameter
        was set.
        The ``callable`` value will be evaluated
        and returned if present, else ``value``.
        """
        if self.callable:
            return self.callable()
        else:
            return self.value
    def _clone(self):
        c = ClauseElement._clone(self)
        # a unique parameter gets a fresh anonymous key per clone,
        # derived from the originally-passed key
        if self.unique:
            c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
                    or 'param'))
        return c
    def _convert_to_unique(self):
        # switch this parameter to unique-key mode in place (idempotent)
        if not self.unique:
            self.unique = True
            self.key = _anonymous_label('%%(%d %s)s' % (id(self),
                    self._orig_key or 'param'))
    def compare(self, other, **kw):
        """Compare this :class:`BindParameter` to the given
        clause."""
        return isinstance(other, BindParameter) \
            and self.type._compare_type_affinity(other.type) \
            and self.value == other.value
    def __getstate__(self):
        """execute a deferred value for serialization purposes."""
        # the callable itself may not be picklable; freeze its result
        d = self.__dict__.copy()
        v = self.value
        if self.callable:
            v = self.callable()
            d['callable'] = None
        d['value'] = v
        return d
    def __repr__(self):
        return 'BindParameter(%r, %r, type_=%r)' % (self.key,
                self.value, self.type)
class TypeClause(ClauseElement):
    """Represent a bare type keyword appearing in a SQL statement.

    Used by the ``Case`` statement.
    """

    __visit_name__ = 'typeclause'

    def __init__(self, type):
        # Retain the type object so the compiler can render its name.
        self.type = type
class TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.
    Public constructor is the :func:`text()` function.
    """
    __visit_name__ = 'textclause'

    # matches ``:name`` bind markers, skipping ``::`` casts and names
    # preceded by a backslash (\x5c) escape
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = \
        Executable._execution_options.union(
            {'autocommit': PARSE_AUTOCOMMIT})
    @property
    def _select_iterable(self):
        return (self,)
    @property
    def selectable(self):
        return self
    _hide_froms = []
    def __init__(
                    self,
                    text='',
                    bind=None,
                    bindparams=None,
                    typemap=None,
                    autocommit=None):
        """Construct a new :class:`.TextClause` clause.
        E.g.::
            from sqlalchemy import text
            t = text("SELECT * FROM users")
            result = connection.execute(t)
        The advantages :func:`.text` provides over a plain string are
        backend-neutral support for bind parameters, per-statement
        execution options, as well as
        bind parameter and result-column typing behavior, allowing
        SQLAlchemy type constructs to play a role when executing
        a statement that is specified literally.
        Bind parameters are specified by name, using the format ``:name``.
        E.g.::
            t = text("SELECT * FROM users WHERE id=:user_id")
            result = connection.execute(t, user_id=12)
        To invoke SQLAlchemy typing logic for bind parameters, the
        ``bindparams`` list allows specification of :func:`bindparam`
        constructs which specify the type for a given name::
            t = text("SELECT id FROM users WHERE updated_at>:updated",
                        bindparams=[bindparam('updated', DateTime())]
                    )
        Typing during result row processing is also an important concern.
        Result column types
        are specified using the ``typemap`` dictionary, where the keys
        match the names of columns.  These names are taken from what
        the DBAPI returns as ``cursor.description``::
            t = text("SELECT id, name FROM users",
                    typemap={
                        'id':Integer,
                        'name':Unicode
                    }
            )
        The :func:`text` construct is used internally for most cases when
        a literal string is specified for part of a larger query, such as
        within :func:`select()`, :func:`update()`,
        :func:`insert()` or :func:`delete()`.   In those cases, the same
        bind parameter syntax is applied::
            s = select([users.c.id, users.c.name]).where("id=:user_id")
            result = connection.execute(s, user_id=12)
        Using :func:`text` explicitly usually implies the construction
        of a full, standalone statement.   As such, SQLAlchemy refers
        to it as an :class:`.Executable` object, and it supports
        the :meth:`Executable.execution_options` method.  For example,
        a :func:`text` construct that should be subject to "autocommit"
        can be set explicitly so using the ``autocommit`` option::
            t = text("EXEC my_procedural_thing()").\\
                    execution_options(autocommit=True)
        Note that SQLAlchemy's usual "autocommit" behavior applies to
        :func:`text` constructs - that is, statements which begin
        with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
        or a variety of other phrases specific to certain backends, will
        be eligible for autocommit if no transaction is in progress.
        :param text:
          the text of the SQL statement to be created.  use ``:<param>``
          to specify bind parameters; they will be compiled to their
          engine-specific format.
        :param autocommit:
          Deprecated.  Use .execution_options(autocommit=<True|False>)
          to set the autocommit option.
        :param bind:
          an optional connection or engine to be used for this text query.
        :param bindparams:
          a list of :func:`bindparam()` instances which can be used to define
          the types and/or initial values for the bind parameters within
          the textual statement; the keynames of the bindparams must match
          those within the text of the statement.  The types will be used
          for pre-processing on bind values.
        :param typemap:
          a dictionary mapping the names of columns represented in the
          columns clause of a ``SELECT`` statement  to type objects,
          which will be used to perform post-processing on columns within
          the result set.   This argument applies to any expression
          that returns result sets.
        """
        self._bind = bind
        self.bindparams = {}
        self.typemap = typemap
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated.  '
                                 'Use .execution_options(autocommit=Tru'
                                 'e)')
            self._execution_options = \
                self._execution_options.union(
                    {'autocommit': autocommit})
        # normalize typemap entries from type classes to type instances
        if typemap is not None:
            for key in typemap:
                typemap[key] = type_api.to_instance(typemap[key])
        def repl(m):
            # regex substitution callback: record each :name marker as a
            # BindParameter, re-emitting the marker text unchanged
            self.bindparams[m.group(1)] = BindParameter(m.group(1))
            return ':%s' % m.group(1)
        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)
        # explicitly-passed bindparams override the scanned placeholders
        if bindparams is not None:
            for b in bindparams:
                self.bindparams[b.key] = b
    @property
    def type(self):
        # a single-entry typemap determines this fragment's type
        if self.typemap is not None and len(self.typemap) == 1:
            return list(self.typemap)[0]
        else:
            return type_api.NULLTYPE
    @property
    def comparator(self):
        return self.type.comparator_factory(self)
    def self_group(self, against=None):
        # parenthesize only when used as the right side of IN
        if against is operators.in_op:
            return Grouping(self)
        else:
            return self
    def _copy_internals(self, clone=_clone, **kw):
        self.bindparams = dict((b.key, clone(b, **kw))
                               for b in self.bindparams.values())
    def get_children(self, **kwargs):
        return list(self.bindparams.values())
class Null(ColumnElement):
    """Represent the ``NULL`` keyword within a SQL statement."""

    __visit_name__ = 'null'

    def __init__(self):
        """Return a :class:`Null` object, which compiles to ``NULL``."""
        # NULL has no meaningful bind/result type; use the null type.
        self.type = type_api.NULLTYPE

    def compare(self, other):
        # Every Null construct is equivalent to every other.
        return isinstance(other, Null)
class False_(ColumnElement):
    """Represent the ``false`` keyword within a SQL statement."""

    __visit_name__ = 'false'

    def __init__(self):
        """Return a :class:`False_` object."""
        # SQL false is a boolean-typed expression.
        self.type = type_api.BOOLEANTYPE

    def compare(self, other):
        # Every False_ construct is equivalent to every other.
        return isinstance(other, False_)
class True_(ColumnElement):
    """Represent the ``true`` keyword within a SQL statement."""

    __visit_name__ = 'true'

    def __init__(self):
        """Return a :class:`True_` object."""
        # SQL true is a boolean-typed expression.
        self.type = type_api.BOOLEANTYPE

    def compare(self, other):
        # Every True_ construct is equivalent to every other.
        return isinstance(other, True_)
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator.

    By default, is comma-separated, such as a column listing.
    """
    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        self.operator = kwargs.pop('operator', operators.comma_op)
        self.group = kwargs.pop('group', True)
        self.group_contents = kwargs.pop('group_contents', True)
        # ``None`` members are silently dropped; the rest are coerced
        # to clause elements.
        text_clauses = [_literal_as_text(clause)
                        for clause in clauses if clause is not None]
        if self.group_contents:
            # Parenthesize each member against the list's own operator.
            self.clauses = [c.self_group(against=self.operator)
                            for c in text_clauses]
        else:
            self.clauses = text_clauses

    def __iter__(self):
        return iter(self.clauses)

    def __len__(self):
        return len(self.clauses)

    @property
    def _select_iterable(self):
        return iter(self)

    def append(self, clause):
        # TODO: not sure if i like the 'group_contents' flag.  need to
        # define the difference between a ClauseList of ClauseLists,
        # and a "flattened" ClauseList of ClauseLists.  flatten()
        # method ?
        clause = _literal_as_text(clause)
        if self.group_contents:
            clause = clause.self_group(against=self.operator)
        self.clauses.append(clause)

    def _copy_internals(self, clone=_clone, **kw):
        self.clauses = [clone(clause, **kw) for clause in self.clauses]

    def get_children(self, **kwargs):
        return self.clauses

    @property
    def _from_objects(self):
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))

    def self_group(self, against=None):
        if self.group and operators.is_precedent(self.operator, against):
            return Grouping(self)
        return self

    def compare(self, other, **kw):
        """Compare this :class:`.ClauseList` to the given
        :class:`.ClauseList`, including a comparison of all the clause
        items.
        """
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            # A one-element list compares directly against a lone clause.
            return self.clauses[0].compare(other, **kw)
        if isinstance(other, ClauseList) and \
                len(self.clauses) == len(other.clauses):
            for mine, theirs in zip(self.clauses, other.clauses):
                if not mine.compare(theirs, **kw):
                    return False
            return self.operator == other.operator
        return False
class BooleanClauseList(ClauseList, ColumnElement):
    """A :class:`.ClauseList` that is also a column-level boolean
    expression (e.g. an AND/OR conjunction).
    """
    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        super(BooleanClauseList, self).__init__(*clauses, **kwargs)
        # Default to BOOLEANTYPE unless an explicit type_ was given.
        type_ = kwargs.get('type_', type_api.BOOLEANTYPE)
        self.type = type_api.to_instance(type_)

    @property
    def _select_iterable(self):
        return (self, )

    def self_group(self, against=None):
        # An empty conjunction renders nothing, so grouping is a no-op.
        if not self.clauses:
            return self
        return super(BooleanClauseList, self).self_group(against=against)
class Tuple(ClauseList, ColumnElement):
    """Represent a SQL tuple."""

    def __init__(self, *clauses, **kw):
        """Return a :class:`.Tuple`.

        Main usage is to produce a composite IN construct::

            from sqlalchemy import tuple_

            tuple_(table.c.col1, table.c.col2).in_(
                [(1, 2), (5, 12), (10, 19)]
            )

        .. warning::

            The composite IN construct is not supported by all backends,
            and is currently known to work on Postgresql and MySQL,
            but not SQLite.   Unsupported backends will raise
            a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
            an expression is invoked.

        """
        clauses = [_literal_as_binds(c) for c in clauses]
        # Use the explicit type_ when given, otherwise infer it from
        # the member clauses.
        type_ = kw.pop('type_', None)
        self.type = type_ if type_ is not None else _type_from_args(clauses)
        super(Tuple, self).__init__(*clauses, **kw)

    @property
    def _select_iterable(self):
        return (self, )

    def _bind_param(self, operator, obj):
        # Each member of the compared-to tuple becomes its own bind.
        binds = [
            BindParameter(None, value, _compared_to_operator=operator,
                          _compared_to_type=self.type, unique=True)
            for value in obj
        ]
        return Tuple(*binds).self_group()
class Case(ColumnElement):
    """Represent a SQL ``CASE`` construct.
    """
    __visit_name__ = 'case'
    def __init__(self, whens, value=None, else_=None):
        r"""Produce a :class:`.Case` object.

        :param whens: A sequence of pairs, or alternatively a dict,
          to be translated into "WHEN / THEN" clauses.

        :param value: Optional for simple case statements, produces
          a column expression as in "CASE <expr> WHEN ..."

        :param else\_: Optional as well, for case defaults produces
          the "ELSE" portion of the "CASE" statement.

        The expressions used for THEN and ELSE,
        when specified as strings, will be interpreted
        as bound values. To specify textual SQL expressions
        for these, use the :func:`literal_column`
        construct.

        The expressions used for the WHEN criterion
        may only be literal strings when "value" is
        present, i.e. CASE table.somecol WHEN "x" THEN "y".
        Otherwise, literal strings are not accepted
        in this position, and either the text(<string>)
        or literal(<string>) constructs must be used to
        interpret raw string values.

        Usage examples::

          case([(orderline.c.qty > 100, item.c.specialprice),
                (orderline.c.qty > 10, item.c.bulkprice)
              ], else_=item.c.regularprice)

          case(value=emp.c.type, whens={
                  'engineer': emp.c.salary * 1.1,
                  'manager':  emp.c.salary * 3,
              })

        Using :func:`.literal_column()`, to allow for databases that
        do not support bind parameters in the ``then`` clause.  The type
        can be specified which determines the type of the :func:`case()`
        construct overall::

            case([(orderline.c.qty > 100,
                    literal_column("'greaterthan100'", String)),
                  (orderline.c.qty > 10, literal_column("'greaterthan10'",
                    String))
                ], else_=literal_column("'lethan10'", String))

        """
        # A dict ``whens`` is flattened to (criterion, result) pairs; a
        # plain sequence raises TypeError here and is used as-is.
        try:
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            pass
        if value is not None:
            # "Simple" CASE: the WHEN side may be a literal, coerced to
            # a bind parameter.
            whenlist = [
                (_literal_as_binds(c).self_group(),
                _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # "Searched" CASE: the WHEN side must be a real expression,
            # not a raw string literal.
            whenlist = [
                (_no_literals(c).self_group(),
                _literal_as_binds(r)) for (c, r) in whens
            ]
        if whenlist:
            # Infer the overall type from the THEN result of the last pair.
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None
        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)
        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None
    def _copy_internals(self, clone=_clone, **kw):
        # Clone value, every (criterion, result) pair, and the ELSE branch.
        if self.value is not None:
            self.value = clone(self.value, **kw)
        self.whens = [(clone(x, **kw), clone(y, **kw))
                            for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_, **kw)
    def get_children(self, **kwargs):
        # Yield the optional value, both halves of each WHEN/THEN pair,
        # then the optional ELSE.
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_
    @property
    def _from_objects(self):
        # Union of FROM objects across all child expressions.
        return list(itertools.chain(*[x._from_objects for x in
                    self.get_children()]))
def literal_column(text, type_=None):
    r"""Return a textual column expression, as would be in the columns
    clause of a ``SELECT`` statement.

    The object returned supports further expressions in the same way as any
    other column object, including comparison, math and string operations.
    The type\_ parameter is important to determine proper expression behavior
    (such as, '+' means string concatenation or numerical addition based on
    the type).

    :param text: the text of the expression; can be any SQL expression.
      Quoting rules will not be applied. To specify a column-name expression
      which should be subject to quoting rules, use the :func:`column`
      function.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      object which will provide result-set translation and additional
      expression semantics for this column. If left as None the type
      will be NullType.
    """
    # ``is_literal`` suppresses identifier quoting on the given text.
    return ColumnClause(text, type_=type_, is_literal=True)
class Cast(ColumnElement):
    """Represent the SQL ``CAST`` construct."""

    __visit_name__ = 'cast'

    def __init__(self, clause, totype, **kwargs):
        """Return a :class:`.Cast` object.

        Equivalent of SQL ``CAST(clause AS totype)``.

        Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e::

          cast(table.c.unit_price * table.c.qty, Numeric(10,4))

        or::

          cast(table.c.timestamp, DATE)

        :class:`.Cast` is available using :func:`.cast` or alternatively
        ``func.cast`` from the :data:`.func` namespace.
        """
        target = type_api.to_instance(totype)
        self.type = target
        # TypeClause renders the target type name in the CAST expression.
        self.typeclause = TypeClause(target)
        self.clause = _literal_as_binds(clause, None)

    def _copy_internals(self, clone=_clone, **kw):
        self.clause = clone(self.clause, **kw)
        self.typeclause = clone(self.typeclause, **kw)

    def get_children(self, **kwargs):
        return (self.clause, self.typeclause)

    @property
    def _from_objects(self):
        return self.clause._from_objects
class Extract(ColumnElement):
    """Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""

    __visit_name__ = 'extract'

    def __init__(self, field, expr, **kwargs):
        """Return a :class:`.Extract` construct.

        This is typically available as :func:`.extract`
        as well as ``func.extract`` from the
        :data:`.func` namespace.
        """
        # EXTRACT always yields an integer result.
        self.type = type_api.INTEGERTYPE
        self.field = field
        self.expr = _literal_as_binds(expr, None)

    def _copy_internals(self, clone=_clone, **kw):
        self.expr = clone(self.expr, **kw)

    def get_children(self, **kwargs):
        return (self.expr, )

    @property
    def _from_objects(self):
        return self.expr._from_objects
class UnaryExpression(ColumnElement):
    """Define a 'unary' expression.

    A unary expression has a single column expression
    and an operator.  The operator can be placed on the left
    (where it is called the 'operator') or right (where it is called the
    'modifier') of the column expression.
    """
    __visit_name__ = 'unary'
    def __init__(self, element, operator=None, modifier=None,
                            type_=None, negate=None):
        self.operator = operator
        self.modifier = modifier
        # Group the element against whichever side is populated.
        self.element = _literal_as_text(element).\
                    self_group(against=self.operator or self.modifier)
        self.type = type_api.to_instance(type_)
        # ``negate`` is the operator used when this expression is negated.
        self.negate = negate
    @classmethod
    def _create_nullsfirst(cls, column):
        """Return a NULLS FIRST ``ORDER BY`` clause element.

        e.g.::

          someselect.order_by(desc(table1.mycol).nullsfirst())

        produces::

          ORDER BY mycol DESC NULLS FIRST
        """
        return UnaryExpression(column, modifier=operators.nullsfirst_op)
    @classmethod
    def _create_nullslast(cls, column):
        """Return a NULLS LAST ``ORDER BY`` clause element.

        e.g.::

          someselect.order_by(desc(table1.mycol).nullslast())

        produces::

            ORDER BY mycol DESC NULLS LAST
        """
        return UnaryExpression(column, modifier=operators.nullslast_op)
    @classmethod
    def _create_desc(cls, column):
        """Return a descending ``ORDER BY`` clause element.

        e.g.::

          someselect.order_by(desc(table1.mycol))

        produces::

            ORDER BY mycol DESC
        """
        return UnaryExpression(column, modifier=operators.desc_op)
    @classmethod
    def _create_asc(cls, column):
        """Return an ascending ``ORDER BY`` clause element.

        e.g.::

          someselect.order_by(asc(table1.mycol))

        produces::

          ORDER BY mycol ASC
        """
        return UnaryExpression(column, modifier=operators.asc_op)
    @classmethod
    def _create_distinct(cls, expr):
        """Return a ``DISTINCT`` clause.

        e.g.::

            distinct(a)

        renders::

            DISTINCT a
        """
        expr = _literal_as_binds(expr)
        # DISTINCT preserves the type of the wrapped expression.
        return UnaryExpression(expr,
                    operator=operators.distinct_op, type_=expr.type)
    @util.memoized_property
    def _order_by_label_element(self):
        # ASC/DESC wrappers delegate label resolution to their element.
        if self.modifier in (operators.desc_op, operators.asc_op):
            return self.element._order_by_label_element
        else:
            return None
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
    def get_children(self, **kwargs):
        return self.element,
    def compare(self, other, **kw):
        """Compare this :class:`UnaryExpression` against the given
        :class:`.ClauseElement`."""
        # Equal only when operator, modifier and element all match.
        return (
            isinstance(other, UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )
    def _negate(self):
        # When an explicit negation operator exists, swap operator/negate;
        # otherwise fall back to wrapping in NOT via the superclass.
        if self.negate is not None:
            return UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return super(UnaryExpression, self)._negate()
    def self_group(self, against=None):
        # Parenthesize when our operator binds less tightly than ``against``.
        if self.operator and operators.is_precedent(self.operator,
                against):
            return Grouping(self)
        else:
            return self
class BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``.

    A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expresion::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b
    """
    __visit_name__ = 'binary'
    def __init__(self, left, right, operator, type_=None,
                    negate=None, modifiers=None):
        # allow compatibility with libraries that
        # refer to BinaryExpression directly and pass strings
        if isinstance(operator, util.string_types):
            operator = operators.custom_op(operator)
        # keep the un-coerced operands for __bool__ hashing below
        self._orig = (left, right)
        self.left = _literal_as_text(left).self_group(against=operator)
        self.right = _literal_as_text(right).self_group(against=operator)
        self.operator = operator
        self.type = type_api.to_instance(type_)
        # ``negate`` is the operator used when this expression is negated.
        self.negate = negate
        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers
    def __bool__(self):
        # The truth value of ``a == b`` / ``a != b`` is computed by
        # comparing the hashes of the original operands, so identity-style
        # checks like ``somecol == somecol`` evaluate sensibly in Python.
        if self.operator in (operator.eq, operator.ne):
            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
        else:
            raise TypeError("Boolean value of this clause is not defined")
    __nonzero__ = __bool__
    @property
    def is_comparison(self):
        return operators.is_comparison(self.operator)
    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)
    def get_children(self, **kwargs):
        return self.left, self.right
    def compare(self, other, **kw):
        """Compare this :class:`BinaryExpression` against the
        given :class:`BinaryExpression`."""
        # Operands match either in order, or swapped when the operator
        # is commutative (``and`` binds tighter than ``or`` here).
        return (
            isinstance(other, BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )
    def self_group(self, against=None):
        if operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
    def _negate(self):
        # Use the explicit negation operator when present; note the result
        # is always typed as BOOLEANTYPE.  Otherwise wrap in NOT via super.
        if self.negate is not None:
            return BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=type_api.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(BinaryExpression, self)._negate()
class Grouping(ColumnElement):
    """Represent a grouping (parenthesization) within a column
    expression.
    """
    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        # Fall back to NULLTYPE when the wrapped element carries no type.
        self.type = getattr(element, 'type', type_api.NULLTYPE)

    @property
    def _label(self):
        return getattr(self.element, '_label', None) or self.anon_label

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return (self.element, )

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # Proxy unknown attribute access through to the wrapped element.
        return getattr(self.element, attr)

    def __getstate__(self):
        return dict(element=self.element, type=self.type)

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']

    def compare(self, other, **kw):
        if not isinstance(other, Grouping):
            return False
        return self.element.compare(other.element)
class Over(ColumnElement):
    """Represent an OVER clause.

    This is a special operator against a so-called
    "window" function, as well as any aggregate function,
    which produces results relative to the result set
    itself.  It's supported only by certain database
    backends.
    """
    __visit_name__ = 'over'

    # Class-level defaults; overwritten per instance when given.
    order_by = None
    partition_by = None

    def __init__(self, func, partition_by=None, order_by=None):
        """Produce an :class:`.Over` object against a function.

        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.

        E.g.::

            from sqlalchemy import over
            over(func.row_number(), order_by='x')

        Would produce "ROW_NUMBER() OVER(ORDER BY x)".

        :param func: a :class:`.FunctionElement` construct, typically
         generated by :data:`~.expression.func`.
        :param partition_by: a column element or string, or a list
         of such, that will be used as the PARTITION BY clause
         of the OVER construct.
        :param order_by: a column element or string, or a list
         of such, that will be used as the ORDER BY clause
         of the OVER construct.

        This function is also available from the :data:`~.expression.func`
        construct itself via the :meth:`.FunctionElement.over` method.

        .. versionadded:: 0.7
        """
        self.func = func
        if order_by is not None:
            self.order_by = ClauseList(*util.to_list(order_by))
        if partition_by is not None:
            self.partition_by = ClauseList(*util.to_list(partition_by))

    @util.memoized_property
    def type(self):
        # The OVER expression takes on the type of the wrapped function.
        return self.func.type

    def get_children(self, **kwargs):
        candidates = (self.func, self.partition_by, self.order_by)
        return [c for c in candidates if c is not None]

    def _copy_internals(self, clone=_clone, **kw):
        self.func = clone(self.func, **kw)
        for attr in ('partition_by', 'order_by'):
            existing = getattr(self, attr)
            if existing is not None:
                setattr(self, attr, clone(existing, **kw))

    @property
    def _from_objects(self):
        candidates = (self.func, self.partition_by, self.order_by)
        return list(itertools.chain(
            *[c._from_objects for c in candidates if c is not None]))
class Label(ColumnElement):
    """Represents a column label (AS).

    Represent a label, as typically applied to any column-level
    element using the ``AS`` sql keyword.
    """
    __visit_name__ = 'label'
    def __init__(self, name, element, type_=None):
        """Return a :class:`Label` object for the
        given :class:`.ColumnElement`.

        A label changes the name of an element in the columns clause of a
        ``SELECT`` statement, typically via the ``AS`` SQL keyword.

        This functionality is more conveniently available via the
        :meth:`.ColumnElement.label` method on :class:`.ColumnElement`.

        :param name: label name

        :param obj: a :class:`.ColumnElement`.
        """
        # Unwrap nested labels so we label the innermost element directly.
        while isinstance(element, Label):
            element = element.element
        if name:
            self.name = name
        else:
            # No name given: generate a deterministic anonymous label.
            self.name = _anonymous_label('%%(%d %s)s' % (id(self),
                                getattr(element, 'name', 'anon')))
        self.key = self._label = self._key_label = self.name
        self._element = element
        self._type = type_
        self._proxies = [element]
    @util.memoized_property
    def _order_by_label_element(self):
        # A label IS the element ORDER BY resolves against.
        return self
    @util.memoized_property
    def type(self):
        # Explicit type_ wins; otherwise take the element's type if any.
        return type_api.to_instance(
                    self._type or getattr(self._element, 'type', None)
                )
    @util.memoized_property
    def element(self):
        # Memoized: grouped form of the labeled element.
        return self._element.self_group(against=operators.as_)
    def self_group(self, against=None):
        sub_element = self._element.self_group(against=against)
        # Only build a new Label when grouping actually changed the element.
        if sub_element is not self._element:
            return Label(self.name,
                        sub_element,
                        type_=self._type)
        else:
            return self
    @property
    def primary_key(self):
        return self.element.primary_key
    @property
    def foreign_keys(self):
        return self.element.foreign_keys
    def get_children(self, **kwargs):
        return self.element,
    def _copy_internals(self, clone=_clone, **kw):
        # NOTE(review): this assigns into the memoized ``element`` slot,
        # leaving ``_element`` untouched — presumably intentional; confirm
        # before restructuring.
        self.element = clone(self.element, **kw)
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _make_proxy(self, selectable, name=None, **kw):
        # Proxy the underlying element, then record this label as a proxy
        # source and propagate an explicit type if one was given.
        e = self.element._make_proxy(selectable,
                                name=name if name else self.name)
        e._proxies.append(self)
        if self._type is not None:
            e.type = self._type
        return e
class ColumnClause(Immutable, ColumnElement):
    """Represents a generic column expression from any textual string.

    This includes columns associated with tables, aliases and select
    statements, but also any arbitrary text.  May or may not be bound
    to an underlying :class:`.Selectable`.

    :class:`.ColumnClause` is constructed by itself typically via
    the :func:`~.expression.column` function.  It may be placed directly
    into constructs such as :func:`.select` constructs::

        from sqlalchemy.sql import column, select

        c1, c2 = column("c1"), column("c2")
        s = select([c1, c2]).where(c1==5)

    There is also a variant on :func:`~.expression.column` known
    as :func:`~.expression.literal_column` - the difference is that
    in the latter case, the string value is assumed to be an exact
    expression, rather than a column name, so that no quoting rules
    or similar are applied::

        from sqlalchemy.sql import literal_column, select

        s = select([literal_column("5 + 7")])

    :class:`.ColumnClause` can also be used in a table-like
    fashion by combining the :func:`~.expression.column` function
    with the :func:`~.expression.table` function, to produce
    a "lightweight" form of table metadata::

        from sqlalchemy.sql import table, column

        user = table("user",
                column("id"),
                column("name"),
                column("description"),
        )

    The above construct can be created in an ad-hoc fashion and is
    not associated with any :class:`.schema.MetaData`, unlike its
    more full fledged :class:`.schema.Table` counterpart.
    """
    __visit_name__ = 'column'
    onupdate = default = server_default = server_onupdate = None
    # Memoized properties in this group are expired together when the
    # owning table changes (see _set_table).
    _memoized_property = util.group_expirable_memoized_property()
    def __init__(self, text, type_=None, is_literal=False, _selectable=None):
        r"""Construct a :class:`.ColumnClause` object.

        :param text: the text of the element.

        :param type: :class:`.types.TypeEngine` object which can associate
          this :class:`.ColumnClause` with a type.

        :param is_literal: if True, the :class:`.ColumnClause` is assumed to
          be an exact expression that will be delivered to the output with no
          quoting rules applied regardless of case sensitive settings. the
          :func:`literal_column()` function is usually used to create such a
          :class:`.ColumnClause`.

        :param text: the name of the column.  Quoting rules will be applied
          to the clause like any other column name. For textual column
          constructs that are not to be quoted, use the
          :func:`literal_column` function.

        :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
          object which will provide result-set translation for this column.
        """
        self.key = self.name = text
        self.table = _selectable
        self.type = type_api.to_instance(type_)
        self.is_literal = is_literal
    def _compare_name_for_result(self, other):
        # Literal or table-less columns fall back to plain name matching;
        # otherwise compare via proxy-set intersection.
        if self.is_literal or \
            self.table is None or \
            not hasattr(other, 'proxy_set') or (
            isinstance(other, ColumnClause) and other.is_literal
        ):
            return super(ColumnClause, self).\
                    _compare_name_for_result(other)
        else:
            return other.proxy_set.intersection(self.proxy_set)
    def _get_table(self):
        return self.__dict__['table']
    def _set_table(self, table):
        # Changing the table invalidates the grouped memoized properties
        # (_from_objects, _key_label, _label) computed from the old table.
        self._memoized_property.expire_instance(self)
        self.__dict__['table'] = table
    table = property(_get_table, _set_table)
    @_memoized_property
    def _from_objects(self):
        t = self.table
        if t is not None:
            return [t]
        else:
            return []
    @util.memoized_property
    def description(self):
        if util.py3k:
            return self.name
        else:
            # Python 2: keep the description printable as ascii bytes.
            return self.name.encode('ascii', 'backslashreplace')
    @_memoized_property
    def _key_label(self):
        if self.key != self.name:
            return self._gen_label(self.key)
        else:
            return self._label
    @_memoized_property
    def _label(self):
        return self._gen_label(self.name)
    def _gen_label(self, name):
        # Build a "tablename_columnname" style label, disambiguating
        # against existing columns of the table; literal columns have none.
        t = self.table
        if self.is_literal:
            return None
        elif t is not None and t.named_with_column:
            if getattr(t, 'schema', None):
                label = t.schema.replace('.', '_') + "_" + \
                            t.name + "_" + name
            else:
                label = t.name + "_" + name
            # propagate name quoting rules for labels.
            if getattr(name, "quote", None) is not None:
                label = quoted_name(label, name.quote)
            elif getattr(t.name, "quote", None) is not None:
                label = quoted_name(label, t.name.quote)
            # ensure the label name doesn't conflict with that
            # of an existing column
            if label in t.c:
                _label = label
                counter = 1
                while _label in t.c:
                    _label = label + "_" + str(counter)
                    counter += 1
                label = _label
            return _as_truncated(label)
        else:
            return name
    def _bind_param(self, operator, obj):
        # Compared-to values become bind parameters named after this column.
        return BindParameter(self.name, obj,
                                _compared_to_operator=operator,
                                _compared_to_type=self.type,
                                unique=True)
    def _make_proxy(self, selectable, name=None, attach=True,
                            name_is_truncatable=False, **kw):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = self._constructor(
                    _as_truncated(name or self.name) if \
                                    name_is_truncatable else \
                                    (name or self.name),
                    type_=self.type,
                    _selectable=selectable,
                    is_literal=is_literal
                )
        if name is None:
            c.key = self.key
        c._proxies = [self]
        # Track the clone lineage so comparisons against the pre-clone
        # selectable's columns still resolve.
        if selectable._is_clone_of is not None:
            c._is_clone_of = \
                selectable._is_clone_of.columns.get(c.key)
        if attach:
            selectable._columns[c.key] = c
        return c
class _IdentifiedClause(Executable, ClauseElement):
    """Base for executable clauses that reference a named identifier,
    e.g. savepoint operations.
    """
    __visit_name__ = 'identified'
    # Savepoint operations must not trigger autocommit.
    _execution_options = \
        Executable._execution_options.union({'autocommit': False})
    def __init__(self, ident):
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT statement for the named identifier."""
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT statement for the named
    identifier.
    """
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT statement for the named identifier."""
    __visit_name__ = 'release_savepoint'
class quoted_name(util.text_type):
    """Represent a SQL identifier combined with quoting preferences.
    :class:`.quoted_name` is a Python unicode/str subclass which
    represents a particular identifier name along with a
    ``quote`` flag.  This ``quote`` flag, when set to
    ``True`` or ``False``, overrides automatic quoting behavior
    for this identifier in order to either unconditionally quote
    or to not quote the name.  If left at its default of ``None``,
    quoting behavior is applied to the identifier on a per-backend basis
    based on an examination of the token itself.
    A :class:`.quoted_name` object with ``quote=True`` is also
    prevented from being modified in the case of a so-called
    "name normalize" option.  Certain database backends, such as
    Oracle, Firebird, and DB2 "normalize" case-insensitive names
    as uppercase.  The SQLAlchemy dialects for these backends
    convert from SQLAlchemy's lower-case-means-insensitive convention
    to the upper-case-means-insensitive conventions of those backends.
    The ``quote=True`` flag here will prevent this conversion from occurring
    to support an identifier that's quoted as all lower case against
    such a backend.
    The :class:`.quoted_name` object is normally created automatically
    when specifying the name for key schema constructs such as :class:`.Table`,
    :class:`.Column`, and others.   The class can also be passed explicitly
    as the name to any function that receives a name which can be quoted.
    Such as to use the :meth:`.Engine.has_table` method with an unconditionally
    quoted name::
        from sqlalchemy import create_engine
        from sqlalchemy.sql.elements import quoted_name
        engine = create_engine("oracle+cx_oracle://some_dsn")
        engine.has_table(quoted_name("some_table", True))
    The above logic will run the "has table" logic against the Oracle backend,
    passing the name exactly as ``"some_table"`` without converting to
    upper case.
    .. versionadded:: 0.9.0
    """
    #def __new__(cls, value, quote, sprcls=False):
    def __new__(cls, value, quote):
        # ``None`` passes through so callers need not special-case
        # "no name given"
        if value is None:
            return None
        # experimental - don't bother with quoted_name
        # if quote flag is None.  doesn't seem to make any dent
        # in performance however
        # elif not sprcls and quote is None:
        #   return value
        elif isinstance(value, cls) and (
                quote is None or value.quote == quote
            ):
            # already a quoted_name with a compatible quote flag; reuse it
            return value
        self = super(quoted_name, cls).__new__(cls, value)
        self.quote = quote
        return self
    def __reduce__(self):
        # pickle as (plain text, quote flag); reconstructed via __new__
        return quoted_name, (util.text_type(self), self.quote)
    @util.memoized_instancemethod
    def lower(self):
        # a quoted name is case-exact; lowering would defeat the quoting
        if self.quote:
            return self
        else:
            return util.text_type(self).lower()
    @util.memoized_instancemethod
    def upper(self):
        # a quoted name is case-exact; uppering would defeat the quoting
        if self.quote:
            return self
        else:
            return util.text_type(self).upper()
    def __repr__(self):
        return "'%s'" % self
class _truncated_label(quoted_name):
    """A unicode subclass used to identify symbolic names
    that may require truncation."""
    def __new__(cls, value, quote=None):
        # inherit an explicit quote preference from the incoming value,
        # if it carries one
        quote = getattr(value, "quote", quote)
        #return super(_truncated_label, cls).__new__(cls, value, quote, True)
        return super(_truncated_label, cls).__new__(cls, value, quote)
    def __reduce__(self):
        # pickle support: rebuild from plain text plus the quote flag
        return self.__class__, (util.text_type(self), self.quote)
    def apply_map(self, map_):
        # non-anonymous labels ignore anon-map substitution
        return self
# backwards-compatibility alias, in case someone is re-implementing
# the _truncated_identifier() sequence in a custom compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
    """A unicode subclass used to identify anonymously
    generated names."""
    def __add__(self, other):
        # concatenation preserves the anonymous-label type and quote flag
        combined = util.text_type.__add__(self, util.text_type(other))
        return _anonymous_label(quoted_name(combined, self.quote))
    def __radd__(self, other):
        combined = util.text_type.__add__(util.text_type(other), self)
        return _anonymous_label(quoted_name(combined, self.quote))
    def apply_map(self, map_):
        if self.quote is None:
            # no quoting preference - skip the quoted_name construction
            return self % map_
        # preserve quoting only if necessary
        return quoted_name(self % map_, self.quote)
def _as_truncated(value):
    """coerce the given value to :class:`._truncated_label`.
    Existing :class:`._truncated_label` and
    :class:`._anonymous_label` objects are passed
    unchanged.
    """
    return value if isinstance(value, _truncated_label) \
        else _truncated_label(value)
def _string_or_unprintable(element):
    """Return ``element`` as a string, or a ``%r`` placeholder when it
    cannot be stringified.

    Strings pass through unchanged; any other object is coerced with
    ``str()``, falling back to a repr-based placeholder if ``str()``
    itself raises.
    """
    if isinstance(element, util.string_types):
        return element
    else:
        try:
            return str(element)
        # was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; only arbitrary user-object errors belong here
        except Exception:
            return "unprintable element %r" % element
def _expand_cloned(elements):
    """expand the given set of ClauseElements to be the set of all 'cloned'
    predecessors.
    """
    return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
    """expand tables into individual columns in the
    given list of column expressions.
    """
    return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
    """return the intersection of sets a and b, counting
    any overlap between 'cloned' predecessors.
    The returned set is in terms of the entities present within 'a'.
    """
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return {elem for elem in a
            if overlap.intersection(elem._cloned_set)}
def _cloned_difference(a, b):
    """return the elements of set a which have no 'cloned'
    overlap with set b."""
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return {elem for elem in a
            if not overlap.intersection(elem._cloned_set)}
def _labeled(element):
    if not hasattr(element, 'name'):
        return element.label(None)
    else:
        return element
def _is_column(col):
    """True if ``col`` is an instance of :class:`.ColumnElement`.

    Plain strings and objects that merely provide ``__clause_element__``
    are not considered columns here.
    """
    return isinstance(col, ColumnElement)
def _find_columns(clause):
    """locate Column objects within the given expression."""
    cols = util.column_set()
    # visitor callback collects every element visited as 'column'
    traverse(clause, {}, {'column': cols.add})
    return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__().  it's only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
    """Return a string key for the given column-like element,
    or None if no key is available."""
    if isinstance(element, util.string_types):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    # getattr with a default mirrors the original try/except AttributeError
    return getattr(element, 'key', None)
def _clause_element_as_expr(element):
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    else:
        return element
def _literal_as_text(element):
    """Coerce a Python value into a SQL text construct,
    passing existing clause elements through."""
    if isinstance(element, Visitable):
        return element
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, util.string_types):
        return TextClause(util.text_type(element))
    if isinstance(element, (util.NoneType, bool)):
        return _const_expr(element)
    raise exc.ArgumentError(
        "SQL expression object or string expected."
    )
def _no_literals(element):
    """Pass clause elements through, rejecting plain Python literals
    with an informative error."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    raise exc.ArgumentError("Ambiguous literal: %r.  Use the 'text()' "
                            "function to indicate a SQL expression "
                            "literal, or 'literal()' to indicate a "
                            "bound value." % element)
def _is_literal(element):
    """True if ``element`` is a plain Python literal, i.e. neither a
    Visitable nor an object providing ``__clause_element__``."""
    return not (
        isinstance(element, Visitable) or
        hasattr(element, '__clause_element__')
    )
def _only_column_elements_or_none(element, name):
    if element is None:
        return None
    else:
        return _only_column_elements(element, name)
def _only_column_elements(element, name):
    """Coerce ``element`` to a :class:`.ColumnElement`, raising
    ArgumentError (mentioning argument ``name``) otherwise."""
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if not isinstance(element, ColumnElement):
        raise exc.ArgumentError(
                "Column-based expression object expected for argument "
                "'%s'; got: '%s', type %s" % (name, element, type(element)))
    return element
def _literal_as_binds(element, name=None, type_=None):
    """Coerce a plain value to a bound parameter (None becomes Null),
    passing existing clause elements through."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    if element is None:
        return Null()
    return BindParameter(name, element, type_=type_, unique=True)
def _interpret_as_column_or_from(element):
    """Coerce ``element`` into a column- or FROM-style clause element."""
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        # not inspectable: None/booleans become SQL constants; any other
        # object deliberately falls through to the literal-column case
        if isinstance(element, (util.NoneType, bool)):
            return _const_expr(element)
    elif hasattr(insp, "selectable"):
        return insp.selectable
    # fallback: render the stringified object as a literal column
    return ColumnClause(str(element), is_literal=True)
def _const_expr(element):
    """Return the SQL constant construct corresponding to
    None / False / True, passing existing constructs through."""
    if isinstance(element, (Null, False_, True_)):
        return element
    if element is None:
        return Null()
    if element is False:
        return False_()
    if element is True:
        return True_()
    raise exc.ArgumentError(
        "Expected None, False, or True"
    )
def _type_from_args(args):
    for a in args:
        if not a.type._isnull:
            return a.type
    else:
        return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
                                        require_embedded=False):
    c = fromclause.corresponding_column(column,
            require_embedded=require_embedded)
    if c is None:
        raise exc.InvalidRequestError(
                "Given column '%s', attached to table '%s', "
                "failed to locate a corresponding column from table '%s'"
                %
                (column,
                    getattr(column, 'table', None),
                    fromclause.description)
                )
    return c
class AnnotatedColumnElement(Annotated):
    """Annotated variant of a ColumnElement which keeps the memoized
    'comparator', 'name' and 'key' attributes in sync with annotation
    changes."""
    def __init__(self, element, values):
        Annotated.__init__(self, element, values)
        # the comparator memoization is per-object; reset it so this
        # annotated copy builds its own
        ColumnElement.comparator._reset(self)
        for attr in ('name', 'key'):
            # drop copied-over None values so the memoized properties
            # below re-pull them from the wrapped element
            if self.__dict__.get(attr, False) is None:
                self.__dict__.pop(attr)
    def _with_annotations(self, values):
        clone = super(AnnotatedColumnElement, self)._with_annotations(values)
        ColumnElement.comparator._reset(clone)
        return clone
    @util.memoized_property
    def name(self):
        """pull 'name' from parent, if not present"""
        return self._Annotated__element.name
    @util.memoized_property
    def key(self):
        """pull 'key' from parent, if not present"""
        return self._Annotated__element.key
    @util.memoized_property
    def info(self):
        # proxy the parent element's info dictionary
        return self._Annotated__element.info
 | 
	alex/sqlalchemy | 
	lib/sqlalchemy/sql/elements.py | 
	Python | 
	mit | 80,420 | 
	[
  "VisIt"
] | 
	86bfc65f9d734ee04a7c2773fb927f89f54190ec9301257a444d40b24eadaa09 | 
| 
	from .base import *
class session(object):
    """
    cytoscape session interface as shown in CyREST's swagger documentation for 'session'.
    :param url: an url of the type 'http://' + host + ':' + str(port) + '/' + version + '/'.
    """
    def __init__(self, url):
        # __url targets the commands endpoint; ___url keeps the bare base
        # URL for the raw 'session' REST resource used further below
        self.__url = url + 'commands/session'
        self.___url=url
    def new(self, verbose=False):
        """
        Destroys the current session and creates a new, empty one.
        :param verbose: print more
        """
        response=api(url=self.__url+"/new", verbose=verbose)
        return response
    
    def open(self, session_file=None,session_url=None, verbose=False):
        """
        Opens a session from a local file or URL.
        :param session_file: The path to the session file (.cys) to be loaded.
        :param session_url: A URL that provides a session file.
        :param verbose: print more
        """
        PARAMS=set_param(["file", "url"],[session_file, session_url])
        response=api(url=self.__url+"/open", PARAMS=PARAMS, verbose=verbose)
        return response
    
    def save(self, session_file, verbose=False):
        """
        Saves the current session to an existing file, which will be replaced.
        If this is a new session that has not been saved yet, use 'save as'
        instead.
        :param session_file: The path to the file where the current session
        must be saved to.
        :param verbose: print more
        """
        PARAMS={"file":session_file}
        response=api(url=self.__url+"/save", PARAMS=PARAMS, verbose=verbose)
        return response
    
    def save_as(self, session_file, verbose=False):
        """
        Saves the current session as a new file.
        :param session_file: The path to the file where the current session
        must be saved to.
        :param verbose: print more
        """
        PARAMS={"file":session_file}
        # "save as" (with space) is the literal CyREST command name
        response=api(url=self.__url+"/save as", PARAMS=PARAMS, verbose=verbose)
        return response
    def createSessionFile(self, file, verbose=None):
        """
        Saves the current session to a file. If successful, the session file location will be returned.
        :param file: Session file location as an absolute path
        :param verbose: print more
        :returns: 200: successful operation
        """
        PARAMS=set_param(['file'],[file])
        response=api(url=self.___url+'session', PARAMS=PARAMS, method="POST", verbose=verbose)
        return response
    def deleteSession(self, verbose=None):
        """
        This deletes the current session and initializes a new one. A message is returned to indicate the success of the deletion.
        :param verbose: print more
        :returns: 200: successful operation
        """
        response=api(url=self.___url+'session', method="DELETE", verbose=verbose)
        return response
    def getSessionFromFile(self, file, verbose=None):
        """
        Loads a session from a local file and returns the session file name
        :param file: Session file location as an absolute path
        :param verbose: print more
        :returns: 200: successful operation
        """
        response=api(url=self.___url+'session', PARAMS={'file':file}, method="GET", verbose=verbose, parse_params=False)
        return response
    def getSessionName(self, verbose=None):
        """
        Returns the file name for the current Cytoscape session.
        :param verbose: print more
        :returns: 200: successful operation
        """
        response=api(url=self.___url+'session/name', method="GET", verbose=verbose, parse_params=False)
        return response
    def runGarbageCollection(self, verbose=None):
        """
        Manually call Java's System.gc() to free up unused memory. This process happens automatically, but may be useful to call explicitly for testing or evaluation purposes.
        :param verbose: print more
        :returns: 204: Successful Garbage Collection
        """
        response=api(url=self.___url+'gc', method="GET", verbose=verbose, parse_params=False)
        return response
	idekerlab/py2cytoscape | 
	py2cytoscape/cyrest/session.py | 
	Python | 
	mit | 4,191 | 
	[
  "Cytoscape"
] | 
	fe26253e7102c00f30e59407705c422f04c4aea74d370ec2e61a6ff1b43b3e24 | 
| 
	#!/usr/bin/env python3
from abc import ABC, abstractproperty
import torch
from .. import settings
from ..distributions import Delta, MultivariateNormal
from ..module import Module
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.memoize import cached, clear_cache_hook
class _VariationalStrategy(Module, ABC):
    """
    Abstract base class for all Variational Strategies.
    """
    def __init__(self, model, inducing_points, variational_distribution, learn_inducing_locations=True):
        super().__init__()
        # Model -- set via object.__setattr__ to avoid registering the
        # model as a submodule of this strategy
        object.__setattr__(self, "model", model)
        # Inducing points -- clone so we never mutate the caller's tensor
        inducing_points = inducing_points.clone()
        if inducing_points.dim() == 1:
            inducing_points = inducing_points.unsqueeze(-1)
        if learn_inducing_locations:
            self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points))
        else:
            self.register_buffer("inducing_points", inducing_points)
        # Variational distribution; the init flag is a buffer so it
        # persists through state_dict save/load
        self._variational_distribution = variational_distribution
        self.register_buffer("variational_params_initialized", torch.tensor(0))
    def _expand_inputs(self, x, inducing_points):
        """
        Pre-processing step in __call__ to make x the same batch_shape as the inducing points
        """
        batch_shape = _mul_broadcast_shape(inducing_points.shape[:-2], x.shape[:-2])
        inducing_points = inducing_points.expand(*batch_shape, *inducing_points.shape[-2:])
        x = x.expand(*batch_shape, *x.shape[-2:])
        return x, inducing_points
    @abstractproperty
    @cached(name="prior_distribution_memo")
    def prior_distribution(self):
        r"""
        The :func:`~gpytorch.variational.VariationalStrategy.prior_distribution` method determines how to compute the
        GP prior distribution of the inducing points, e.g. :math:`p(u) \sim N(\mu(X_u), K(X_u, X_u))`. Most commonly,
        this is done simply by calling the user defined GP prior on the inducing point data directly.
        :rtype: :obj:`~gpytorch.distributions.MultivariateNormal`
        :return: The distribution :math:`p( \mathbf u)`
        """
        raise NotImplementedError
    @property
    @cached(name="variational_distribution_memo")
    def variational_distribution(self):
        return self._variational_distribution()
    def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
        r"""
        The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
        inducing point function values. Specifically, forward defines how to transform a variational distribution
        over the inducing point values, :math:`q(u)`, in to a variational distribution over the function values at
        specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
        :param torch.Tensor x: Locations :math:`\mathbf X` to get the
            variational posterior of the function values at.
        :param torch.Tensor inducing_points: Locations :math:`\mathbf Z` of the inducing points
        :param torch.Tensor inducing_values: Samples of the inducing function values :math:`\mathbf u`
            (or the mean of the distribution :math:`q(\mathbf u)` if q is a Gaussian.
        :param ~gpytorch.lazy.LazyTensor variational_inducing_covar: If the distribution :math:`q(\mathbf u)`
            is Gaussian, then this variable is the covariance matrix of that Gaussian. Otherwise, it will be
            :attr:`None`.
        :rtype: :obj:`~gpytorch.distributions.MultivariateNormal`
        :return: The distribution :math:`q( \mathbf f(\mathbf X))`
        """
        raise NotImplementedError
    def kl_divergence(self):
        r"""
        Compute the KL divergence between the variational inducing distribution :math:`q(\mathbf u)`
        and the prior inducing distribution :math:`p(\mathbf u)`.
        :rtype: torch.Tensor
        """
        with settings.max_preconditioner_size(0):
            kl_divergence = torch.distributions.kl.kl_divergence(self.variational_distribution, self.prior_distribution)
        return kl_divergence
    def train(self, mode=True):
        # Make sure we are clearing the cache if we change modes
        if (self.training and not mode) or mode:
            clear_cache_hook(self)
        return super().train(mode=mode)
    def __call__(self, x, prior=False):
        # If we're in prior mode, then we're done!
        if prior:
            return self.model.forward(x)
        # Delete previously cached items from the training distribution
        if self.training:
            clear_cache_hook(self)
        # (Maybe) initialize variational distribution
        if not self.variational_params_initialized.item():
            prior_dist = self.prior_distribution
            self._variational_distribution.initialize_variational_distribution(prior_dist)
            self.variational_params_initialized.fill_(1)
        # Ensure inducing_points and x are the same size
        inducing_points = self.inducing_points
        if inducing_points.shape[:-2] != x.shape[:-2]:
            x, inducing_points = self._expand_inputs(x, inducing_points)
        # Get p(u)/q(u)
        variational_dist_u = self.variational_distribution
        # Get q(f)
        if isinstance(variational_dist_u, MultivariateNormal):
            return super().__call__(
                x,
                inducing_points,
                inducing_values=variational_dist_u.mean,
                variational_inducing_covar=variational_dist_u.lazy_covariance_matrix,
            )
        elif isinstance(variational_dist_u, Delta):
            return super().__call__(
                x, inducing_points, inducing_values=variational_dist_u.mean, variational_inducing_covar=None
            )
        else:
            # fixed typo: "distribuition" -> "distribution" in the error text
            raise RuntimeError(
                f"Invalid variational distribution ({type(variational_dist_u)}). "
                "Expected a multivariate normal or a delta distribution."
            )
 | 
	jrg365/gpytorch | 
	gpytorch/variational/_variational_strategy.py | 
	Python | 
	mit | 6,122 | 
	[
  "Gaussian"
] | 
	cbf329ff3ac64378b8e2456fbfd4a4611c6f179ada0ab8216307b67e4a26bc48 | 
| 
	from __future__ import division, unicode_literals
import warnings
import matplotlib
matplotlib.use('pdf')
import unittest as unittest
import numpy as np
from pymatgen import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram, \
    GrandPotentialPhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.analysis.interface_reactions import InterfacialReactivity
class InterfaceReactionTest(unittest.TestCase):
    def setUp(self):
        # Toy chemical system (Li-Mn-O) with hand-picked formation energies;
        # values are chosen so that hull/reaction energies are easy to verify.
        self.entries = [ComputedEntry(Composition('Li'), 0),
                        ComputedEntry(Composition('Mn'), 0),
                        ComputedEntry(Composition('O2'), 0),
                        ComputedEntry(Composition('MnO2'), -10),
                        ComputedEntry(Composition('Mn2O4'), -60),
                        ComputedEntry(Composition('MnO3'), 20),
                        ComputedEntry(Composition('Li2O'), -10),
                        ComputedEntry(Composition('Li2O2'), -8),
                        ComputedEntry(Composition('LiMnO2'), -30)
                        ]
        self.pd = PhaseDiagram(self.entries)
        # grand-potential diagram open to Li at a fixed chemical potential
        chempots = {'Li': -3}
        self.gpd = GrandPotentialPhaseDiagram(self.entries, chempots)
        # self.ir[i] indices are referenced throughout the tests below;
        # each combines a reactant pair with norm / mixing-energy /
        # hull-energy options
        self.ir = []
        self.ir.append(
            InterfacialReactivity(Composition('O2'), Composition('Mn'),
                                  self.pd, norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=None, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('MnO2'), Composition('Mn'),
                                  self.gpd, norm=0, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Mn'), Composition('O2'),
                                  self.gpd, norm=1, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O'), Composition('Mn'),
                                  self.gpd, norm=0, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Mn'), Composition('O2'),
                                  self.gpd, norm=1, include_no_mixing_energy=0,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Mn'), Composition('Li2O'),
                                  self.gpd, norm=1, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
                                  self.pd, norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=None, use_hull_energy=True))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
                                  self.pd, norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=None, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
                                  self.gpd, norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=self.pd, use_hull_energy=True))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
                                  self.gpd, norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('O2'), Composition('Mn'),
                                  self.pd, norm=1, include_no_mixing_energy=0,
                                  pd_non_grand=None, use_hull_energy=False))
        # invalid-argument combinations must raise with specific messages
        with self.assertRaises(Exception) as context1:
            self.ir.append(
                InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
                                      self.pd, norm=0,
                                      include_no_mixing_energy=1,
                                      pd_non_grand=None))
        self.assertTrue(
            'Please provide grand phase diagram '
            'to compute no_mixing_energy!' == str(context1.exception))
        with self.assertRaises(Exception) as context2:
            self.ir.append(
                InterfacialReactivity(Composition('O2'), Composition('Mn'),
                                      self.gpd, norm=0,
                                      include_no_mixing_energy=1,
                                      pd_non_grand=None))
        self.assertTrue(
            'Please provide non-grand phase diagram '
            'to compute no_mixing_energy!' == str(context2.exception))
    def test_get_entry_energy(self):
        # Test warning message: a composition whose only entry has a
        # positive formation energy should warn and fall back to the hull.
        comp = Composition('MnO3')
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            energy = InterfacialReactivity._get_entry_energy(self.pd, comp)
            self.assertTrue(len(w) == 1)
            self.assertTrue("The reactant MnO3 has no matching entry with"
                            " negative formation energy, instead convex "
                            "hull energy for this composition will be used"
                            " for reaction energy calculation."
                            in str(w[-1].message))
        test1 = np.isclose(energy, -30, atol=1e-03)
        self.assertTrue(test1,
                        '_get_entry_energy: energy for {} is wrong!'.format(
                            comp.reduced_formula))
        # Test normal functionality: stable entry is used directly
        comp = Composition('MnO2')
        test2 = np.isclose(InterfacialReactivity._get_entry_energy(self.pd, comp), -30,
                           atol=1e-03)
        self.assertTrue(test2,
                        '_get_entry_energy: energy for {} is wrong!'.format(
                            comp.reduced_formula))
    def test_get_grand_potential(self):
        # grand potential = energy minus mu_Li * n_Li for the open element
        comp = Composition('LiMnO2')
        # Test non-normalized case
        test1 = np.isclose(self.ir[1]._get_grand_potential(comp), -27,
                           atol=1e-03)
        self.assertTrue(test1,
                        '_get_grand_potential: '
                        'Non-normalized case gets error!')
        # Test normalized case (per non-open-element atom)
        test2 = np.isclose(self.ir[2]._get_grand_potential(comp), -9,
                           atol=1e-03)
        self.assertTrue(test2,
                        '_get_grand_potential: '
                        'Normalized case gets error!')
        comp2 = Composition('Li2O2')
        # Test use_hull_energy option (ir[8] uses hull, ir[9] uses entry).
        test3 = np.isclose(self.ir[8]._get_grand_potential(comp2), -4,
                           atol=1e-03)
        self.assertTrue(test3,
                        '_get_grand_potential: '
                        'get hull energy gets error!')
        test4 = np.isclose(self.ir[9]._get_grand_potential(comp2), -2,
                           atol=1e-03)
        self.assertTrue(test4,
                        '_get_grand_potential: '
                        'gets error for {}!'.format(comp2.reduced_formula))
    def test_get_energy(self):
        # reaction energy at a given mixing ratio, for plain pd (ir[0]),
        # grand pd (ir[3]), and the hull-energy variants (ir[6], ir[7])
        test1 = (np.isclose(self.ir[0]._get_energy(0.5), -15, atol=1e-03))
        self.assertTrue(test1, '_get_energy: phase diagram gets error!')
        test2 = (
        np.isclose(self.ir[3]._get_energy(0.6666666), -7.333333, atol=1e-03))
        self.assertTrue(test2,
                        '_get_energy: '
                        'grand canonical phase diagram gets error!')
        test3 = (
        np.isclose(self.ir[6]._get_energy(0.3333333), -3.333333, atol=1e-03))
        self.assertTrue(test3,
                        '_get_energy: convex hull energy gets error. ')
        test4 = (
        np.isclose(self.ir[7]._get_energy(0.3333333), -4, atol=1e-03))
        self.assertTrue(test4,
                        '_get_energy: gets error. ')
    def test_get_reaction(self):
        # reaction string at a given mixing ratio
        test1 = str(self.ir[0]._get_reaction(0.5)) == 'O2 + Mn -> MnO2'
        self.assertTrue(test1,
                        '_get_reaction: '
                        'reaction not involving chempots species gets error!')
        # product ordering is not deterministic, so accept either ordering
        test2 = str(self.ir[3]._get_reaction(0.666666)) \
                == 'Mn + Li2O -> 2 Li + 0.5 MnO2 + 0.5 Mn' \
                or str(self.ir[3]._get_reaction(0.666666)) \
                == 'Mn +  Li2O -> 2 Li + 0.5 Mn + 0.5 MnO2'
        self.assertTrue(test2,
                        '_get_reaction: '
                        'reaction involving chempots species gets error!')
    def test_get_get_elmt_amt_in_rxt(self):
        # counts the amount of non-open elements participating in a reaction
        rxt1 = Reaction(
            [Composition('Mn'), Composition('O2'), Composition('Li')],
            [Composition('LiMnO2')])
        test1 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt1), 3)
        self.assertTrue(test1,
                        '_get_get_elmt_amt_in_rxt: '
                        'gpd elements amounts gets error!')
        # NOTE(review): rxt2 aliases rxt1, so normalize_to mutates rxt1 too;
        # rxt1 is not used afterwards, so the test is still valid as written.
        rxt2 = rxt1
        rxt2.normalize_to(Composition('Li'), 0.5)
        test2 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt2), 1.5)
        self.assertTrue(test2,
                        '_get_get_elmt_amt_in_rxt: '
                        'gpd elements amounts gets error!')
        rxt3 = Reaction([Composition('O2'), Composition('Li')],
                        [Composition('Li2O')])
        # Li is not counted (open element in the grand pd of ir[2])
        test3 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt3), 1)
        self.assertTrue(test3,
                        '_get_get_elmt_amt_in_rxt: '
                        'gpd elements amounts gets error!')
        # Li is counted (plain pd of ir[6])
        test4 = np.isclose(self.ir[6]._get_elmt_amt_in_rxt(rxt3), 3)
        self.assertTrue(test4,
                        '_get_get_elmt_amt_in_rxt: '
                        'pd elements amounts gets error!')
    def test_convert(self):
        """_convert maps x through the composition factors f1, f2."""
        cases = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
        answer = [0.75, 0.5, 0, 1]
        result = [InterfacialReactivity._convert(x, f1, f2)
                  for x, f1, f2 in cases]
        self.assertTrue(np.allclose(result, answer),
                        '_convert: conversion gets error! {0} expected,'
                        ' but gets {1}'.format(answer, result))
    def test_reverse_convert(self):
        """_reverse_convert maps x back through composition factors f1, f2."""
        test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
        result = [InterfacialReactivity._reverse_convert(x, f1, f2) for x, f1, f2 in
                  test_array]
        answer = [0.25, 0.3076923, 0, 1]
        # Failure message previously said '_convert' (copy-paste from
        # test_convert); it now names the method actually under test.
        self.assertTrue(np.allclose(result, answer),
                        '_reverse_convert: conversion gets error! {0} expected,'
                        ' but gets {1}'.format(answer, result))
    def test_get_products(self):
        """get_products lists decomposition products along the mixing line."""
        test1 = sorted(self.ir[0].get_products()) == sorted(
            ['MnO2', 'O2', 'Mn'])
        self.assertTrue(test1,
                        'get_products: decomposition products gets error '
                        'for reaction not involving chempots species!')
        test2 = sorted(self.ir[3].get_products()) == sorted(
            ['Li', 'MnO2', 'Mn', 'Li2O'])
        # Failure message previously referred to 'get_decomp' (stale
        # copy-paste); it now names the method actually under test.
        self.assertTrue(test2,
                        'get_products: decomposition products gets error '
                        'for reaction involving chempots species!')
    def test_get_kinks(self):
        """Check kink index/x/energy/reaction data for two systems."""

        def unpack(kinks):
            # Each kink tuple is (index, x, energy, reaction, energy_per_rxt).
            return ([k[0] for k in kinks],
                    [k[1] for k in kinks],
                    [k[2] for k in kinks],
                    [str(k[3]) for k in kinks],
                    [k[4] for k in kinks])

        ir = self.ir[0]
        index, x_kink, energy_kink, react_kink, energy_per_rxt_kink = \
            unpack(list(ir.get_kinks()))
        test1 = index == [1, 2, 3]
        self.assertTrue(test1, 'get_kinks:index gets error!')
        test2 = np.allclose(x_kink, [0, 0.5, 1])
        self.assertTrue(test2, 'get_kinks:x kinks gets error!')
        test3 = np.allclose(energy_kink, [0, -15, 0])
        self.assertTrue(test3, 'get_kinks:energy kinks gets error!')
        test4 = react_kink == ['Mn -> Mn', 'O2 + Mn -> MnO2', 'O2 -> O2']
        self.assertTrue(test4,
                        'get_kinks: reaction kinks '
                        'gets error for {0} and {1} reaction!'.format(
                            ir.c1_original.reduced_formula,
                            ir.c2_original.reduced_formula))
        test5 = np.allclose(energy_per_rxt_kink,
                            [0,
                             -30 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
                             0])
        self.assertTrue(test5, 'get_kinks: energy_per_rxt_kinks gets error!')
        # Bug fix: rebind ir so the failure messages below report the system
        # actually being tested (previously they still used self.ir[0]).
        ir = self.ir[10]
        index, x_kink, energy_kink, react_kink, energy_per_rxt_kink = \
            unpack(list(ir.get_kinks()))
        test6 = index == [1, 2, 3]
        self.assertTrue(test6, 'get_kinks:index gets error!')
        test7 = np.allclose(x_kink, [0, 0.66667, 1])
        self.assertTrue(test7, 'get_kinks:x kinks gets error!')
        test8 = np.allclose(energy_kink, [0, -10, 0])
        self.assertTrue(test8, 'get_kinks:energy kinks gets error!')
        test9 = react_kink == ['Mn -> Mn', 'O2 + Mn -> MnO2', 'O2 -> O2']
        self.assertTrue(test9,
                        'get_kinks:reaction kinks '
                        'gets error for {0} and {1} reaction!'.format(
                            ir.c1_original.reduced_formula,
                            ir.c2_original.reduced_formula))
        test10 = np.allclose(energy_per_rxt_kink,
                             [0,
                              -30 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
                              0])
        self.assertTrue(test10, 'get_kinks:energy_per_rxt_kinks gets error!')
    def test_labels(self):
        """labels() maps each kink index to a human-readable description."""
        ir = self.ir[0]
        # Renamed from 'dict', which shadowed the builtin of the same name.
        label_map = ir.labels()
        test1 = label_map == {
            1: 'x= 0.0 energy in eV/atom = 0.0 Mn -> Mn',
            2: 'x= 0.5 energy in eV/atom = -15.0 O2 + Mn -> MnO2',
            3: 'x= 1.0 energy in eV/atom = 0.0 O2 -> O2'}
        self.assertTrue(test1,
                        'labels:label does not match for interfacial system '
                        'with {0} and {1}.'.format(
                            ir.c1_original.reduced_formula,
                            ir.c2_original.reduced_formula))
    def test_plot(self):
        """Smoke test: plotting every system should run without raising.

        The figure contents are not inspected; exercising the code path is
        the whole point of this test.
        """
        for system in self.ir:
            system.plot()
    def test_minimum(self):
        """minimum() returns the (x, energy) pair of the deepest kink."""
        expected_minima = [
            (0.5, -15),
            (0, 0),
            (0.3333333, -10),
            (0.6666666, -7.333333),
            (0.3333333, -7.333333),
            (0.1428571, -7.333333),
            (0.3333333, -3.333333),
            (0.3333333, -4.0),
        ]
        for system, expected in zip(self.ir, expected_minima):
            observed = system.minimum()
            self.assertTrue(np.allclose(observed, expected),
                            'minimum: the system with {0} and {1} '
                            'gets error!{2} expected, but gets {3}'.format(
                                system.c1_original.reduced_formula,
                                system.c2_original.reduced_formula,
                                str(expected), str(observed)))
    def test_get_no_mixing_energy(self):
        """Check the error path and the values of get_no_mixing_energy()."""
        # Calling on a non-grand-potential system must raise with a
        # specific message.
        with self.assertRaises(Exception) as context1:
            self.ir[0].get_no_mixing_energy()
        self.assertTrue(
            'Please provide grand potential phase diagram'
            ' for computing no_mixing_energy!' == str(context1.exception))
        expected = [
            [(u'MnO2 (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
            [(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
            [(u'Li2O (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
            [(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
            [(u'Mn (eV/atom)', 0.0), (u'Li2O (eV/atom)', 0.0)]
        ]
        # Each result is a pair of (label, energy) tuples; split them out.
        names = lambda lst: (lst[0][0], lst[1][0])
        energies = lambda lst: (lst[0][1], lst[1][1])
        observed = [i.get_no_mixing_energy() for i in self.ir if i.grand]
        for obs, exp in zip(observed, expected):
            self.assertTrue(names(obs) == names(exp),
                            'get_no_mixing_energy: names get error,'
                            ' {0} expected but gets {1}'.format(
                                names(exp), names(obs)))
            self.assertTrue(np.allclose(energies(obs), energies(exp)),
                            'get_no_mixing_energy: '
                            'no_mixing energies get error, '
                            '{0} expected but gets {1}'.format(
                                energies(exp), energies(obs)))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
 | 
	nisse3000/pymatgen | 
	pymatgen/analysis/tests/test_interface_reactions.py | 
	Python | 
	mit | 17,218 | 
	[
  "pymatgen"
] | 
	5ab5543c3163c6a13a930820d2e2aad8e90291dea8fb5580e6fc7d826acf1d31 | 
| 
	from __future__ import unicode_literals
import datetime
import requests
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import (SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER,
                             SIGNATURE_HMAC)
from six.moves.urllib.parse import urlencode, parse_qs
from .constants import (XERO_BASE_URL, XERO_PARTNER_BASE_URL,
                        REQUEST_TOKEN_URL, AUTHORIZE_URL, ACCESS_TOKEN_URL)
from .exceptions import *
OAUTH_EXPIRY_SECONDS = 3600 # Default unless a response reports differently
class PrivateCredentials(object):
    """Wrap the 2-step OAuth process for Private Xero API access.

    Private applications sign every request with an RSA key, and the
    consumer key doubles as a pre-authorised OAuth token, so no
    browser-based verification step is needed.

    Usage:
     1) Construct a PrivateCredentials() instance:
        >>> from xero.auth import PrivateCredentials
        >>> credentials = PrivateCredentials(<consumer_key>, <rsa_key>)
        rsa_key should be a multi-line string, starting with:
            -----BEGIN RSA PRIVATE KEY-----\n
     2) Use the credentials:
        >>> from xero import Xero
        >>> xero = Xero(credentials)
        >>> xero.contacts.all()
        ...
    """
    def __init__(self, consumer_key, rsa_key):
        self.consumer_key = consumer_key
        self.rsa_key = rsa_key
        self.base_url = XERO_BASE_URL
        # The private API has no token-exchange step: the consumer key is
        # used directly as the resource-owner token.
        self.oauth_token = consumer_key
        signing_options = dict(
            resource_owner_key=self.oauth_token,
            rsa_key=self.rsa_key,
            signature_method=SIGNATURE_RSA,
            signature_type=SIGNATURE_TYPE_AUTH_HEADER,
        )
        self.oauth = OAuth1(self.consumer_key, **signing_options)
class PublicCredentials(object):
    """An object wrapping the 3-step OAuth process for Public Xero API access.
    Usage:
     1) Construct a PublicCredentials() instance:
        >>> from xero import PublicCredentials
        >>> credentials = PublicCredentials(<consumer_key>, <consumer_secret>)
     2) Visit the authentication URL:
        >>> credentials.url
        If a callback URI was provided (e.g., https://example.com/oauth),
        the user will be redirected to a URL of the form:
        https://example.com/oauth?oauth_token=<token>&oauth_verifier=<verifier>&org=<organization ID>
        from which the verifier can be extracted. If no callback URI is
        provided, the verifier will be shown on the screen, and must be
        manually entered by the user.
     3) Verify the instance:
        >>> credentials.verify(<verifier string>)
     4) Use the credentials.
        >>> from xero import Xero
        >>> xero = Xero(credentials)
        >>> xero.contacts.all()
        ...
    """
    def __init__(self, consumer_key, consumer_secret,
                 callback_uri=None, verified=False,
                 oauth_token=None, oauth_token_secret=None,
                 oauth_expires_at=None, oauth_authorization_expires_at=None):
        """Construct the auth instance.
        Must provide the consumer key and secret.
        A callback URL may be provided as an option. If provided, the
        Xero verification process will redirect to that URL when
        verification is complete.
        Passing an oauth_token/oauth_token_secret pair (with verified=True
        for fully verified credentials) reconstructs a previously saved
        instance, e.g. from the ``state`` property.
        """
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.callback_uri = callback_uri
        self.verified = verified
        # Populated by _init_oauth() once verified; see the `oauth` property.
        self._oauth = None
        self.oauth_expires_at = oauth_expires_at
        self.oauth_authorization_expires_at = oauth_authorization_expires_at
        self.base_url = XERO_BASE_URL
        self._signature_method = SIGNATURE_HMAC
        # These are not strictly used by Public Credentials, but
        # are reserved for use by other credentials (i.e. Partner)
        self.rsa_key = None
        self.client_cert = None
        self.oauth_session_handle = None
        self._init_credentials(oauth_token, oauth_token_secret)
    def _init_credentials(self, oauth_token, oauth_token_secret):
        "Depending on the state passed in, get self._oauth up and running"
        if oauth_token and oauth_token_secret:
            if self.verified:
                # If provided, this is a fully verified set of
                # credentials. Store the oauth_token and secret
                # and initialize OAuth around those
                self._init_oauth(oauth_token, oauth_token_secret)
            else:
                # If provided, we are reconstructing an initialized
                # (but non-verified) set of public credentials.
                self.oauth_token = oauth_token
                self.oauth_token_secret = oauth_token_secret
        else:
            # This is a brand new set of credentials - we need to generate
            # an oauth token so it's available for the url property.
            oauth = OAuth1(
                self.consumer_key,
                client_secret=self.consumer_secret,
                callback_uri=self.callback_uri,
                rsa_key=self.rsa_key,
                signature_method=self._signature_method
            )
            url = self.base_url + REQUEST_TOKEN_URL
            response = requests.post(url=url, auth=oauth, cert=self.client_cert)
            self._process_oauth_response(response)
    def _init_oauth(self, oauth_token, oauth_token_secret):
        "Store and initialize a verified set of OAuth credentials"
        self.oauth_token = oauth_token
        self.oauth_token_secret = oauth_token_secret
        self._oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=self.oauth_token,
            resource_owner_secret=self.oauth_token_secret,
            rsa_key=self.rsa_key,
            signature_method=self._signature_method
        )
    def _process_oauth_response(self, response):
        "Extracts the fields from an oauth response"
        if response.status_code == 200:
            # parse_qs returns a dict of lists, hence the [0] indexing below.
            credentials = parse_qs(response.text)
            # Initialize the oauth credentials
            self._init_oauth(
                credentials.get('oauth_token')[0],
                credentials.get('oauth_token_secret')[0]
            )
            # If tokens are refreshable, we'll get a session handle
            self.oauth_session_handle = credentials.get(
                    'oauth_session_handle', [None])[0]
            # Calculate token/auth expiry
            oauth_expires_in = credentials.get(
                    'oauth_expires_in',
                    [OAUTH_EXPIRY_SECONDS])[0]
            oauth_authorisation_expires_in = credentials.get(
                    'oauth_authorization_expires_in',
                    [OAUTH_EXPIRY_SECONDS])[0]
            self.oauth_expires_at = datetime.datetime.now() + \
                                    datetime.timedelta(seconds=int(
                                        oauth_expires_in))
            self.oauth_authorization_expires_at = \
                                    datetime.datetime.now() + \
                                    datetime.timedelta(seconds=int(
                                        oauth_authorisation_expires_in))
        else:
            self._handle_error_response(response)
    def _handle_error_response(self, response):
        """Map an HTTP error response onto the matching Xero exception."""
        if response.status_code == 400:
            raise XeroBadRequest(response)
        elif response.status_code == 401:
            raise XeroUnauthorized(response)
        elif response.status_code == 403:
            raise XeroForbidden(response)
        elif response.status_code == 404:
            raise XeroNotFound(response)
        elif response.status_code == 500:
            raise XeroInternalError(response)
        elif response.status_code == 501:
            raise XeroNotImplemented(response)
        elif response.status_code == 503:
            # Two 503 responses are possible. Rate limit errors
            # return encoded content; offline errors don't.
            # If you parse the response text and there's nothing
            # encoded, it must be a not-available error.
            payload = parse_qs(response.text)
            if payload:
                raise XeroRateLimitExceeded(response, payload)
            else:
                raise XeroNotAvailable(response)
        else:
            raise XeroExceptionUnknown(response)
    @property
    def state(self):
        """Obtain the useful state of this credentials object so that
        we can reconstruct it independently.
        The returned dict can be passed back to the constructor as
        keyword arguments; None-valued attributes are omitted.
        """
        return dict(
            (attr, getattr(self, attr))
            for attr in (
                'consumer_key', 'consumer_secret', 'callback_uri',
                'verified', 'oauth_token', 'oauth_token_secret',
                'oauth_session_handle', 'oauth_expires_at',
                'oauth_authorization_expires_at'
            )
            if getattr(self, attr) is not None
        )
    def verify(self, verifier):
        "Verify an OAuth token"
        # Construct the credentials for the verification request
        oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=self.oauth_token,
            resource_owner_secret=self.oauth_token_secret,
            verifier=verifier,
            rsa_key=self.rsa_key,
            signature_method=self._signature_method
        )
        # Make the verification request, getting back an access token
        url = self.base_url + ACCESS_TOKEN_URL
        response = requests.post(url=url, auth=oauth, cert=self.client_cert)
        self._process_oauth_response(response)
        self.verified = True
    @property
    def url(self):
        "Returns the URL that can be visited to obtain a verifier code"
        # The authorize url is always api.xero.com
        url = XERO_BASE_URL + AUTHORIZE_URL + '?' + \
              urlencode({'oauth_token': self.oauth_token})
        return url
    @property
    def oauth(self):
        "Returns the requests-compatible OAuth object"
        if self._oauth is None:
            raise XeroNotVerified("OAuth credentials haven't been verified")
        return self._oauth
    def expired(self, now=None):
        """Return True if the OAuth token has (almost) expired.
        `now` may be supplied for testing; it defaults to the current time.
        Raises XeroException when no expiry time was ever recorded.
        """
        if now is None:
            now = datetime.datetime.now()
        # Credentials states from older versions might not have
        # oauth_expires_at available
        if self.oauth_expires_at is None:
            raise XeroException(None, "Expiry time is not available")
        # Allow a bit of time for clock differences and round trip times
        # to prevent false negatives. If users want the precise expiry,
        # they can use self.oauth_expires_at
        CONSERVATIVE_SECONDS = 30
        return self.oauth_expires_at <= \
               (now + datetime.timedelta(seconds=CONSERVATIVE_SECONDS))
class PartnerCredentials(PublicCredentials):
    """An object wrapping the 3-step OAuth process for Partner Xero API access.
    Usage is very similar to Public Credentials with the following changes:
     1) You'll need to pass the private key for your RSA certificate.
        >>> rsa_key = "-----BEGIN RSA PRIVATE KEY----- ..."
     2) You'll need to pass a tuple to the Entrust certificate pair.
        >>> client_cert = ('/path/to/entrust-cert.pem',
                           '/path/to/entrust-private-nopass.pem')
     3) Once a token has expired, you can refresh it to get another 30 mins
        >>> credentials = PartnerCredentials(**state)
        >>> if credentials.expired():
                credentials.refresh()
     4) Authorization expiry and token expiry become different things.
        oauth_expires_at tells when the current token expires (~30 min window)
        oauth_authorization_expires_at tells when the overall access
        permissions expire (~10 year window)
    """
    def __init__(self, consumer_key, consumer_secret, rsa_key, client_cert,
                 callback_uri=None, verified=False,
                 oauth_token=None, oauth_token_secret=None,
                 oauth_expires_at=None, oauth_authorization_expires_at=None,
                 oauth_session_handle=None):
        """Construct the auth instance.
        Must provide the consumer key and secret.
        A callback URL may be provided as an option. If provided, the
        Xero verification process will redirect to that URL when
        verification is complete.
        """
        # NOTE(review): this intentionally re-implements (rather than calls)
        # PublicCredentials.__init__ because the partner flow differs in the
        # signature method (RSA), the base URL, and the certificate/session
        # attributes it must persist.
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.callback_uri = callback_uri
        self.verified = verified
        # Populated by _init_oauth() once verified; see the `oauth` property.
        self._oauth = None
        self.oauth_expires_at = oauth_expires_at
        self.oauth_authorization_expires_at = oauth_authorization_expires_at
        self._signature_method = SIGNATURE_RSA
        self.base_url = XERO_PARTNER_BASE_URL
        self.rsa_key = rsa_key
        self.client_cert = client_cert
        # Needed by refresh(); returned by the server on token exchange.
        self.oauth_session_handle = oauth_session_handle
        self._init_credentials(oauth_token, oauth_token_secret)
    def refresh(self):
        "Refresh an expired token"
        # Construct the credentials for the verification request
        oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=self.oauth_token,
            resource_owner_secret=self.oauth_token_secret,
            rsa_key=self.rsa_key,
            signature_method=self._signature_method
        )
        # Make the verification request, getting back an access token
        params = {'oauth_session_handle': self.oauth_session_handle}
        response = requests.post(url=self.base_url + ACCESS_TOKEN_URL,
                params=params, auth=oauth, cert=self.client_cert)
        self._process_oauth_response(response)
 | 
	MJMortimer/pyxero | 
	xero/auth.py | 
	Python | 
	bsd-3-clause | 13,625 | 
	[
  "VisIt"
] | 
	e7c50eaf91b091a9ca538d2b45240df1a54ccca446f71eff0b782f19c8a6baa2 | 
| 
	import ast
import collections
from ..visitor import ClassVisitor, handle
from . import Metric
class _TypeCountVisitor(ClassVisitor):
    """Walk an AST and tally how many times each node class occurs."""
    @handle(ast.AST)
    def __visit_ast(self, node):
        # Record this node's class, then recurse into every child field.
        return (node.__class__,) + tuple(cls for name in node._fields for cls in self.visit(getattr(node, name)))
    @handle(collections.Sequence)
    def __visit_sequence(self, node):
        # Field values such as body/orelse are lists of nodes; flatten them.
        # NOTE(review): collections.Sequence is the pre-3.10 alias for
        # collections.abc.Sequence (removed in Python 3.10) — confirm the
        # interpreter version this package targets.
        return tuple(cls for entry in node for cls in self.visit(entry))
    @handle(str)
    def __visit_str(self, node):
        # str is itself a Sequence; this handler presumably takes precedence
        # so identifiers are not traversed character by character — verify
        # ClassVisitor's dispatch-priority rules.
        return ()
    def default(self, node):
        # Any other leaf value (numbers, None, ...) contributes nothing.
        return ()
    @classmethod
    def count(cls, node):
        """Return a dict mapping AST node class -> number of occurrences."""
        res = {}
        for entry in cls().visit(node):
            res.setdefault(entry, 0)
            res[entry] += 1
        return res
class _CyclomaticVisitor(ClassVisitor):
    """Sum the cyclomatic-complexity contributions of an AST subtree."""
    @handle(
        ast.If,
        ast.IfExp,
        ast.For,
        ast.While,
        ast.TryExcept,
        ast.TryFinally,
        ast.Break,
        ast.Continue,
        ast.And,
        ast.Or
    )
    def __visit_selected(self, node):
        # Each branching construct contributes one decision point, plus
        # whatever its children contribute.
        # NOTE(review): ast.TryExcept / ast.TryFinally exist only on
        # Python 2 (merged into ast.Try in Python 3) — confirm the target
        # interpreter version.
        return 1 + self.__visit_ast(node)
    @handle(ast.FunctionDef)
    def __visit_function(self, node):
        # Each explicit `return` adds an exit path, except a return that is
        # the function's final statement (the normal exit).
        count = _TypeCountVisitor.count(node).get(ast.Return, 0)
        if isinstance(node.body[-1], ast.Return):
            count -= 1
        return count + self.__visit_ast(node)
    @handle(ast.AST)
    def __visit_ast(self, node):
        # Generic case: sum the contributions of every child field.
        return sum(self.visit(getattr(node, name)) for name in node._fields)
    @handle(collections.Sequence)
    def __visit_sequence(self, node):
        # Lists of child nodes (e.g. body/orelse) are summed element-wise.
        return sum(self.visit(entry) for entry in node)
    @handle(str)
    def __visit_str(self, node):
        # Strings contribute nothing (and must not be iterated char-by-char).
        return 0
    def default(self, node):
        # Non-AST leaf values contribute nothing.
        return 0
class CyclomaticComplexity(Metric):
    """Metric exposing the cyclomatic complexity of a node's AST."""
    def calculate(self, node):
        """Return the cyclomatic complexity computed from ``node.ast``."""
        visitor = _CyclomaticVisitor()
        return visitor.visit(node.ast)
    def get_metric_name(self):
        """Return the human-readable name of this metric."""
        return 'Cyclomatic complexity'
 | 
	herczy/pydepend | 
	pydepend/metric/cyclomatic.py | 
	Python | 
	bsd-3-clause | 1,908 | 
	[
  "VisIt"
] | 
	c66a25e202655c7f073a823fb8d8dccc257ea7f48e319421947bec27c7206669 | 
| 
	from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from .models import APIKey, Short, Visit
def _record_visit(request, short):
    """Persist a Visit row capturing the client address and browser headers."""
    meta = request.META
    # First truthy value wins: direct address, then common proxy headers.
    client_ip = (
        meta.get('REMOTE_ADDR') or
        meta.get('HTTP_X_REAL_IP') or
        meta.get('HTTP_X_FORWARDED_FOR')
    )
    return Visit.objects.create(
        short=short,
        remote_addr=client_ip,
        user_agent=meta.get('HTTP_USER_AGENT'),
        referrer=meta.get('HTTP_REFERER'),
    )
def short_detail(request, short_key):
    """Resolve a short key, record the visit, and redirect to its target.

    Raises Http404 when no Short matches `short_key`.
    """
    try:
        short = Short.objects.get_for_key(short_key)
    except Short.DoesNotExist as e:
        # str(e) instead of e.message: the `.message` attribute was removed
        # in Python 3 and raised AttributeError here.
        raise Http404(str(e))
    _record_visit(request, short)
    # A destination URL takes priority; otherwise fall back to the image.
    if short.destination:
        return redirect(short.destination)
    return redirect(short.image.url)
def short_create(request):
    """Create (or reuse) a Short for ?url=... and return its absolute URL.

    The caller authenticates via an API key in ?key=...; the short URL is
    returned as a text/plain response body.
    """
    destination = request.GET.get('url')
    api_key = request.GET.get('key')
    owner = APIKey.objects.get(key=api_key).user
    short, __ = Short.objects.get_or_create(
        destination=destination,
        created_by=owner,
    )
    path = reverse('short_detail', kwargs={'short_key': short.key})
    absolute_url = '{scheme}://{domain}{short_path}'.format(
        scheme=settings.SHORT_SCHEME,
        domain=get_current_site(request).domain,
        short_path=path)
    return HttpResponse(absolute_url, content_type='text/plain')
 | 
	sneeu/little | 
	little/views.py | 
	Python | 
	mit | 1,560 | 
	[
  "VisIt"
] | 
	5044b35c3eb85a66e78dc6ba0307c40f432a7e54e2055aee67a8bee015916f5c | 
ChemPile-Code
A comprehensive collection of filtered scientific code from chemistry, biology, and materials science
π Dataset Summary
ChemPile-Code includes filtered code from popular datasets such as the Stack and GitHub-code. It is designed to provide a rich source of scientific code from fields such as chemistry, biology, and materials science. The dataset is part of the ChemPile project, and aims to create a comprehensive collection of chemistry code for training language models. The filtering process is keyword-based, focusing on packages and libraries relevant to chemistry, biology, and materials science. Those keywords include simulation packages such as LAMMPS, GROMACS, and OpenMM, as well as libraries like RDKit, ASE, and MDTraj, and molecular visualization programs such as VMD and PyMOL. To avoid duplicates, exact hash matching was used to filter out identical code snippets.
π Dataset Statistics
| Subset | Tokens | Documents | Description | 
|---|---|---|---|
| CodeParrot GitHub-Code Chemistry Python | 1.8B | 208K | Python code from GitHub repositories | 
| StarCoder Chemistry | 16.1B | 2.06M | Python code from the Stack dataset | 
| Total | ~17.9B | ~2.27M | Scientific code snippets | 
ποΈ Dataset Configurations
The dataset includes different subsets available as Hugging Face configurations:
- codeparrot_github-code-chemistry-python-default
- starcoder-chemistry-default
π License
All content is released under the AGPL-3.0 license, which allows for:
- β Free use and distribution
- β Commercial use
- β Modification and derivatives
- β οΈ Attribution required
However, the dataset combines code under different licenses. The config codeparrot_github-code-chemistry-python-default is designed such that it is possible to filter the dataset based on the license. Therefore, this config has code under the following licenses:
- MIT
- GPL-3.0
- BSD-3-Clause
- GPL-2.0
- Apache-2.0
- LGPL-2.1
- AGPL-2.0
- AGPL-3.0
- LGPL-3.0
- MPL-2.0
- BSD-2-Clause
π Dataset Details
π CodeParrot
Source: CodeParrot is a subset of GitHub code, that we specifically filtered for chemistry-related content
Coverage: Python code from the GitHub Code dataset
Extraction Method: Keyword-based filtering focusing on chemistry, biology, and materials science packages and libraries
Fields:
- text: The code snippet
- repo_name: The name of the repository where the code snippet was found
- path: The path to the file within the repository
- language: The programming language of the code snippet
- license: The license of the repository
- size: The size of the code snippet in bytes
- keyword: A list of keywords that were used to filter the code snippet
- text_hash: A hash of the code snippet to avoid duplicates
Statistics: 208K code snippets with a total of over 1.8B tokens
βοΈ StarCoder
Source: StarCoder is a subset of the Stack dataset, that we specifically filtered for chemistry-related content
Coverage: Python code from the Stack dataset
Extraction Method: Keyword-based filtering with exact hash matching to avoid duplicates
Fields:
- text: The code snippet
- repo_name: The name of the repository where the code snippet was found
- keyword: A list of keywords that were used to filter the code snippet
- text_hash: A hash of the code snippet to avoid duplicates
Statistics: 2.06M code snippets with a total of over 16.1B tokens
π Quick Start
from datasets import load_dataset, get_dataset_config_names
# Print available configs for the dataset
configs = get_dataset_config_names("jablonkagroup/chempile-code")
print(f"Available configs: {configs}")
# Available configs: ['codeparrot_github-code-chemistry-python-default', 'starcoder-chemistry-default']
dataset = load_dataset("jablonkagroup/chempile-code", name=configs[0])
# Loading config: codeparrot_github-code-chemistry-python-default
print(dataset)
# DatasetDict({
    # train: Dataset({
        # features: ['text', 'repo_name', 'path', 'language', 'license', 'size', 'keyword', 'text_hash'],
        # num_rows: 186878
    # })
    # test: Dataset({
        # features: ['text', 'repo_name', 'path', 'language', 'license', 'size', 'keyword', 'text_hash'],
        # num_rows: 10383
    # })
    # val: Dataset({
        # features: ['text', 'repo_name', 'path', 'language', 'license', 'size', 'keyword', 'text_hash'],
        # num_rows: 10382
    # })
# })
split_name = list(dataset.keys())[0]
sample = dataset[split_name][0]
print(sample)
# {
#     'text': 'import moogli
except Exception as e:...
#     'repo_name': 'BhallaLab/moose', 
#     'path': 'moose-examples/paper-2015/Fig2_elecModels/Fig2C.py', 
#     'language': 'Python', 
#     'license': 'gpl-3.0', 
#     'size': 14223, 
#     'keyword': ['MOOSE', 'NEURON'], 
#     'text_hash': '5eb6a5a439a675762a02c12cdff996e6a0d98f6ee874773cba2951727562aac5'
# }
π― Use Cases
- π€ Code Generation: Training models for scientific code generation and completion
- π¬ Scientific Computing: Building systems for computational chemistry and materials science
- π Code Search: Advanced scientific code repository search and analysis
- π Documentation: Automated code documentation and analysis for scientific software
- π§ Domain Adaptation: Adapting models to scientific computing paradigms and libraries
β οΈ Limitations & Considerations
- Language: Primarily Python code (monolingual dataset)
- Scope: Focused on scientific computing; may include domain-specific jargon and advanced concepts
- Quality: Variable quality across sources; some code may be incomplete or contain errors
- Bias: Reflects biases present in open-source scientific software development
- License: Mixed licenses from source repositories - check the individual license field
- Duplicates: Hash-based deduplication applied but some semantic duplicates may remain
π οΈ Data Processing Pipeline
- Collection: Automated extraction from GitHub-code and Stack datasets
- Filtering: Keyword-based filtering for chemistry, biology, and materials science relevance
- Deduplication: Exact hash matching to remove identical code snippets
- Quality Control: Automated filtering and validation
- Standardization: Consistent formatting and metadata extraction
- Validation: Train/validation/test splits and quality checks
ποΈ ChemPile Collection
This dataset is part of the ChemPile collection, a comprehensive open dataset containing over 75 billion tokens of curated chemical data for training and evaluating general-purpose models in the chemical sciences.
Collection Overview
- π Scale: 75+ billion tokens across multiple modalities
- 𧬠Modalities: Structured representations (SMILES, SELFIES, IUPAC, InChI), scientific text, executable code, and molecular images
- π― Design: Integrates foundational educational knowledge with specialized scientific literature
- π¬ Curation: Extensive expert curation and validation
- π Benchmarking: Standardized train/validation/test splits for robust evaluation
- π Availability: Openly released via Hugging Face
π Citation
If you use this dataset in your research, please cite:
@article{mirza2025chempile0,
  title   = {ChemPile: A 250GB Diverse and Curated Dataset for Chemical Foundation Models},
  author  = {Adrian Mirza and Nawaf Alampara and MartiΓ±o RΓos-GarcΓa and others},
  year    = {2025},
  journal = {arXiv preprint arXiv:2505.12534}
}
π₯ Contact & Support
- Paper: arXiv:2505.12534
- Website: ChemPile Project
- Dataset: Hugging Face
- Issues: Please report data issues or questions via the Hugging Face dataset page
- Downloads last month
- 511


