| text (stringlengths 12–1.05M) | repo_name (stringlengths 5–86) | path (stringlengths 4–191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12–1.05M) | keyword (listlengths 1–23) | text_hash (stringlengths 64–64) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# $HeadURL$
"""
Performs initial sanity checks on the WN, installs and configures DIRAC, and runs the
Job Agent to execute pending workload on the WMS.
It requires the dirac-install script to be sitting in the same directory.
"""
__RCSID__ = "$Id$"
import os
import sys
import getopt
import urllib2
import stat
import socket
import imp
import re
import time
# Check PYTHONPATH and LD_LIBRARY_PATH
try:
os.umask( 022 )
pythonpath = os.getenv( 'PYTHONPATH', '' ).split( ':' )
newpythonpath = []
for p in pythonpath:
if p == '': continue
try:
if os.path.normpath( p ) in sys.path:
# In case a given directory is twice in PYTHONPATH it has to be removed only once
sys.path.remove( os.path.normpath( p ) )
except Exception, x:
print 'Directories in PYTHONPATH:', pythonpath
print 'Failing path:', p, os.path.normpath( p )
print 'sys.path:', sys.path
raise x
except Exception, x:
print sys.executable
print sys.version
print os.uname()
print x
raise x
class CliParams:
MAX_CYCLES = 100
def __init__( self ):
self.debug = False
self.local = False
self.dryRun = False
self.testVOMSOK = False
self.site = ""
self.ceName = ""
self.queueName = ""
self.platform = ""
self.minDiskSpace = 2560 #MB
self.jobCPUReq = 900
self.pythonVersion = '26'
self.userGroup = ""
self.userDN = ""
self.maxCycles = CliParams.MAX_CYCLES
self.flavour = 'DIRAC'
self.gridVersion = '2013-04-22'
self.pilotReference = ''
self.releaseVersion = ''
self.releaseProject = ''
# The following parameters are added for BOINC computing element with virtual machine.
self.boincUserID = '' # The user ID in a BOINC computing element
self.boincHostPlatform = '' # The os type of the host machine running the pilot, not the virtual machine
self.boincHostID = '' # the host id in a BOINC computing element
self.boincHostName = '' # the host name of the host machine running the pilot, not the virtual machine
cliParams = CliParams()
###
# Helper functions
###
def logDEBUG( msg ):
if cliParams.debug:
for _line in msg.split( "\n" ):
print "%s UTC dirac-pilot [DEBUG] %s" % ( time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime() ), _line )
sys.stdout.flush()
def logERROR( msg ):
for _line in msg.split( "\n" ):
print "%s UTC dirac-pilot [ERROR] %s" % ( time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime() ), _line )
sys.stdout.flush()
def logINFO( msg ):
for _line in msg.split( "\n" ):
print "%s UTC dirac-pilot [INFO] %s" % ( time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime() ), _line )
sys.stdout.flush()
def executeAndGetOutput( cmd ):
try:
import subprocess
_p = subprocess.Popen( "%s" % cmd, shell = True, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, close_fds = True )
outData = _p.stdout.read().strip()
returnCode = _p.wait()
except ImportError:
import popen2
_p3 = popen2.Popen3( "%s" % cmd )
outData = _p3.fromchild.read().strip()
returnCode = _p3.wait()
return ( returnCode, outData )
# Version print
logINFO( "Running %s" % " ".join( sys.argv ) )
logINFO( "Version %s" % __RCSID__ )
###
# Checking scripts are ok
###
try:
pilotScript = os.path.realpath( __file__ )
# in old python versions __file__ is not defined
except:
pilotScript = os.path.realpath( sys.argv[0] )
pilotScriptName = os.path.basename( pilotScript )
pilotRootPath = os.path.dirname( pilotScript )
installScriptName = 'dirac-install.py'
originalRootPath = os.getcwd()
rootPath = os.getcwd()
for path in ( pilotRootPath, rootPath ):
installScript = os.path.join( path, installScriptName )
if os.path.isfile( installScript ):
break
if not os.path.isfile( installScript ):
logERROR( "%s requires %s to exist in one of: %s, %s" % ( pilotScriptName, installScriptName,
pilotRootPath, rootPath ) )
logINFO( "Trying to download it to %s..." % rootPath )
try:
#remoteLocation = "http://svnweb.cern.ch/guest/dirac/DIRAC/trunk/DIRAC/Core/scripts/dirac-install.py"
#remoteLocation = "http://svnweb.cern.ch/guest/dirac/DIRAC/trunk/DIRAC/Core/scripts/dirac-install.py"
remoteLocation = "https://raw.github.com/SuperDIRAC/DIRAC/master/Core/scripts/dirac-install.py"
remoteFD = urllib2.urlopen( remoteLocation )
installScript = os.path.join( rootPath, installScriptName )
localFD = open( installScript, "w" )
localFD.write( remoteFD.read() )
localFD.close()
remoteFD.close()
except Exception, e:
logERROR( "Could not download %s..: %s" % ( remoteLocation, str( e ) ) )
sys.exit( 1 )
os.chmod( installScript, stat.S_IRWXU )
###
# Option parsing
###
cmdOpts = ( ( 'b', 'build', 'Force local compilation' ),
( 'd', 'debug', 'Set debug flag' ),
( 'e:', 'extraPackages=', 'Extra packages to install (comma separated)' ),
( 'g:', 'grid=', 'lcg tools package version' ),
( 'h', 'help', 'Show this help' ),
( 'i:', 'python=', 'Use python<24|25> interpreter' ),
( 'l:', 'project=', 'Project to install' ),
( 'p:', 'platform=', 'Use <platform> instead of local one' ),
( 't', 'test', 'Make a dry run. Do not run JobAgent' ),
( 'u:', 'url=', 'Use <url> to download tarballs' ),
( 'r:', 'release=', 'DIRAC release to install' ),
( 'n:', 'name=', 'Set <Site> as Site Name' ),
( 'D:', 'disk=', 'Require at least <space> MB available' ),
( 'M:', 'MaxCycles=', 'Maximum Number of JobAgent cycles to run' ),
( 'N:', 'Name=', 'Use <CEName> to determine Site Name' ),
( 'P:', 'path=', 'Install under <path>' ),
( 'E', 'server', 'Make a full server installation' ),
( 'S:', 'setup=', 'DIRAC Setup to use' ),
( 'C:', 'configurationServer=', 'Configuration servers to use' ),
( 'T:', 'CPUTime=', 'Requested CPU Time' ),
( 'G:', 'Group=', 'DIRAC Group to use' ),
( 'O:', 'OwnerDN=', 'Pilot OwnerDN (for private pilots)' ),
( 'U', 'Upload', 'Upload compiled distribution (if built)' ),
( 'V:', 'VO=', 'Virtual Organization' ),
( 'W:', 'gateway=', 'Configure <gateway> as DIRAC Gateway during installation' ),
( 's:', 'section=', 'Set base section for relative parsed options' ),
( 'o:', 'option=', 'Option=value to add' ),
( 'c', 'cert', 'Use server certificate instead of proxy' ),
( 'R:', 'reference=', 'Use this pilot reference' ),
)
installOpts = []
configureOpts = []
optList, args = getopt.getopt( sys.argv[1:],
"".join( [ opt[0] for opt in cmdOpts ] ),
[ opt[1] for opt in cmdOpts ] )
for o, v in optList:
if o in ( '-h', '--help' ):
print "Usage %s <opts>" % sys.argv[0]
for cmdOpt in cmdOpts:
print "%s %s : %s" % ( cmdOpt[0].ljust( 4 ), cmdOpt[1].ljust( 20 ), cmdOpt[2] )
sys.exit( 1 )
elif o in ( '-b', '--build' ):
installOpts.append( '-b' )
elif o == '-d' or o == '--debug':
cliParams.debug = True
installOpts.append( '-d' )
elif o == '-e' or o == '--extraPackages':
installOpts.append( '-e "%s"' % v )
elif o == '-g' or o == '--grid':
cliParams.gridVersion = v
elif o == '-i' or o == '--python':
cliParams.pythonVersion = v
elif o in ( '-l', '--project' ):
installOpts.append( "-l '%s'" % v )
cliParams.releaseProject = v
elif o == '-n' or o == '--name':
configureOpts.append( '-n "%s"' % v )
cliParams.site = v
elif o == '-p' or o == '--platform':
installOpts.append( '-p "%s"' % v )
cliParams.platform = v
elif o == '-r' or o == '--release':
installOpts.append( '-r "%s"' % v )
cliParams.releaseVersion = v
elif o == '-t' or o == '--test':
cliParams.dryRun = True
elif o == '-u' or o == '--url':
installOpts.append( '-u "%s"' % v )
elif o == '-N' or o == '--Name':
configureOpts.append( '-N "%s"' % v )
cliParams.ceName = v
elif o == '-D' or o == '--disk':
try:
cliParams.minDiskSpace = int( v )
except:
pass
elif o == '-M' or o == '--MaxCycles':
try:
cliParams.maxCycles = min( CliParams.MAX_CYCLES, int( v ) )
except:
pass
elif o == '-R' or o == '--reference':
cliParams.pilotReference = v
elif o in ( '-S', '--setup' ):
configureOpts.append( '-S "%s"' % v )
elif o in ( '-C', '--configurationServer' ):
configureOpts.append( '-C "%s"' % v )
elif o in ( '-P', '--path' ):
installOpts.append( '-P "%s"' % v )
rootPath = v
elif o in ( '-T', '--CPUTime' ):
cliParams.jobCPUReq = v
elif o in ( '-G', '--Group' ):
cliParams.userGroup = v
elif o in ( '-O', '--OwnerDN' ):
cliParams.userDN = v
elif o in ( '-U', '--Upload' ):
#TODO
pass
elif o in ( '-V', '--VO' ):
installOpts.append( '-V "%s"' % v )
configureOpts.append( 'defaults-%s.cfg' % v )
elif o in ( '-W', '--gateway' ):
configureOpts.append( '-W "%s"' % v )
elif o == '-E' or o == '--server':
installOpts.append( '-t "server"' )
elif o == '-o' or o == '--option':
configureOpts.append( '-o "%s"' % v )
elif o == '-s' or o == '--section':
configureOpts.append( '-s "%s"' % v )
elif o == '-c' or o == '--cert':
configureOpts.append( '--UseServerCertificate' )
############################################################################
# Locate installation script
for path in ( pilotRootPath, originalRootPath, rootPath ):
installScript = os.path.join( path, installScriptName )
if os.path.isfile( installScript ):
break
if not os.path.isfile( installScript ):
logERROR( "%s requires %s to exist in one of: %s, %s, %s" % ( pilotScriptName, installScriptName,
pilotRootPath, originalRootPath, rootPath ) )
logINFO( "Trying to download it to %s..." % originalRootPath )
try:
remoteLocation = "http://lhcbproject.web.cern.ch/lhcbproject/dist/Dirac_project/dirac-install.py"
remoteFD = urllib2.urlopen( remoteLocation )
installScript = os.path.join( originalRootPath, installScriptName )
localFD = open( installScript, "w" )
localFD.write( remoteFD.read() )
localFD.close()
remoteFD.close()
except Exception, e:
logERROR( "Could not download %s..: %s" % ( remoteLocation, str( e ) ) )
sys.exit( 1 )
os.chmod( installScript, stat.S_IRWXU )
######################################################################
if cliParams.gridVersion:
installOpts.append( "-g '%s'" % cliParams.gridVersion )
if cliParams.pythonVersion:
installOpts.append( '-i "%s"' % cliParams.pythonVersion )
######################################################################
# Attempt to determine the flavour
##
pilotRef = 'Unknown'
# Pilot reference is specified at submission
if cliParams.pilotReference:
cliParams.flavour = 'DIRAC'
pilotRef = cliParams.pilotReference
# Take the reference from the Torque batch system
if os.environ.has_key( 'PBS_JOBID' ):
cliParams.flavour = 'SSHTorque'
pilotRef = 'sshtorque://'+cliParams.ceName+'/'+os.environ['PBS_JOBID']
cliParams.queueName = os.environ['PBS_QUEUE']
# Grid Engine
if os.environ.has_key( 'JOB_ID' ):
cliParams.flavour = 'SSHGE'
pilotRef = 'sshge://'+cliParams.ceName+'/'+os.environ['JOB_ID']
# Condor
if os.environ.has_key( 'CONDOR_JOBID' ):
cliParams.flavour = 'SSHCondor'
pilotRef = 'sshcondor://'+cliParams.ceName+'/'+os.environ['CONDOR_JOBID']
# LSF
if os.environ.has_key( 'LSB_BATCH_JID' ):
cliParams.flavour = 'SSHLSF'
pilotRef = 'sshlsf://'+cliParams.ceName+'/'+os.environ['LSB_BATCH_JID']
# This is the CREAM direct submission case
if os.environ.has_key( 'CREAM_JOBID' ):
cliParams.flavour = 'CREAM'
pilotRef = os.environ['CREAM_JOBID']
# If we still have the GLITE_WMS_JOBID, it means that the submission
# was through the WMS, take this reference then
if os.environ.has_key( 'EDG_WL_JOBID' ):
cliParams.flavour = 'LCG'
pilotRef = os.environ['EDG_WL_JOBID']
if os.environ.has_key( 'GLITE_WMS_JOBID' ):
if os.environ['GLITE_WMS_JOBID'] != 'N/A':
cliParams.flavour = 'gLite'
pilotRef = os.environ['GLITE_WMS_JOBID']
if os.environ.has_key( 'OSG_WN_TMP' ):
cliParams.flavour = 'OSG'
# Direct SSH tunnel submission
if os.environ.has_key( 'SSHCE_JOBID' ):
cliParams.flavour = 'SSH'
pilotRef = 'ssh://'+cliParams.ceName+'/'+os.environ['SSHCE_JOBID']
# ARC case
if os.environ.has_key( 'GRID_GLOBAL_JOBID' ):
cliParams.flavour = 'ARC'
pilotRef = os.environ['GRID_GLOBAL_JOBID']
# This is for BOINC case
if os.environ.has_key( 'BOINC_JOB_ID' ):
cliParams.flavour = 'BOINC'
pilotRef = os.environ['BOINC_JOB_ID']
if cliParams.flavour == 'BOINC':
if os.environ.has_key('BOINC_USER_ID'):
cliParams.boincUserID = os.environ['BOINC_USER_ID']
if os.environ.has_key('BOINC_HOST_ID'):
cliParams.boincHostID = os.environ['BOINC_HOST_ID']
if os.environ.has_key('BOINC_HOST_PLATFORM'):
cliParams.boincHostPlatform = os.environ['BOINC_HOST_PLATFORM']
if os.environ.has_key('BOINC_HOST_NAME'):
cliParams.boincHostName = os.environ['BOINC_HOST_NAME']
logDEBUG( "Flavour: %s; pilot reference: %s " % ( cliParams.flavour, pilotRef ) )
configureOpts.append( '-o /LocalSite/GridMiddleware=%s' % cliParams.flavour )
if pilotRef != 'Unknown':
configureOpts.append( '-o /LocalSite/PilotReference=%s' % pilotRef )
# add options for BOINC
if cliParams.boincUserID:
configureOpts.append( '-o /LocalSite/BoincUserID=%s' % cliParams.boincUserID )
if cliParams.boincHostID:
configureOpts.append( '-o /LocalSite/BoincHostID=%s' % cliParams.boincHostID)
if cliParams.boincHostPlatform:
configureOpts.append( '-o /LocalSite/BoincHostPlatform=%s' % cliParams.boincHostPlatform)
if cliParams.boincHostName:
configureOpts.append( '-o /LocalSite/BoincHostName=%s' % cliParams.boincHostName )
###
# Try to get the CE name
###
#cliParams.ceName = 'Local'
if cliParams.flavour in ['LCG','gLite','OSG']:
retCode, CE = executeAndGetOutput( 'glite-brokerinfo getCE || edg-brokerinfo getCE' )
if not retCode:
cliParams.ceName = CE.split( ':' )[0]
if len( CE.split( '/' ) ) > 1:
cliParams.queueName = CE.split( '/' )[1]
configureOpts.append( '-N "%s"' % cliParams.ceName )
elif os.environ.has_key( 'OSG_JOB_CONTACT' ):
# OSG_JOB_CONTACT String specifying the endpoint to use within the job submission
# for reaching the site (e.g. manager.mycluster.edu/jobmanager-pbs )
CE = os.environ['OSG_JOB_CONTACT']
cliParams.ceName = CE.split( '/' )[0]
if len( CE.split( '/' ) ) > 1:
cliParams.queueName = CE.split( '/' )[1]
configureOpts.append( '-N "%s"' % cliParams.ceName )
else:
logERROR( "There was an error executing brokerinfo. Setting ceName to local " )
elif cliParams.flavour == "CREAM":
if os.environ.has_key( 'CE_ID' ):
cliParams.ceName = os.environ['CE_ID'].split( ':' )[0]
if os.environ['CE_ID'].count( "/" ):
cliParams.queueName = os.environ['CE_ID'].split( '/' )[1]
configureOpts.append( '-N "%s"' % cliParams.ceName )
#if cliParams.queueName:
# configureOpts.append( '-o /LocalSite/CEQueue="%s"' % cliParams.queueName )
if cliParams.queueName:
configureOpts.append( '-o /LocalSite/CEQueue=%s' % cliParams.queueName )
if cliParams.ceName:
configureOpts.append( '-o /LocalSite/GridCE=%s' % cliParams.ceName )
if cliParams.releaseVersion:
configureOpts.append( '-o /LocalSite/ReleaseVersion=%s' % cliParams.releaseVersion )
if cliParams.releaseProject:
configureOpts.append( '-o /LocalSite/ReleaseProject=%s' % cliParams.releaseProject )
###
# Set the platform if defined
###
if cliParams.platform:
installOpts.append( '-p "%s"' % cliParams.platform )
###
# Set the group and the DN
###
if cliParams.userGroup:
configureOpts.append( '-o /AgentJobRequirements/OwnerGroup="%s"' % cliParams.userGroup )
if cliParams.userDN:
configureOpts.append( '-o /AgentJobRequirements/OwnerDN="%s"' % cliParams.userDN )
#############################################################################
# Treat the OSG case
osgDir = ''
if cliParams.flavour == "OSG":
vo = cliParams.releaseProject.replace( 'DIRAC', '' ).upper()
if not vo:
vo = 'DIRAC'
osgDir = os.environ['OSG_WN_TMP']
# Make a separate directory per Project if it is defined
jobDir = os.path.basename( pilotRef )
if not jobDir: # just in case
import random
jobDir = str( random.randint( 1000, 10000 ) )
osgDir = os.path.join( osgDir, vo, jobDir )
if not os.path.isdir(osgDir):
os.makedirs(osgDir)
os.chdir( osgDir )
try:
import shutil
shutil.copy( installScript, os.path.join( osgDir, installScriptName ) )
except Exception, x:
print sys.executable
print sys.version
print os.uname()
print x
raise x
if os.environ.has_key( 'OSG_APP' ):
# Try to define it here although this will be only in the local shell environment
os.environ['VO_%s_SW_DIR' % vo] = os.path.join( os.environ['OSG_APP'], vo )
if rootPath == originalRootPath:
# No special root path was requested
rootPath = os.getcwd()
###
# Do the installation
###
installCmd = "%s %s" % ( installScript, " ".join( installOpts ) )
logDEBUG( "Installing with: %s" % installCmd )
if os.system( installCmd ):
logERROR( "Could not make a proper DIRAC installation" )
sys.exit( 1 )
###
# Set the env to use the recently installed DIRAC
###
diracScriptsPath = os.path.join( rootPath, 'scripts' )
sys.path.insert( 0, diracScriptsPath )
###
# Configure DIRAC
###
# Instead of dumping the Full configuration, include all Server in dirac.cfg
configureOpts.append( '-I' )
configureCmd = "%s %s" % ( os.path.join( diracScriptsPath, "dirac-configure" ), " ".join( configureOpts ) )
logDEBUG( "Configuring DIRAC with: %s" % configureCmd )
if os.system( configureCmd ):
logERROR( "Could not configure DIRAC" )
sys.exit( 1 )
###
# Dump the CS to cache in file
###
# cfgFile = os.path.join( rootPath, "etc", "dirac.cfg" )
# cacheScript = os.path.join( diracScriptsPath, "dirac-configuration-dump-local-cache" )
# if os.system( "%s -f %s" % ( cacheScript, cfgFile ) ):
# logERROR( "Could not dump the CS to %s" % cfgFile )
configureScript = os.path.join( diracScriptsPath, "dirac-configure" )
###
# Set the LD_LIBRARY_PATH and PATH
###
if not cliParams.platform:
platformPath = os.path.join( rootPath, "DIRAC", "Core", "Utilities", "Platform.py" )
platFD = open( platformPath, "r" )
PlatformModule = imp.load_module( "Platform", platFD, platformPath, ( "", "r", imp.PY_SOURCE ) )
platFD.close()
cliParams.platform = PlatformModule.getPlatformString()
if cliParams.testVOMSOK:
# Check voms-proxy-info before touching the original PATH and LD_LIBRARY_PATH
os.system( 'which voms-proxy-info && voms-proxy-info -all' )
diracLibPath = os.path.join( rootPath, cliParams.platform, 'lib' )
diracBinPath = os.path.join( rootPath, cliParams.platform, 'bin' )
for envVarName in ( 'LD_LIBRARY_PATH', 'PYTHONPATH' ):
if envVarName in os.environ:
os.environ[ '%s_SAVE' % envVarName ] = os.environ[ envVarName ]
del( os.environ[ envVarName ] )
else:
os.environ[ '%s_SAVE' % envVarName ] = ""
os.environ['LD_LIBRARY_PATH'] = "%s" % ( diracLibPath )
os.environ['PATH'] = '%s:%s:%s' % ( diracBinPath, diracScriptsPath, os.getenv( 'PATH' ) )
###
# End of initialisation
###
#
# Check proxy
#
ret = os.system( 'dirac-proxy-info' )
if cliParams.testVOMSOK:
ret = os.system( 'dirac-proxy-info | grep -q fqan' )
if ret != 0:
os.system( 'dirac-proxy-info 2>&1 | mail -s "dirac-pilot: missing voms certs at %s" [email protected]' % cliParams.site )
sys.exit( -1 )
#
# Set the local architecture
#
architectureScriptName = "dirac-architecture"
architectureScript = ""
candidate = os.path.join( rootPath, "scripts", architectureScriptName )
if os.path.isfile( candidate ):
architectureScript = candidate
else:
# If the extension does not provide a dirac-architecture, use dirac-platform as default value
candidate = os.path.join( rootPath, "scripts", "dirac-platform" )
if os.path.isfile( candidate ):
architectureScript = candidate
if architectureScript:
retCode, localArchitecture = executeAndGetOutput( architectureScript )
if not retCode:
localArchitecture = localArchitecture.strip()
os.environ['CMTCONFIG'] = localArchitecture
logINFO( 'Setting CMTCONFIG=%s' % localArchitecture )
# os.system( "%s -f %s -o '/LocalSite/Architecture=%s'" % ( cacheScript, cfgFile, localArchitecture ) )
# dirac-configure will not change existing cfg unless -U option is used.
os.system( "%s -F -o '/LocalSite/Architecture=%s'" % ( configureScript, localArchitecture ) )
else:
logERROR( "There was an error calling %s" % architectureScript )
#
# Get host and local user info
#
localUid = os.getuid()
try:
import pwd
localUser = pwd.getpwuid( localUid )[0]
except:
localUser = 'Unknown'
logINFO( 'Uname = %s' % " ".join( os.uname() ) )
logINFO( 'Host Name = %s' % socket.gethostname() )
logINFO( 'Host FQDN = %s' % socket.getfqdn() )
logINFO( 'User Name = %s' % localUser )
logINFO( 'User Id = %s' % localUid )
logINFO( 'CurrentDir = %s' % rootPath )
fileName = '/etc/redhat-release'
if os.path.exists( fileName ):
f = open( fileName, 'r' )
logINFO( 'RedHat Release = %s' % f.read().strip() )
f.close()
fileName = '/etc/lsb-release'
if os.path.isfile( fileName ):
f = open( fileName, 'r' )
logINFO( 'Linux release:\n%s' % f.read().strip() )
f.close()
fileName = '/proc/cpuinfo'
if os.path.exists( fileName ):
f = open( fileName, 'r' )
cpu = f.readlines()
f.close()
nCPU = 0
for line in cpu:
if line.find( 'cpu MHz' ) == 0:
nCPU += 1
freq = line.split()[3]
elif line.find( 'model name' ) == 0:
CPUmodel = line.split( ': ' )[1].strip()
logINFO( 'CPU (model) = %s' % CPUmodel )
logINFO( 'CPU (MHz) = %s x %s' % ( nCPU, freq ) )
fileName = '/proc/meminfo'
if os.path.exists( fileName ):
f = open( fileName, 'r' )
mem = f.readlines()
f.close()
freeMem = 0
for line in mem:
if line.find( 'MemTotal:' ) == 0:
totalMem = int( line.split()[1] )
if line.find( 'MemFree:' ) == 0:
freeMem += int( line.split()[1] )
if line.find( 'Cached:' ) == 0:
freeMem += int( line.split()[1] )
logINFO( 'Memory (kB) = %s' % totalMem )
logINFO( 'FreeMem. (kB) = %s' % freeMem )
#
# Disk space check
#
fs = os.statvfs( rootPath )
# bsize; /* file system block size */
# frsize; /* fragment size */
# blocks; /* size of fs in f_frsize units */
# bfree; /* # free blocks */
# bavail; /* # free blocks for non-root */
# files; /* # inodes */
# ffree; /* # free inodes */
# favail; /* # free inodes for non-root */
# flag; /* mount flags */
# namemax; /* maximum filename length */
diskSpace = fs[4] * fs[0] / 1024 / 1024
logINFO( 'DiskSpace (MB) = %s' % diskSpace )
if diskSpace < cliParams.minDiskSpace:
logERROR( '%s MB < %s MB, not enough local disk space available, exiting'
% ( diskSpace, cliParams.minDiskSpace ) )
sys.exit( 1 )
#
# Get job CPU requirement and queue normalization
#
if cliParams.flavour in ['LCG','gLite','OSG']:
logINFO( 'CE = %s' % CE )
logINFO( 'LCG_SITE_CE = %s' % cliParams.ceName )
retCode, queueNormList = executeAndGetOutput( 'dirac-wms-get-queue-normalization %s' % CE )
if not retCode:
queueNormList = queueNormList.strip().split( ' ' )
if len( queueNormList ) == 2:
queueNorm = float( queueNormList[1] )
logINFO( 'Queue Normalization = %s SI00' % queueNorm )
if queueNorm:
# Update the local normalization factor: We are using seconds @ 250 SI00 = 1 HS06
# This is the ratio SpecInt published by the site over 250 (the reference used for Matching)
# os.system( "%s -f %s -o /LocalSite/CPUScalingFactor=%s" % ( cacheScript, cfgFile, queueNorm / 250. ) )
# os.system( "%s -f %s -o /LocalSite/CPUNormalizationFactor=%s" % ( cacheScript, cfgFile, queueNorm / 250. ) )
os.system( "%s -F -o /LocalSite/CPUScalingFactor=%s -o /LocalSite/CPUNormalizationFactor=%s" % ( configureScript,
queueNorm / 250.,
queueNorm / 250. ) )
else:
logERROR( 'Failed to get Normalization of the Queue' )
else:
logERROR( "There was an error calling dirac-wms-get-queue-normalization" )
retCode, queueLength = executeAndGetOutput( 'dirac-wms-get-normalized-queue-length %s' % CE )
if not retCode:
queueLength = queueLength.strip().split( ' ' )
if len( queueLength ) == 2:
cliParams.jobCPUReq = float( queueLength[1] )
logINFO( 'Normalized Queue Length = %s' % cliParams.jobCPUReq )
else:
logERROR( 'Failed to get Normalized length of the Queue' )
else:
logERROR( "There was an error calling dirac-wms-get-normalized-queue-length" )
# Instead of using the Average reported by the Site, determine a Normalization
os.system( "dirac-wms-cpu-normalization -U" )
#
# further local configuration
#
inProcessOpts = ['-s /Resources/Computing/CEDefaults' ]
inProcessOpts .append( '-o WorkingDirectory=%s' % rootPath )
inProcessOpts .append( '-o GridCE=%s' % cliParams.ceName )
if cliParams.flavour in ['LCG','gLite','OSG']:
inProcessOpts .append( '-o GridCEQueue=%s' % CE )
inProcessOpts .append( '-o LocalAccountString=%s' % localUser )
inProcessOpts .append( '-o TotalCPUs=%s' % 1 )
inProcessOpts .append( '-o MaxCPUTime=%s' % ( int( cliParams.jobCPUReq ) ) )
inProcessOpts .append( '-o CPUTime=%s' % ( int( cliParams.jobCPUReq ) ) )
inProcessOpts .append( '-o MaxRunningJobs=%s' % 1 )
# To prevent a wayward agent picking up and failing many jobs.
inProcessOpts .append( '-o MaxTotalJobs=%s' % 10 )
jobAgentOpts = [ '-o MaxCycles=%s' % cliParams.maxCycles ]
# jobAgentOpts.append( '-o CEUniqueID=%s' % JOB_AGENT_CE )
if cliParams.debug:
jobAgentOpts.append( '-o LogLevel=DEBUG' )
if cliParams.userGroup:
logINFO( 'Setting DIRAC Group to "%s"' % cliParams.userGroup )
inProcessOpts .append( '-o OwnerGroup="%s"' % cliParams.userGroup )
if cliParams.userDN:
logINFO( 'Setting Owner DN to "%s"' % cliParams.userDN )
inProcessOpts .append( '-o OwnerDN="%s"' % cliParams.userDN )
# Find any .cfg file uploaded with the sandbox
extraCFG = []
for i in os.listdir( rootPath ):
cfg = os.path.join( rootPath, i )
if os.path.isfile( cfg ) and re.search( '.cfg$', cfg ):
extraCFG.append( cfg )
#
# Start the job agent
#
logINFO( 'Starting JobAgent' )
os.environ['PYTHONUNBUFFERED'] = 'yes'
diracAgentScript = os.path.join( rootPath, "scripts", "dirac-agent" )
jobAgent = '%s WorkloadManagement/JobAgent %s %s %s' % ( diracAgentScript,
" ".join( jobAgentOpts ),
" ".join( inProcessOpts ),
" ".join( extraCFG ) )
logINFO( "JobAgent execution command:\n%s" % jobAgent )
if not cliParams.dryRun:
os.system( jobAgent )
fs = os.statvfs( rootPath )
# bsize; /* file system block size */
# frsize; /* fragment size */
# blocks; /* size of fs in f_frsize units */
# bfree; /* # free blocks */
# bavail; /* # free blocks for non-root */
# files; /* # inodes */
# ffree; /* # free inodes */
# favail; /* # free inodes for non-root */
# flag; /* mount flags */
# namemax; /* maximum filename length */
diskSpace = fs[4] * fs[0] / 1024 / 1024
logINFO( 'DiskSpace (MB) = %s' % diskSpace )
ret = os.system( 'dirac-proxy-info' )
# Do some cleanup
if os.environ.has_key( 'OSG_WN_TMP' ) and osgDir:
os.chdir( originalRootPath )
import shutil
shutil.rmtree( osgDir )
sys.exit( 0 )
| repo_name: SuperDIRAC/DIRAC | path: WorkloadManagementSystem/PilotAgent/dirac-pilot.py | language: Python | license: gpl-3.0 | size: 28,246 | keyword: ["DIRAC"] | text_hash: 26ffd822646934ee612a57e2f30bf9fc01c4be38d7fe59b019b115e383a1cb15 |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
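As a minimal sketch of the same paginated request from Python (assuming direct
HTTP calls with the `requests` package rather than one of the autogenerated SDKs;
the API key is a placeholder):
```
import requests

resp = requests.get(
    "https://api.vericred.com/networks",
    headers={"Vericred-Api-Key": "YOUR_KEY"},
    params={"page": 2, "per_page": 5},
)
print(resp.headers.get("Total"), resp.headers.get("Per-Page"))
print(resp.headers.get("Link"))  # RFC-5988 pagination links
networks = resp.json()
```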
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
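A small client-side sketch (using the example payload above; nothing here is part
of the API itself) of matching the second level by id:
```
payload = {
    "providers": [{"id": 1, "state_id": 1}, {"id": 2, "state_id": 1}],
    "states": [{"id": 1, "code": "NY"}],
}
states_by_id = {s["id"]: s for s in payload["states"]}
for provider in payload["providers"]:
    provider["state"] = states_by_id[provider["state_id"]]
```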
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
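The same field selection can be sketched from Python (again assuming plain
`requests` calls; the key and id are placeholders):
```
import requests

fields = "provider.name,provider.phone,states.name,states.code"
resp = requests.get(
    "https://api.vericred.com/providers/12345",
    headers={"Vericred-Api-Key": "YOUR_KEY"},
    params={"select": fields},
)
trimmed = resp.json()  # only the selected fields plus each document's id
```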
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.zip_county_response import ZipCountyResponse
class TestZipCountyResponse(unittest.TestCase):
""" ZipCountyResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testZipCountyResponse(self):
"""
Test ZipCountyResponse
"""
model = vericred_client.models.zip_county_response.ZipCountyResponse()
if __name__ == '__main__':
unittest.main()
| repo_name: vericred/vericred-python | path: test/test_zip_county_response.py | language: Python | license: apache-2.0 | size: 10,047 | keyword: ["VisIt"] | text_hash: 2f59c5dbf7b13875eb4044e9f3786601a5cdc06ba5ad04eef9225b7fab584214 |
import math
import operator
from collections import defaultdict
from numpy.core.multiarray import zeros
from tabfile import TabFile
class CharDict(defaultdict):
"""
CharDict is a generalized dictionary that inherits defaultdict.
default values are 0. Accepts any keys.
"""
def __init__(self, dict={}):
def returnZero():
return 0
super(CharDict,self).__init__(returnZero, dict)
def __add__(self, other):
    z = CharDict()
    for k in set(self.keys() + other.keys()):
        z[k] = self[k] + other[k]
    return z
def uncertainty(self):
"""
calculates the uncertainty H in this position
(specified by CharDict).
Treats 0*log(0) as 0
"""
H = 0
for pN in self.itervalues():
    if pN != 0:
        H += -pN * math.log(pN, 2)
return H
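# Example (assumed usage, not part of the original module): once the values in a
# CharDict are probabilities, uncertainty() returns the Shannon entropy in bits:
#   CharDict({'A': 0.5, 'C': 0.25, 'G': 0.25}).uncertainty()  # -> 1.5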
class PositionWeightMatrix(object):
"""
Stores counts of nucleotide bases at each position. objects are immutable.
sequences may be added to the counts, but the object may not be modified
in situ
"""
def __init__(self,n=None):
raise DeprecationWarning("Use Bio.Motif. PositionWeightMatrix will be \
removed soon")
self._errN = 'n must be a positive integer'
if not n==None: self._initialize_positions(n)
self._is_probs=False
self._is_Ri=False
def _initialize_positions(self,n):
self._L = []
if type(n)==int:
if n > 0:
self._n = n
for dummyVariable in range(self._n):
self._L.append( CharDict() )
else: raise ValueError(self._errN)
else: raise TypeError(self._errN)
def __add__(self, other):
    n = self._n
    if n == other._n:
        z = PositionWeightMatrix(n)
        for i in range(n):
            z[i] = self[i] + other[i]
    else:
        raise ValueError('PositionWeightMatrix objects are not the same '
                         'length (number of positions)')
    return z
def __getitem__(self, y):
return self._L[y]
def __len__(self):
return len(self._L)
def count_file(self, seqsFile, n=0):
"""uses a tabFile with a list of sequences, in column n (by default n=0, the first column) and extracts counts"""
if self._is_probs: raise UserWarning('Already converted to probabilities')
# open file
seqsFile.open()
# read first sequence and set siteLength
rows = (row for row in seqsFile)
row = rows.next()
site = row[n]
siteLength = len(site)
self._initialize_positions(siteLength)
# initialize the object
for i in range(self._n): self._L[i][ site[i].upper() ] += 1
# read remaining sequences
while True:
try:
row = rows.next()
site = row[n]
except StopIteration: break
if len(site)==siteLength:
for i in range(self._n): self._L[i][ site[i].upper() ] += 1
else:
# clean up
del self._L
del self._n
seqsFile.close()
raise ValueError('One of the sequences you are trying to add is not the correct length ('+str(self._n)+'): '+site)
self._n = siteLength
def count_seqs(self, L, debug=False):
"""adds a list of sequences to the counts"""
if self._is_probs: raise UserWarning('Already converted to probabilities')
firstSite = True
n = 0
m = 0
for site in L:
n += 1
if n%(10**6)==0:
m += 1
if debug: print str(n)
if firstSite:
siteLength=len(site)
self._initialize_positions(siteLength)
firstSite = False
for i in range(self._n):
if len(site)==siteLength: self._L[i][ site[i].upper() ] += 1
else:
# clean up
del self._L
del self._n
raise ValueError('One of the sequences you are trying to add is not the correct length ('+str(self._n)+'): '+site)
def import_from_MEME(self,filename,n=1,mode='biotools'):
"""imports a motif from the output of MEME (meme.txt)
if there are multiple motifs in the output, we will use motif n (the first is n=1, which is also the default)
"""
import Bio.Motif.Parsers.MEME as MEME
f = open(filename)
MEME_object = MEME.read(f)
motif_name = 'Motif ' + str(n)
biopython_motif = MEME_object.get_motif_by_name(motif_name)
if mode=='biopython': return biopython_motif
if mode=='biotools':
internal_n = len(biopython_motif)
# this next line is instead of initializePositions
biotools_motif = [CharDict(biopython_motif[i]) for i in range(internal_n)]
self._L = biotools_motif
self._n = internal_n
else: raise UserWarning('Not a valid mode.')
def rc(self):
"""returns the reverse complement of this object"""
new = PositionWeightMatrix(self._n)
# complement the object
for i in range(self._n):
for base in self._L[i].keys():
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
if complement.has_key(base): new[i][complement[base]] = self._L[i][base]
else: new[i][base] = self._L[i][base]
new._L.reverse() # reverse the object
return new
def __repr__(self):
return self._L.__repr__()
def make_probs(self,trueProbs=False):
"""normalizes everything to 1"""
if self._is_Ri:
for x in self._L:
for k in x.keys():
x[k] = 2**(x[k]-2)
self._is_Ri = False
else:
for x in self._L:
total = sum( x.values() )
zeros = x.values().count(0)
if trueProbs or zeros==0:
for k in x.keys():
x[k] = float(x[k]) / total
else:
# fake one occurrence
total += 1
for k in x.keys():
if x[k]==0: x[k]=1
x[k] = float(x[k]) / total
self._is_probs = True
def make_Ri(self):
"""changes from counts or probabilities to Ri, information content"""
if not self._is_Ri:
if not self._is_probs: self.make_probs()
for p in self._L:
for k in p.keys():
p[k] = 2+math.log(p[k],2)
self._is_probs = False
self._is_Ri = True
else:
print 'Already Ri'
def seq_Ri(self,s):
"""seqRi returns the information content Ri in bits of a sequences, as measured with the given positionmatrix"""
if not self._is_Ri: self.makeRi()
Ri = 0
if len(s) != self._n:
raise UserWarning('Cannot evaluate a sequence which is not the exact length of the position matrix')
for x in range(self._n): Ri += self[x][s[x].upper()]
return Ri
def uncertainty(self):
"""returns the uncertainty H(l) of the matrix as a list. Use sum() for the total uncertainty.
Note: this function calls uncertainty() from the CharDict instance, and as such it can be overwritten implicitly. CharDict.uncertainty() treats 0*log(0) as 0"""
if not self._is_probs: self.make_probs()
return [position.uncertainty() for position in self]
def Rs(self):
"""returns the Schneider Rs value, which is the expectation of Ri over all possible sequences, calculated as the sum of 2-uncertainty."""
if not self._is_probs: self.make_probs()
return sum([2 - position.uncertainty() for position in self])
def KL(p, q):
    """returns a list of the KL divergence (relative entropy) at each position from position matrix p to position matrix q. use sum() for the sum"""
    if not len(p) == len(q):
        raise ValueError('Length of p and q must be the same, instead length of p is ' + str(len(p)) + ' and length of q is ' + str(len(q)))
    n = len(p)
    divergences = []
    for i in xrange(n):
        KLi = 0
        for j in ['A', 'G', 'T', 'C']:
            KLi += p[i][j] * math.log(p[i][j] / q[i][j], 2)
        divergences.append(KLi)
    return divergences
# requires numpy, maybe relax requirement?
# needs to return an mmMatrix object
# needs to be able to save to file
# needs to be able to make and save image
def joint_matrix(sites):
"""takes as input a filename and returns the joint Rate matrix
for the list of sequences contained in that file
Joint rates R(X;Y_ are defined as
R(X;Y) = - sum over X,Y p(x,y) * I(X;Y)
I(X;y) = - sum over X,Y p(x,y) * log2[p(x,y)/(p(x)p(y))]
"""
bases = ['A','C','G','T']
indexDictionary = {} # the index dictionary
for i in range(4):
for j in range(4):
ssPair = bases[i] + bases[j]
indexDictionary[ssPair]=i,j
site_length = len(sites[0])
# initialize the matrix
di_counts = zeros([site_length,site_length],dtype='(4,4)int')
def add_seq(m,s,n,b):
"""adds the dinucleotide counts from one sequence to the mm_matrix (an array, passed by refence). requires the length n"""
for i in range(n):
for j in range(n):
m[i,j][ b[s[i]+s[j]] ] += 1
# count pairs over every sequence
for site in sites:
add_seq(di_counts,site.upper(),site_length,indexDictionary)
# convert to probabilities
di_probs = zeros([site_length,site_length],dtype='(4,4)float')
total_seqs = di_counts[0,0].sum()
for i in range(site_length):
for j in range(site_length):
for ii in range(4):
for jj in range(4):
di_probs[i,j][ii,jj] = di_counts[i,j][ii,jj] / float(total_seqs)
mm_matrix = zeros([site_length,site_length],dtype='float')
for i in range(site_length):
for j in range(site_length):
# sum over all dinucleotide combinations
pM = di_probs[i,j]
# Determine Iij
Iij = 0.0
for x in range(4):
for y in range(4):
px = pM[x,:].sum()
py = pM[:,y].sum()
pxy = pM[x,y]
if any([pxy==0, py==0, px==0]): continue
Iij += pxy * math.log(pxy/px/py, 2)
# Determine Rij
Rij = 0.0
for x in range(4):
for y in range(4):
pxy = pM[x, y]
Rij -= pxy * Iij
mm_matrix[i][j] = Rij
return (di_counts, di_probs, mm_matrix)
def spacerGC(L, spacerOffset=6, spacerLength=3):
"""
spacerGC takes as input a list of [15 bp GBSs (strings)] and
returns the number of sequences that have 0,1,2,3 G/Cs in the 3 bp spacer
as an array in that order
"""
# gc counts of 0, 1, 2, 3
GCcounts = [0, 0, 0, 0]
for s in L:
spacer = s[0][spacerOffset:spacerOffset+spacerLength].upper()
GCs = spacer.count('C') + spacer.count('G')
GCcounts[GCs] += 1
return GCcounts
def center_region(f, max_dist=75, motif_length=17):
"""returns a function that specifies whether a given motif is in +/-
x bp from the peak_center
requires the tabFile object f to determine the indices properly
"""
column_dict = f.column_dict()
peak_summit = column_dict['peak_summit']
for offset_name in ('motif_offset', 'offset', 'max_offset'):
if column_dict.has_key(offset_name):
site_offset = column_dict[offset_name]
break
return lambda x: int(x[peak_summit]) > (int(x[site_offset]) -
max_dist) and \
int(x[peak_summit]) < (int(x[site_offset]) +
max_dist - motif_length)
def count_spacers_from_info(foo, cutoff=None, region_rule=None,
region_width=None, spacer_offset=8, spacer_length=3, output_file=None):
"""
count spacers from a .sites.info or .peaks.info file
optionally you may supply
cutoff, a minimum cutoff (float or int)
region_rule, a function that selects the column
"""
input_file = TabFile(foo, colNames=True)
rows = (x for x in input_file)
conditions = [lambda x: x[7].isalpha(),       # col 7 is a sequence
              lambda x: x[7] != '-',          # col 7 is not -
              lambda x: x[7] != 'NA',         # col 7 is not NA
              lambda x: x[7].strip() != '-']  # col 7 is not missing
if cutoff is not None:
conditions.append(lambda x: float(x[4])>cutoff)
if region_rule == 'center_region':
if region_width is not None:
conditions.append(center_region(input_file,
max_dist=region_width/2))
else:
conditions.append(center_region(input_file,
max_dist=75))
elif region_rule is not None:
conditions.append(lambda x: region_rule(x))
selected_rows = (x[7].upper() for x in rows if
all([f(x) for f in conditions]))
spacers = CharDict()
for s in selected_rows:
if not s== '-' and not s == 'NA':
spacer = s[spacer_offset:spacer_offset + spacer_length].upper()
spacers[spacer] += 1
if output_file is None:
output_file = raw_input('Output file name: ')
with TabFile(output_file, 'w') as f:
f.write_table(sorted(spacers.iteritems(),
key=operator.itemgetter(1), reverse=True))
return spacers
def count_letters(L):
n=xrange(len(L[0]))
counts = []
for j in n:
counts.append(CharDict())
for x in L:
for j in n:
counts[j][x[j]]+=1
return counts
| repo_name: benjschiller/seriesoftubes | path: bioplus/motif.py | language: Python | license: artistic-2.0 | size: 13,949 | keyword: ["Biopython"] | text_hash: 1c5a73df924f767974f9df17d6469e1b7ebe3d28f4b09a6041811835003c981d |
#! /usr/bin/env python
from ppclass import pp
########
## ppclass allows for getting fields very easily from a netcdf file
## ... this is contained in the "f" attributes of the pp object
## ... and the "l" attributes of the pp object contains useful information
## ... two methods getf() and getfl() are helpful shortcuts
###########################################
## 1 --> a unique and straightforward request
## --> very easy ! see also minimal_field.py
##############################################
icetot = pp(file="/home/aymeric/Big_Data/DATAPLOT/diagfired.nc",var="icetot",t=0.5).getf()
print "icetot", icetot[10,10]
## 2 --> simple multiple request, no labelling
##############################################
test = pp(file="/home/aymeric/Big_Data/DATAPLOT/diagfired.nc",t=0.5)
test.var = ["mtot","icetot"]
allf = test.getf() # or allf = test.get().f
##
mtot = allf[0]
icetot = allf[1]
print "mtot", mtot[10,10]
print "icetot", icetot[10,10]
## 3 --> complex multiple requests and labelling
################################################
test = pp(file="/home/aymeric/Big_Data/DATAPLOT/diagfired.nc")
test.var = ["mtot","icetot"]
test.t = [0.4,0.5]
allf,lab = test.getfl()
##
icetot04 = allf[lab.index("_v=icetot_t=0.4_")]
mtot04 = allf[lab.index("_v=mtot_t=0.4_")]
icetot05 = allf[lab.index("_v=icetot_t=0.5_")]
mtot05 = allf[lab.index("_v=mtot_t=0.5_")]
print "mtot04", mtot04[10,10]
print "icetot04", icetot04[10,10]
print "mtot05", mtot05[10,10]
print "icetot05", icetot05[10,10]
## 4 --> an example of complete labelling
## .... a rather unlikely example ....
## .... but shows label ordering ....
#########################################
test = pp()
test.file = ["/home/aymeric/Big_Data/DATAPLOT/diagfired.nc","/home/aymeric/Big_Data/DATAPLOT/diagfired.nc"]
test.var = ["u","v"]
test.x = [10.,20.]
test.y = [10.,20.]
test.z = [10.,20.]
test.t = [0.4,0.5]
print "... please wait. this one is a bit stupid..."
allf,lab = test.getfl()
## note label ordering: file -- var -- x -- y -- z -- t
l1 = "_f=#2_v=u_x=10.0_y=10.0_z=20.0_t=0.4_"
l2 = "_f=#2_v=v_x=10.0_y=10.0_z=20.0_t=0.4_"
u_example = allf[lab.index(l1)]
v_example = allf[lab.index(l2)]
print l1, u_example
print l2, v_example
| repo_name: aymeric-spiga/planetoplot | path: examples/ppclass_additional/easy_get_field.py | language: Python | license: gpl-2.0 | size: 2,209 | keyword: ["NetCDF"] | text_hash: 64a41c77f1b2b64d927f43e68de21c9c0d528ea65650db0234852ed0a8dd013b |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 08:57:31 2016
@author: nicholas
"""
import sys
import logging
import os
import unittest
from riboSeed.riboSeed import NgsLib, nonify_empty_lib_files
logger = logging
@unittest.skipIf((sys.version_info[0] != 3) or (sys.version_info[1] < 5),
"Subprocess.call among other things wont run if tried " +
" with less than python 3.5")
class NgsLibTest(unittest.TestCase):
""" tests for riboSeed.py
"""
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__),
"output_NgsLib_tests")
self.ref_dir = os.path.join(os.path.dirname(__file__), "references")
self.ref_fasta = os.path.join(self.test_dir,
'cluster1.fasta')
self.ref_Ffastq = os.path.join(self.ref_dir,
'toy_reads1.fq')
self.ref_Rfastq = os.path.join(self.ref_dir,
'toy_reads2.fq')
self.smalt_exe = "smalt"
self.bwa_exe = "bwa"
self.to_be_removed = []
if not os.path.exists(self.test_dir):
os.makedirs(self.test_dir, exist_ok=True)
def test_NgsLib(self):
""" Can we create an NgsLib object correctly
"""
# make a non-master object
testlib_pe_s = NgsLib(
name="test",
master=False,
readF=self.ref_Ffastq,
readR=self.ref_Rfastq,
readS0="dummy",
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe)
testlib_s = NgsLib(
name="test",
master=True,
readF=None,
readR=None,
readS0=self.ref_Ffastq,
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe)
self.assertEqual(testlib_s.libtype, "s_1")
self.assertEqual(testlib_s.readlen, 145.0)
self.assertEqual(testlib_s.liblist, [self.ref_Ffastq])
# test unnamed fails
with self.assertRaises(ValueError):
NgsLib(
name=None,
master=False,
readF=self.ref_Ffastq,
readR=self.ref_Rfastq,
readS0="dummy",
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe)
self.assertEqual(testlib_pe_s.libtype, "pe_s")
self.assertEqual(testlib_pe_s.readlen, None)
# test fails with a single PE file
with self.assertRaises(ValueError):
NgsLib(
name=None,
master=False,
readF=self.ref_Ffastq,
readR=None,
readS0="dummy",
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe)
with self.assertRaises(ValueError):
NgsLib(
name=None,
master=False,
readF=self.ref_Ffastq,
readR=None,
readS0=None,
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe)
# check that master files cannot be deleted
testlib_pe = NgsLib(
name="test",
master=True,
make_dist=False,
readF=self.ref_Ffastq,
readR=self.ref_Rfastq,
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe,
logger=logger)
self.assertEqual(
1,  # return code meaning no deletion took place
testlib_pe.purge_old_files(master=testlib_pe_s, logger=logger))
self.assertTrue(os.path.isfile(self.ref_Ffastq))
self.assertTrue(os.path.isfile(self.ref_Rfastq))
# test killer lib that tries to purge files that are in a master object
testlib_killer = NgsLib(
name="test",
master=False,
make_dist=False,
readF=self.ref_Ffastq,
readR=self.ref_Rfastq,
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe,
logger=logger)
self.assertEqual(
1,  # return code meaning no deletion took place
testlib_killer.purge_old_files(master=testlib_pe_s, logger=logger))
self.assertTrue(os.path.isfile(self.ref_Ffastq))
self.assertTrue(os.path.isfile(self.ref_Rfastq))
def test_dont_check_nonmaster_read_len(self):
testlib_pe = NgsLib(
name="test",
master=False,
make_dist=False,
readF=self.ref_Ffastq,
readR=self.ref_Rfastq,
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe,
logger=logger)
self.assertEqual(testlib_pe.readlen, None)
def test_lib_check(self):
""" does the NgsLib identify empty libraries
"""
empty_file = os.path.join(self.test_dir, "test_not_real_file")
# make an empty file
with open(empty_file, 'w') as ef:
pass
ngs_ob = NgsLib(
name="test",
master=False,
readF=self.ref_Ffastq,
readR=empty_file,
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe)
nonify_empty_lib_files(ngsLib=ngs_ob, logger=logger)
self.assertTrue(ngs_ob.readR is None)
self.to_be_removed.append(empty_file)
def test_single_lib(self):
testlib_s = NgsLib(
name="test",
master=True,
readF=None,
readR=None,
readS0=self.ref_Ffastq,
ref_fasta=self.ref_fasta,
mapper_exe=self.smalt_exe)
self.assertEqual(testlib_s.libtype, "s_1")
def tearDown(self):
"""
"""
for filename in self.to_be_removed:
os.unlink(filename)
pass
pass
if __name__ == '__main__':
unittest.main()
| repo_name: nickp60/riboSeed | path: tests/test_NgsLib.py | language: Python | license: mit | size: 5,902 | keyword: ["BWA"] | text_hash: 218db59aa5dbf84dca43830fd2dfde68c7bfa2ce9ffe43b24ccec48ace5cafb2 |
from django.contrib import admin
from collections import OrderedDict
from edc.export.actions import export_as_csv_action
from edc.subject.appointment.admin import BaseAppointmentModelAdmin
from bhp074.apps.eit_lab.models import MaternalRequisition
from ..models import MaternalVisit
from ..forms import MaternalVisitForm
class MaternalVisitAdmin(BaseAppointmentModelAdmin):
form = MaternalVisitForm
visit_model_instance_field = 'maternal_visit'
requisition_model = MaternalRequisition
dashboard_type = 'maternal'
list_display = (
'appointment',
'report_datetime',
'reason',
# "info_source",
'created',
'user_created',
)
list_filter = (
'report_datetime',
'reason',
# 'appointment__appt_status',
'appointment__visit_definition__code',
)
search_fields = (
'appointment__registered_subject__subject_identifier',
'appointment__registered_subject__registration_identifier',
'appointment__registered_subject__first_name',
'appointment__registered_subject__identity',
)
fields = (
"appointment",
"report_datetime",
# "info_source",
# "info_source_other",
"reason",
"reason_missed",
# 'survival_status',
# 'date_last_alive',
"comments",
)
actions = [export_as_csv_action(description="CSV Export of Maternal Visit",
fields=[],
delimiter=',',
exclude=['created', 'modified', 'user_created',
'user_modified', 'revision', 'id',
'hostname_created', 'hostname_modified'],
extra_fields=OrderedDict(
{'subject_identifier': 'appointment__registered_subject__subject_identifier',
'gender': 'appointment__registered_subject__gender',
'dob': 'appointment__registered_subject__dob',
'registered': 'appointment__registered_subject__registration_datetime'}),
)]
admin.site.register(MaternalVisit, MaternalVisitAdmin)
| repo_name: botswana-harvard/edc-bhp074 | path: bhp074/apps/eit_maternal/admin/maternal_visit_admin.py | language: Python | license: gpl-2.0 | size: 2,145 | keyword: ["VisIt"] | text_hash: dd565972b4976c02215f25ae89430bdb832f82d8e156d049e32206a697d81458 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Picklable read-only I/O classes --- :mod:`MDAnalysis.lib.picklable_file_io`
===========================================================================
Provides an interface for pickling read-only IO file objects.
These classes are used for further pickling :class:`MDAnalysis.core.universe`
in an object composition approach.
.. autoclass:: FileIOPicklable
:members:
.. autoclass:: BufferIOPicklable
:members:
.. autoclass:: TextIOPicklable
:members:
.. autoclass:: BZ2Picklable
:members:
.. autoclass:: GzipPicklable
:members:
.. autofunction:: pickle_open
.. autofunction:: bz2_pickle_open
.. autofunction:: gzip_pickle_open
.. versionadded:: 2.0.0
"""
import io
import os
import bz2
import gzip
class FileIOPicklable(io.FileIO):
"""File object (read-only) that can be pickled.
This class provides a file-like object (as returned by :func:`open`,
namely :class:`io.FileIO`) that, unlike standard Python file objects,
can be pickled. Only read mode is supported.
When the file is pickled, filename and position of the open file handle in
the file are saved. On unpickling, the file is opened by filename,
    and the read position is restored by seeking to the saved offset.
This means that for a successful unpickle, the original file still has to
be accessible with its filename.
Note
----
This class only supports reading files in binary mode. If you need to open
    a file in text mode, use :func:`pickle_open` instead.
Parameters
----------
name : str
either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened.
mode : str
only reading ('r') mode works. It exists to be consistent
with a wider API.
Example
-------
::
>>> file = FileIOPicklable(PDB)
>>> file.readline()
>>> file_pickled = pickle.loads(pickle.dumps(file))
>>> print(file.tell(), file_pickled.tell())
55 55
See Also
---------
TextIOPicklable
BufferIOPicklable
.. versionadded:: 2.0.0
"""
def __init__(self, name, mode='r'):
self._mode = mode
super().__init__(name, mode)
def __getstate__(self):
if self._mode != 'r':
raise RuntimeError("Can only pickle files that were opened "
"in read mode, not {}".format(self._mode))
return self.name, self.tell()
def __setstate__(self, args):
name = args[0]
super().__init__(name, mode='r')
self.seek(args[1])
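# --- Hedged usage sketch (added for illustration; not part of the upstream
# module). It demonstrates the pickling contract described in the docstring
# above: the filename and offset are stored on pickling, and the file is
# reopened by name and repositioned on unpickling. The helper name and the
# temporary file are assumptions made only for this demo.
def _demo_fileio_picklable():  # pragma: no cover
    import pickle
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.txt',
                                     delete=False) as tmp:
        tmp.write("first line\nsecond line\n")
        path = tmp.name
    original = FileIOPicklable(path)
    original.readline()                              # advance the handle
    restored = pickle.loads(pickle.dumps(original))  # reopen by name + seek
    assert original.tell() == restored.tell()
    original.close()
    restored.close()
    os.remove(path)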
class BufferIOPicklable(io.BufferedReader):
"""A picklable buffer object for read-only FileIO object.
This class provides a buffered :class:`io.BufferedReader`
that can be pickled.
Note that this only works in read mode.
Parameters
----------
raw : FileIO object
Example
-------
::
file = FileIOPicklable('filename')
buffer_wrapped = BufferIOPicklable(file)
See Also
---------
FileIOPicklable
TextIOPicklable
.. versionadded:: 2.0.0
"""
def __init__(self, raw):
super().__init__(raw)
self.raw_class = raw.__class__
def __getstate__(self):
return self.raw_class, self.name, self.tell()
def __setstate__(self, args):
raw_class = args[0]
name = args[1]
raw = raw_class(name)
super().__init__(raw)
self.seek(args[2])
class TextIOPicklable(io.TextIOWrapper):
"""Character and line based picklable file-like object.
This class provides a file-like :class:`io.TextIOWrapper` object that can
be pickled. Note that this only works in read mode.
Note
----
After pickling, the current position is reset. `universe.trajectory[i]` has
to be used to return to its original frame.
Parameters
----------
raw : FileIO object
Example
-------
::
file = FileIOPicklable('filename')
text_wrapped = TextIOPicklable(file)
See Also
---------
FileIOPicklable
BufferIOPicklable
.. versionadded:: 2.0.0
"""
def __init__(self, raw):
super().__init__(raw)
self.raw_class = raw.__class__
def __getstate__(self):
try:
name = self.name
except AttributeError:
# This is kind of ugly--BZ2File does not save its name.
name = self.buffer._fp.name
return self.raw_class, name
def __setstate__(self, args):
raw_class = args[0]
name = args[1]
# raw_class is used for further expansion this functionality to
# Gzip files, which also requires a text wrapper.
raw = raw_class(name)
super().__init__(raw)
class BZ2Picklable(bz2.BZ2File):
"""File object (read-only) for bzip2 (de)compression that can be pickled.
This class provides a file-like object (as returned by :func:`bz2.open`,
namely :class:`bz2.BZ2File`) that, unlike standard Python file objects,
can be pickled. Only read mode is supported.
When the file is pickled, filename and position of the open file handle in
the file are saved. On unpickling, the file is opened by filename,
    and the read position is restored by seeking to the saved offset.
This means that for a successful unpickle, the original file still has to
be accessible with its filename.
Note
----
This class only supports reading files in binary mode. If you need to open
    a compressed file in text mode, use :func:`bz2_pickle_open` instead.
Parameters
----------
name : str
either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened.
mode : str
can only be 'r', 'rb' to make pickle work.
Example
-------
::
>>> file = BZ2Picklable(XYZ_bz2)
>>> file.readline()
>>> file_pickled = pickle.loads(pickle.dumps(file))
>>> print(file.tell(), file_pickled.tell())
5 5
See Also
---------
FileIOPicklable
BufferIOPicklable
TextIOPicklable
GzipPicklable
.. versionadded:: 2.0.0
"""
def __init__(self, name, mode='rb'):
self._bz_mode = mode
super().__init__(name, mode)
def __getstate__(self):
if not self._bz_mode.startswith('r'):
raise RuntimeError("Can only pickle files that were opened "
"in read mode, not {}".format(self._bz_mode))
return self._fp.name, self.tell()
def __setstate__(self, args):
super().__init__(args[0])
self.seek(args[1])
class GzipPicklable(gzip.GzipFile):
"""Gzip file object (read-only) that can be pickled.
This class provides a file-like object (as returned by :func:`gzip.open`,
namely :class:`gzip.GzipFile`) that, unlike standard Python file objects,
can be pickled. Only read mode is supported.
When the file is pickled, filename and position of the open file handle in
the file are saved. On unpickling, the file is opened by filename,
    and the read position is restored by seeking to the saved offset.
This means that for a successful unpickle, the original file still has to
be accessible with its filename.
Note
----
This class only supports reading files in binary mode. If you need to open
    a compressed file in text mode, use :func:`gzip_pickle_open` instead.
Parameters
----------
name : str
either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened.
mode : str
can only be 'r', 'rb' to make pickle work.
Example
-------
::
>>> file = GzipPicklable(MMTF_gz)
>>> file.readline()
>>> file_pickled = pickle.loads(pickle.dumps(file))
>>> print(file.tell(), file_pickled.tell())
1218 1218
See Also
---------
FileIOPicklable
BufferIOPicklable
TextIOPicklable
BZ2Picklable
.. versionadded:: 2.0.0
"""
def __init__(self, name, mode='rb'):
self._gz_mode = mode
super().__init__(name, mode)
def __getstate__(self):
if not self._gz_mode.startswith('r'):
raise RuntimeError("Can only pickle files that were opened "
"in read mode, not {}".format(self._gz_mode))
return self.name, self.tell()
def __setstate__(self, args):
super().__init__(args[0])
self.seek(args[1])
def pickle_open(name, mode='rt'):
"""Open file and return a stream with pickle function implemented.
This function returns a FileIOPicklable object wrapped in a
BufferIOPicklable class when given the "rb" reading mode,
or a FileIOPicklable object wrapped in a TextIOPicklable class with the "r"
or "rt" reading mode. It can be used as a context manager, and replace the
built-in :func:`open` function in read mode that only returns an
unpicklable file object.
In order to serialize a :class:`MDAnalysis.core.Universe`, this function
    can be used to open trajectory/topology files. This object composition is more
flexible and easier than class inheritance to implement pickling
for new readers.
Note
----
Can be only used with read mode.
Parameters
----------
name : str
either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened.
mode: {'r', 'rt', 'rb'} (optional)
'r': open for reading in text mode;
'rt': read in text mode (default);
'rb': read in binary mode;
Returns
-------
stream-like object: BufferIOPicklable or TextIOPicklable
when mode is 'r' or 'rt', returns TextIOPicklable;
when mode is 'rb', returns BufferIOPicklable
Raises
------
ValueError
if `mode` is not one of the allowed read modes
Examples
-------
open as context manager::
with pickle_open('filename') as f:
line = f.readline()
open as function::
f = pickle_open('filename')
line = f.readline()
f.close()
See Also
--------
:func:`MDAnalysis.lib.util.anyopen`
:func:`io.open`
.. versionadded:: 2.0.0
"""
if mode not in {'r', 'rt', 'rb'}:
raise ValueError("Only read mode ('r', 'rt', 'rb') "
"files can be pickled.")
name = os.fspath(name)
raw = FileIOPicklable(name)
if mode == 'rb':
return BufferIOPicklable(raw)
elif mode in {'r', 'rt'}:
return TextIOPicklable(raw)
def bz2_pickle_open(name, mode='rb'):
"""Open a bzip2-compressed file in binary or text mode
with pickle function implemented.
This function returns a BZ2Picklable object when given the "rb" or "r"
reading mode, or a BZ2Picklable object wrapped in a TextIOPicklable class
with the "rt" reading mode.
It can be used as a context manager, and replace the built-in
:func:`bz2.open` function in read mode that only returns an
unpicklable file object.
Note
----
Can be only used with read mode.
Parameters
----------
name : str
either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened.
mode: {'r', 'rt', 'rb'} (optional)
'r': open for reading in binary mode;
'rt': read in text mode;
'rb': read in binary mode; (default)
Returns
-------
stream-like object: BZ2Picklable or TextIOPicklable
when mode is 'rt', returns TextIOPicklable;
when mode is 'r' or 'rb', returns BZ2Picklable
Raises
------
ValueError
if `mode` is not one of the allowed read modes
Examples
-------
open as context manager::
with bz2_pickle_open('filename') as f:
line = f.readline()
open as function::
f = bz2_pickle_open('filename')
line = f.readline()
f.close()
See Also
--------
:func:`io.open`
:func:`bz2.open`
:func:`MDAnalysis.lib.util.anyopen`
:func:`MDAnalysis.lib.picklable_file_io.pickle_open`
:func:`MDAnalysis.lib.picklable_file_io.gzip_pickle_open`
.. versionadded:: 2.0.0
"""
if mode not in {'r', 'rt', 'rb'}:
raise ValueError("Only read mode ('r', 'rt', 'rb') "
"files can be pickled.")
bz_mode = mode.replace("t", "")
binary_file = BZ2Picklable(name, bz_mode)
if "t" in mode:
return TextIOPicklable(binary_file)
else:
return binary_file
def gzip_pickle_open(name, mode='rb'):
"""Open a gzip-compressed file in binary or text mode
with pickle function implemented.
This function returns a GzipPicklable object when given the "rb" or "r"
reading mode, or a GzipPicklable object wrapped in a TextIOPicklable class
with the "rt" reading mode.
It can be used as a context manager, and replace the built-in
:func:`gzip.open` function in read mode that only returns an
unpicklable file object.
Note
----
Can be only used with read mode.
Parameters
----------
name : str
either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened.
mode: {'r', 'rt', 'rb'} (optional)
'r': open for reading in binary mode;
'rt': read in text mode;
'rb': read in binary mode; (default)
Returns
-------
stream-like object: GzipPicklable or TextIOPicklable
when mode is 'rt', returns TextIOPicklable;
when mode is 'r' or 'rb', returns GzipPicklable
Raises
------
ValueError
if `mode` is not one of the allowed read modes
Examples
-------
open as context manager::
with gzip_pickle_open('filename') as f:
line = f.readline()
open as function::
f = gzip_pickle_open('filename')
line = f.readline()
f.close()
See Also
--------
:func:`io.open`
:func:`gzip.open`
:func:`MDAnalysis.lib.util.anyopen`
:func:`MDAnalysis.lib.picklable_file_io.pickle_open`
:func:`MDAnalysis.lib.picklable_file_io.bz2_pickle_open`
.. versionadded:: 2.0.0
"""
if mode not in {'r', 'rt', 'rb'}:
raise ValueError("Only read mode ('r', 'rt', 'rb') "
"files can be pickled.")
gz_mode = mode.replace("t", "")
binary_file = GzipPicklable(name, gz_mode)
if "t" in mode:
return TextIOPicklable(binary_file)
else:
return binary_file
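# --- Hedged usage sketch (added for illustration; not part of the upstream
# module). It exercises pickle_open in its two read modes and the caveat from
# the TextIOPicklable docstring: a binary handle keeps its position across a
# pickle round trip, while a text handle restarts at the beginning. The helper
# name and the temporary file are assumptions made only for this demo.
def _demo_pickle_open():  # pragma: no cover
    import pickle
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.txt',
                                     delete=False) as tmp:
        tmp.write("alpha\nbeta\n")
        path = tmp.name
    with pickle_open(path, 'rb') as fb:           # BufferIOPicklable
        fb.readline()
        fb_copy = pickle.loads(pickle.dumps(fb))
        assert fb_copy.tell() == fb.tell()        # position preserved
        fb_copy.close()
    with pickle_open(path, 'rt') as ft:           # TextIOPicklable
        ft.readline()
        ft_copy = pickle.loads(pickle.dumps(ft))
        assert ft_copy.tell() == 0                # position reset to the start
        ft_copy.close()
    os.remove(path)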
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/lib/picklable_file_io.py
|
Python
|
gpl-2.0
| 15,899
|
[
"MDAnalysis"
] |
6cb99208694e02848698c9bae59cd8f23fba4d274522834d0895b1725e89947b
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import math
import os
import time
from gluoncv.model_zoo import get_model
import horovod.mxnet as hvd
import mxnet as mx
import numpy as np
from mxnet import autograd, gluon, lr_scheduler
from mxnet.io import DataBatch, DataIter
# Training settings
parser = argparse.ArgumentParser(description='MXNet ImageNet Example',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--use-rec', action='store_true', default=False,
help='use image record iter for data input (default: False)')
parser.add_argument('--data-nthreads', type=int, default=2,
help='number of threads for data decoding (default: 2)')
parser.add_argument('--rec-train', type=str, default='',
help='the training data')
parser.add_argument('--rec-train-idx', type=str, default='',
help='the index of training data')
parser.add_argument('--rec-val', type=str, default='',
help='the validation data')
parser.add_argument('--rec-val-idx', type=str, default='',
help='the index of validation data')
parser.add_argument('--batch-size', type=int, default=128,
help='training batch size per device (default: 128)')
parser.add_argument('--dtype', type=str, default='float32',
help='data type for training (default: float32)')
parser.add_argument('--num-epochs', type=int, default=90,
help='number of training epochs (default: 90)')
parser.add_argument('--lr', type=float, default=0.05,
help='learning rate for a single GPU (default: 0.05)')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum value for optimizer (default: 0.9)')
parser.add_argument('--wd', type=float, default=0.0001,
help='weight decay rate (default: 0.0001)')
parser.add_argument('--lr-mode', type=str, default='poly',
help='learning rate scheduler mode. Options are step, \
poly and cosine (default: poly)')
parser.add_argument('--lr-decay', type=float, default=0.1,
help='decay rate of learning rate (default: 0.1)')
parser.add_argument('--lr-decay-epoch', type=str, default='40,60',
                    help='epochs at which the learning rate decays (default: 40,60)')
parser.add_argument('--warmup-lr', type=float, default=0.0,
help='starting warmup learning rate (default: 0.0)')
parser.add_argument('--warmup-epochs', type=int, default=10,
help='number of warmup epochs (default: 10)')
parser.add_argument('--last-gamma', action='store_true', default=False,
help='whether to init gamma of the last BN layer in \
each bottleneck to 0 (default: False)')
parser.add_argument('--model', type=str, default='resnet50_v1',
help='type of model to use. see vision_model for options.')
parser.add_argument('--mode', type=str, default='module',
help='mode in which to train the model. options are \
module, gluon (default: module)')
parser.add_argument('--use-pretrained', action='store_true', default=False,
help='load pretrained model weights (default: False)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training (default: False)')
parser.add_argument('--eval-epoch', action='store_true', default=False,
help='evaluate validation accuracy after each epoch \
when training in module mode (default: False)')
parser.add_argument('--eval-frequency', type=int, default=0,
help='frequency of evaluating validation accuracy \
when training with gluon mode (default: 0)')
parser.add_argument('--log-interval', type=int, default=0,
help='number of batches to wait before logging (default: 0)')
parser.add_argument('--save-frequency', type=int, default=0,
help='frequency of model saving (default: 0)')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logging.info(args)
# Horovod: initialize Horovod
hvd.init()
num_workers = hvd.size()
rank = hvd.rank()
local_rank = hvd.local_rank()
num_classes = 1000
num_training_samples = 1281167
batch_size = args.batch_size
epoch_size = \
int(math.ceil(int(num_training_samples // num_workers) / batch_size))
if args.lr_mode == 'step':
lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
steps = [epoch_size * x for x in lr_decay_epoch]
lr_sched = lr_scheduler.MultiFactorScheduler(
step=steps,
factor=args.lr_decay,
base_lr=(args.lr * num_workers),
warmup_steps=(args.warmup_epochs * epoch_size),
warmup_begin_lr=args.warmup_lr
)
elif args.lr_mode == 'poly':
lr_sched = lr_scheduler.PolyScheduler(
args.num_epochs * epoch_size,
base_lr=(args.lr * num_workers),
pwr=2,
warmup_steps=(args.warmup_epochs * epoch_size),
warmup_begin_lr=args.warmup_lr
)
elif args.lr_mode == 'cosine':
lr_sched = lr_scheduler.CosineScheduler(
args.num_epochs * epoch_size,
base_lr=(args.lr * num_workers),
warmup_steps=(args.warmup_epochs * epoch_size),
warmup_begin_lr=args.warmup_lr
)
else:
raise ValueError('Invalid lr mode')
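# Worked example of the quantities above (illustrative only; the real values
# depend on the launch configuration): with 8 Horovod workers and the default
# batch size of 128, epoch_size = ceil((1281167 // 8) / 128) = ceil(1251.13)
# = 1252 batches per worker per epoch, the scaled base learning rate is
# 0.05 * 8 = 0.4, and the default 10 warmup epochs cover 10 * 1252 = 12520 steps.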
# Function for reading data from record file
# For more details about data loading in MXNet, please refer to
# https://mxnet.incubator.apache.org/tutorials/basic/data.html?highlight=imagerecorditer
def get_data_rec(rec_train, rec_train_idx, rec_val, rec_val_idx, batch_size,
data_nthreads):
rec_train = os.path.expanduser(rec_train)
rec_train_idx = os.path.expanduser(rec_train_idx)
rec_val = os.path.expanduser(rec_val)
rec_val_idx = os.path.expanduser(rec_val_idx)
jitter_param = 0.4
lighting_param = 0.1
mean_rgb = [123.68, 116.779, 103.939]
def batch_fn(batch, ctx):
data = batch.data[0].as_in_context(ctx)
label = batch.label[0].as_in_context(ctx)
return data, label
train_data = mx.io.ImageRecordIter(
path_imgrec=rec_train,
path_imgidx=rec_train_idx,
preprocess_threads=data_nthreads,
shuffle=True,
batch_size=batch_size,
label_width=1,
data_shape=(3, 224, 224),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
rand_mirror=True,
rand_crop=False,
random_resized_crop=True,
max_aspect_ratio=4. / 3.,
min_aspect_ratio=3. / 4.,
max_random_area=1,
min_random_area=0.08,
verbose=False,
brightness=jitter_param,
saturation=jitter_param,
contrast=jitter_param,
pca_noise=lighting_param,
num_parts=num_workers,
part_index=rank,
device_id=local_rank
)
    # Keep each node using the full validation data to make it easy to monitor results
val_data = mx.io.ImageRecordIter(
path_imgrec=rec_val,
path_imgidx=rec_val_idx,
preprocess_threads=data_nthreads,
shuffle=False,
batch_size=batch_size,
resize=256,
label_width=1,
rand_crop=False,
rand_mirror=False,
data_shape=(3, 224, 224),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
device_id=local_rank
)
return train_data, val_data, batch_fn
# Create data iterator for synthetic data
class SyntheticDataIter(DataIter):
def __init__(self, num_classes, data_shape, max_iter, dtype, ctx):
self.batch_size = data_shape[0]
self.cur_iter = 0
self.max_iter = max_iter
self.dtype = dtype
label = np.random.randint(0, num_classes, [self.batch_size, ])
data = np.random.uniform(-1, 1, data_shape)
self.data = mx.nd.array(data, dtype=self.dtype,
ctx=ctx)
self.label = mx.nd.array(label, dtype=self.dtype,
ctx=ctx)
def __iter__(self):
return self
@property
def provide_data(self):
return [mx.io.DataDesc('data', self.data.shape, self.dtype)]
@property
def provide_label(self):
return [mx.io.DataDesc('softmax_label',
(self.batch_size,), self.dtype)]
def next(self):
self.cur_iter += 1
if self.cur_iter <= self.max_iter:
return DataBatch(data=(self.data,),
label=(self.label,),
pad=0,
index=None,
provide_data=self.provide_data,
provide_label=self.provide_label)
else:
raise StopIteration
def __next__(self):
return self.next()
def reset(self):
self.cur_iter = 0
# Horovod: pin GPU to local rank
context = mx.cpu(local_rank) if args.no_cuda else mx.gpu(local_rank)
if args.use_rec:
# Fetch training and validation data if present
train_data, val_data, batch_fn = get_data_rec(args.rec_train,
args.rec_train_idx,
args.rec_val,
args.rec_val_idx,
batch_size,
args.data_nthreads)
else:
# Otherwise use synthetic data
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
train_data = SyntheticDataIter(num_classes, data_shape, epoch_size,
np.float32, context)
val_data = None
# Get model from GluonCV model zoo
# https://gluon-cv.mxnet.io/model_zoo/index.html
kwargs = {'ctx': context,
'pretrained': args.use_pretrained,
'classes': num_classes}
if args.last_gamma:
kwargs['last_gamma'] = True
net = get_model(args.model, **kwargs)
net.cast(args.dtype)
# Create initializer
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in",
magnitude=2)
def train_gluon():
def evaluate(epoch):
if not args.use_rec:
return
val_data.reset()
acc_top1 = mx.gluon.metric.Accuracy()
acc_top5 = mx.gluon.metric.TopKAccuracy(5)
for _, batch in enumerate(val_data):
data, label = batch_fn(batch, context)
output = net(data.astype(args.dtype, copy=False))
acc_top1.update([label], [output])
acc_top5.update([label], [output])
top1_name, top1_acc = acc_top1.get()
top5_name, top5_acc = acc_top5.get()
logging.info('Epoch[%d] Rank[%d]\tValidation-%s=%f\tValidation-%s=%f',
epoch, rank, top1_name, top1_acc, top5_name, top5_acc)
# Hybridize and initialize model
net.hybridize()
net.initialize(initializer, ctx=context)
# Horovod: fetch and broadcast parameters
params = net.collect_params()
if params is not None:
hvd.broadcast_parameters(params, root_rank=0)
# Create optimizer
optimizer_params = {'wd': args.wd,
'momentum': args.momentum,
'lr_scheduler': lr_sched}
if args.dtype == 'float16':
optimizer_params['multi_precision'] = True
opt = mx.optimizer.create('sgd', **optimizer_params)
# Horovod: create DistributedTrainer, a subclass of gluon.Trainer
trainer = hvd.DistributedTrainer(params, opt)
# Create loss function and train metric
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.gluon.metric.Accuracy()
# Train model
for epoch in range(args.num_epochs):
tic = time.time()
if args.use_rec:
train_data.reset()
metric.reset()
btic = time.time()
for nbatch, batch in enumerate(train_data, start=1):
data, label = batch_fn(batch, context)
with autograd.record():
output = net(data.astype(args.dtype, copy=False))
loss = loss_fn(output, label)
loss.backward()
trainer.step(batch_size)
metric.update([label], [output])
if args.log_interval and nbatch % args.log_interval == 0:
name, acc = metric.get()
logging.info('Epoch[%d] Rank[%d] Batch[%d]\t%s=%f\tlr=%f',
epoch, rank, nbatch, name, acc, trainer.learning_rate)
if rank == 0:
batch_speed = num_workers * batch_size * args.log_interval / (time.time() - btic)
logging.info('Epoch[%d] Batch[%d]\tSpeed: %.2f samples/sec',
epoch, nbatch, batch_speed)
btic = time.time()
# Report metrics
elapsed = time.time() - tic
_, acc = metric.get()
logging.info('Epoch[%d] Rank[%d] Batch[%d]\tTime cost=%.2f\tTrain-accuracy=%f',
epoch, rank, nbatch, elapsed, acc)
if rank == 0:
epoch_speed = num_workers * batch_size * nbatch / elapsed
logging.info('Epoch[%d]\tSpeed: %.2f samples/sec', epoch, epoch_speed)
# Evaluate performance
if args.eval_frequency and (epoch + 1) % args.eval_frequency == 0:
evaluate(epoch)
# Save model
if args.save_frequency and (epoch + 1) % args.save_frequency == 0:
net.export('%s-%d' % (args.model, rank), epoch=epoch)
# Evaluate performance at the end of training
evaluate(epoch)
def train_module():
# Create input symbol
data = mx.sym.var('data')
if args.dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
net.cast(np.float16)
# Create output symbol
out = net(data)
if args.dtype == 'float16':
out = mx.sym.Cast(data=out, dtype=np.float32)
softmax = mx.sym.SoftmaxOutput(out, name='softmax')
# Create model
mod = mx.mod.Module(softmax, context=context)
# Initialize parameters
if args.use_pretrained:
arg_params = {}
for x in net.collect_params().values():
x.reset_ctx(mx.cpu())
arg_params[x.name] = x.data()
else:
arg_params = None
aux_params = None
mod.bind(data_shapes=train_data.provide_data,
label_shapes=train_data.provide_label)
mod.init_params(initializer, arg_params=arg_params, aux_params=aux_params)
# Horovod: fetch and broadcast parameters
(arg_params, aux_params) = mod.get_params()
if arg_params is not None:
hvd.broadcast_parameters(arg_params, root_rank=0)
if aux_params is not None:
hvd.broadcast_parameters(aux_params, root_rank=0)
mod.set_params(arg_params=arg_params, aux_params=aux_params)
# Create optimizer
# Note that when using Module API, we need to specify rescale_grad since
# we create optimizer first and wrap it with DistributedOptimizer. For
# Gluon API, it is handled in Trainer.step() function so there is no need
# to specify rescale_grad (see above train_gluon() function).
optimizer_params = {'wd': args.wd,
'momentum': args.momentum,
'rescale_grad': 1.0 / batch_size,
'lr_scheduler': lr_sched}
if args.dtype == 'float16':
optimizer_params['multi_precision'] = True
opt = mx.optimizer.create('sgd', **optimizer_params)
# Horovod: wrap optimizer with DistributedOptimizer
opt = hvd.DistributedOptimizer(opt)
# Setup validation data and callback during training
eval_data = None
if args.eval_epoch:
eval_data = val_data
batch_callback = None
if args.log_interval > 0 and rank == 0:
batch_callback = mx.callback.Speedometer(batch_size * num_workers,
args.log_interval)
epoch_callback = None
if args.save_frequency > 0:
epoch_callback = mx.callback.do_checkpoint(
'%s-%d' % (args.model, rank),
period=args.save_frequency)
# Train model
mod.fit(train_data,
eval_data=eval_data,
num_epoch=args.num_epochs,
kvstore=None,
batch_end_callback=batch_callback,
epoch_end_callback=epoch_callback,
optimizer=opt)
# Evaluate performance if not using synthetic data
if args.use_rec:
acc_top1 = mx.gluon.metric.Accuracy()
acc_top5 = mx.gluon.metric.TopKAccuracy(5)
res = mod.score(val_data, [acc_top1, acc_top5])
for name, val in res:
logging.info('Epoch[%d] Rank[%d] Validation-%s=%f',
args.num_epochs - 1, rank, name, val)
if __name__ == '__main__':
if args.mode == 'module':
train_module()
elif args.mode == 'gluon':
train_gluon()
else:
raise ValueError('Invalid training mode.')
|
zhreshold/mxnet
|
example/distributed_training-horovod/resnet50_imagenet.py
|
Python
|
apache-2.0
| 17,959
|
[
"Gaussian"
] |
a7f2e22497e1aa4576ff039449de51ca9f53be046f3d68d1c872c72efa7b4ffd
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import StringIO
import httplib
class CachedHttpResponse(StringIO.StringIO):
""" An class similar to an urllib2.Response object for cached responses."""
def __init__(self, url, headerValue, bodyValue, code = 200, doProcessing=True):
""" initialises a new CachedHttpResponse instance
Arguments:
url : String - The URL from which the data comes
headerValue : String - The header data that should be cached
bodyValue : String - The body value that should be cached
Keyword Arguments:
code : Integer - The HTTP return code
doProcessing : Boolean - [optional] If set to True, cache values are extracted.
Defaults to True. Use for creating a simple httpresponse
in case a complex one failed.
"""
StringIO.StringIO.__init__(self, bodyValue)
self.url = url
self.headerValue = headerValue
self.bodyValue = bodyValue
# cached responses are always OK
self.code = code
self.msg = "OK"
# now we set the header value as StringIO
self.headers = httplib.HTTPMessage(StringIO.StringIO(headerValue))
if doProcessing:
self.cacheParameters = self.__ExtractCachHeader(self.headers)
def info(self):
""" Returns headers """
return self.headers
def geturl(self):
""" Returns original URL """
return self.url
def SetCachFlag(self, flag, value = True):
""" Sets additional flags to the Headers
Arguments:
flag : String - Name of the header attribute
Keyword Arguments:
            value : Object - The value to store. Ultimately it will be stored
                             as a string.
"""
#headerBuffer = "%s%s: True\r\n" % (self.headerValue, flag)
#print headerBuffer
self.headers[flag] = str(value)
self.headers = httplib.HTTPMessage(StringIO.StringIO(str(self.headers)))
return
def __str__(self):
""" Returns a text representation of the response """
return "CachedHttpResponse with status %s (%s) for %s\nCache-Parameters: %s" % (self.code, self.msg, self.url, self.cacheParameters)
def __ExtractCachHeader(self, headers):
""" Extracts the "Cache-Control" header field and returns it's values
as a dictionary.
Arguments
headers : HTTPHeaders - The headers of a HTTP request/response
Returns a dictionary with the Cache-Control parameters. If a parameter
does not have a value, the value True is used as in general the
availability of a parameter means it is valid.
"""
cacheParams = dict()
if headers.has_key("cache-control"):
headerLine = headers['cache-control']
for entry in headerLine.strip().split(","):
#self.__Log("Found Cache Key: '%s'", entry.strip())
if entry.find("=") > 0:
(key, value) = entry.split("=")
try:
cacheParams[key.strip().lower()] = int(value.strip())
except ValueError:
cacheParams[key.strip().lower()] = True
else:
cacheParams[entry.strip().lower()] = True
if headers.has_key("etag"):
#self.__Log("Found Cache Key: '%s'", entry.strip())
cacheParams['etag'] = headers['etag']
return cacheParams
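# --- Hedged usage sketch (added for illustration; not part of the original
# module, and it assumes the Python 2 runtime this file targets). It shows how
# __ExtractCachHeader turns a "Cache-Control" header into the cacheParameters
# dictionary. The URL and header text below are made up for the demo.
def _DemoCachedHttpResponse():
    headerValue = ("Content-Type: text/html\r\n"
                   "Cache-Control: max-age=3600, no-cache\r\n"
                   "ETag: \"abc123\"\r\n\r\n")
    response = CachedHttpResponse("http://www.example.com", headerValue, "<html/>")
    # cacheParameters now contains {'max-age': 3600, 'no-cache': True, 'etag': '"abc123"'}
    print response.cacheParameters
    print response.info().getheader("content-type")  # text/html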
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/cache/cachedhttpresponse.py
|
Python
|
gpl-2.0
| 4,532
|
[
"VisIt"
] |
5813f3cae9ab82405d6d04acb94819a3d5f80a7fe7db7f8cfae5e9e94a085193
|
import numpy as np
from .shared import StaticContainerStore, StaticContainer, unmask_quantity
from openpathsampling.netcdfplus import WeakLRUCache
import openpathsampling as paths
variables = ['statics']
lazy = ['statics']
storables = ['statics']
dimensions = ['n_atoms', 'n_spatial']
_length_unit = "simtk(unit.nanometer)"
_array32 = "ndarray.float32"
schema_entries = [
('statics', [
('coordinates',
'{length_unit}*{array32}({{n_atoms}},{{n_spatial}})'.format(
length_unit=_length_unit, array32=_array32
)),
('box_vectors',
'{length_unit}*{array32}({{n_spatial}},{{n_spatial}})'.format(
length_unit=_length_unit, array32=_array32
)),
('engine', 'uuid'),
]),
]
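# Illustrative note (added): after the .format() calls above the doubled braces
# collapse to single ones, so the 'coordinates' entry expands to
#     'simtk(unit.nanometer)*ndarray.float32({n_atoms},{n_spatial})'
# i.e. a quantity in nanometers wrapping a float32 array whose shape comes from
# the 'n_atoms' and 'n_spatial' dimensions declared above.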
def netcdfplus_init(store):
static_store = StaticContainerStore()
static_store.set_caching(WeakLRUCache(10000))
name = store.prefix + 'statics'
static_store.set_dimension_prefix_store(store)
store.storage.create_store(name, static_store, False)
store.create_variable(
'statics',
'lazyobj.' + name,
description="the snapshot index (0..n_configuration-1) of "
"snapshot '{idx}'.")
@property
def coordinates(snapshot):
"""
Returns
-------
coordinates: numpy.ndarray, shape=(atoms, 3), dtype=numpy.float32
the atomic coordinates of the configuration. The coordinates are
wrapped in a :class:`openmm.unit.Unit`.
"""
if snapshot.statics is not None:
return unmask_quantity(snapshot.statics.coordinates)
return None
@coordinates.setter
def coordinates(self, value):
if value is not None:
sc = StaticContainer(coordinates=value,
box_vectors=self.box_vectors,
engine=self.engine)
else:
sc = None
self.statics = sc
@property
def box_vectors(snapshot):
"""
Returns
-------
box_vectors: numpy.ndarray, shape=(3, 3), dtype=numpy.float32
        the box_vectors of the configuration. The box vectors are wrapped in
        an openmm.unit.Unit.
"""
if snapshot.statics is not None:
return unmask_quantity(snapshot.statics.box_vectors)
return None
@box_vectors.setter
def box_vectors(self, value):
if value is not None:
sc = StaticContainer(box_vectors=value,
coordinates=self.coordinates,
engine=self.engine)
else:
sc = None
self.statics = sc
@property
def md(snapshot):
"""
Returns
-------
md : mdtraj.Trajectory
the actual trajectory object. Can be used with all functions from mdtraj
Notes
-----
Rather slow since the topology has to be made each time. Try to avoid
it. This will only work if the engine has an mdtraj_topology property.
"""
if snapshot.statics is not None:
return paths.Trajectory([snapshot]).to_mdtraj()
@property
def xyz(snapshot):
"""
Returns
-------
xyz : numpy.ndarray, shape=(atoms, 3), dtype=numpy.float32
atomic coordinates without dimensions. Be careful.
"""
from openpathsampling.integration_tools import unit as u
coord = snapshot.coordinates
if type(coord) is u.Quantity:
return coord._value
else:
return coord
|
choderalab/openpathsampling
|
openpathsampling/engines/features/statics.py
|
Python
|
lgpl-2.1
| 3,355
|
[
"MDTraj",
"OpenMM"
] |
8c504bf228be85ac5d78d3dd94ea5e2b2e50e1ac1483f8b3f1c3b62e813dc9f1
|
# -*- coding: utf-8 -*-
"""
trueskill
~~~~~~~~~
The video game rating system.
:copyright: (c) 2012-2015 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from itertools import chain
import math
from six import iteritems
from six.moves import map, range, zip
from .__about__ import __version__ # noqa
from .backends import choose_backend
from .factorgraph import (LikelihoodFactor, PriorFactor, SumFactor,
TruncateFactor, Variable)
from .mathematics import Gaussian, Matrix
__all__ = [
# TrueSkill objects
'TrueSkill', 'Rating',
# functions for the global environment
'rate', 'quality', 'rate_1vs1', 'quality_1vs1', 'expose', 'setup',
'global_env',
# default values
'MU', 'SIGMA', 'BETA', 'TAU', 'DRAW_PROBABILITY',
# draw probability helpers
'calc_draw_probability', 'calc_draw_margin',
# deprecated features
'transform_ratings', 'match_quality', 'dynamic_draw_probability',
]
#: Default initial mean of ratings.
MU = 25.
#: Default initial standard deviation of ratings.
SIGMA = MU / 3
#: Default distance that guarantees about 76% chance of winning.
BETA = SIGMA / 2
#: Default dynamic factor.
TAU = SIGMA / 100
#: Default draw probability of the game.
DRAW_PROBABILITY = .10
#: A basis to check reliability of the result.
DELTA = 0.0001
def calc_draw_probability(draw_margin, size, env=None):
"""Calculates a draw-probability from the given ``draw_margin``.
:param draw_margin: the draw-margin.
:param size: the number of players in two comparing teams.
:param env: the :class:`TrueSkill` object. Defaults to the global
environment.
"""
if env is None:
env = global_env()
return 2 * env.cdf(draw_margin / (math.sqrt(size) * env.beta)) - 1
def calc_draw_margin(draw_probability, size, env=None):
"""Calculates a draw-margin from the given ``draw_probability``.
:param draw_probability: the draw-probability.
:param size: the number of players in two comparing teams.
:param env: the :class:`TrueSkill` object. Defaults to the global
environment.
"""
if env is None:
env = global_env()
return env.ppf((draw_probability + 1) / 2.) * math.sqrt(size) * env.beta
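# Worked example (approximate values, added for illustration): with the default
# environment (beta = SIGMA / 2 = 25 / 6) and a 1-vs-1 match (size = 2), a draw
# probability of 0.10 corresponds to a draw margin of roughly
#     ppf((0.10 + 1) / 2) * sqrt(2) * 25 / 6 ~= 0.1257 * 1.4142 * 4.1667 ~= 0.74
# and calc_draw_probability(0.74, 2) recovers ~0.10, so the two helpers act as
# inverses of each other for a fixed team size.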
def _team_sizes(rating_groups):
"""Makes a size map of each teams."""
team_sizes = [0]
for group in rating_groups:
team_sizes.append(len(group) + team_sizes[-1])
del team_sizes[0]
return team_sizes
def _floating_point_error(env):
if env.backend == 'mpmath':
msg = 'Set "mpmath.mp.dps" to higher'
else:
msg = 'Cannot calculate correctly, set backend to "mpmath"'
return FloatingPointError(msg)
class Rating(Gaussian):
"""Represents a player's skill as Gaussian distrubution.
The default mu and sigma value follows the global environment's settings.
If you don't want to use the global, use :meth:`TrueSkill.create_rating` to
create the rating object.
:param mu: the mean.
:param sigma: the standard deviation.
"""
def __init__(self, mu=None, sigma=None):
if isinstance(mu, tuple):
mu, sigma = mu
elif isinstance(mu, Gaussian):
mu, sigma = mu.mu, mu.sigma
if mu is None:
mu = global_env().mu
if sigma is None:
sigma = global_env().sigma
super(Rating, self).__init__(mu, sigma)
def __int__(self):
return int(self.mu)
def __long__(self):
return long(self.mu)
def __float__(self):
return float(self.mu)
def __iter__(self):
return iter((self.mu, self.sigma))
def __repr__(self):
c = type(self)
args = ('.'.join([c.__module__, c.__name__]), self.mu, self.sigma)
return '%s(mu=%.3f, sigma=%.3f)' % args
class TrueSkill(object):
"""Implements a TrueSkill environment. An environment could have
    customized constants. Not every game has the same design, so you may need
    to customize the TrueSkill constants.
    For example, if 60% of the matches in your game finish in a draw, you
    should set ``draw_probability`` to 0.60::
env = TrueSkill(draw_probability=0.60)
For more details of the constants, see `The Math Behind TrueSkill`_ by
Jeff Moser.
    .. _The Math Behind TrueSkill: http://bit.ly/trueskill-math
:param mu: the initial mean of ratings.
:param sigma: the initial standard deviation of ratings. The recommended
value is a third of ``mu``.
:param beta: the distance which guarantees about 76% chance of winning.
The recommended value is a half of ``sigma``.
:param tau: the dynamic factor which restrains a fixation of rating. The
                recommended value is one percent of ``sigma``.
:param draw_probability: the draw probability between two teams. It can be
a ``float`` or function which returns a ``float``
by the given two rating (team performance)
arguments and the beta value. If it is a
``float``, the game has fixed draw probability.
Otherwise, the draw probability will be decided
dynamically per each match.
:param backend: the name of a backend which implements cdf, pdf, ppf. See
:mod:`trueskill.backends` for more details. Defaults to
``None``.
"""
def __init__(self, mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
draw_probability=DRAW_PROBABILITY, backend=None):
self.mu = mu
self.sigma = sigma
self.beta = beta
self.tau = tau
self.draw_probability = draw_probability
self.backend = backend
if isinstance(backend, tuple):
self.cdf, self.pdf, self.ppf = backend
else:
self.cdf, self.pdf, self.ppf = choose_backend(backend)
def create_rating(self, mu=None, sigma=None):
"""Initializes new :class:`Rating` object, but it fixes default mu and
sigma to the environment's.
>>> env = TrueSkill(mu=0, sigma=1)
>>> env.create_rating()
trueskill.Rating(mu=0.000, sigma=1.000)
"""
if mu is None:
mu = self.mu
if sigma is None:
sigma = self.sigma
return Rating(mu, sigma)
def v_win(self, diff, draw_margin):
"""The non-draw version of "V" function. "V" calculates a variation of
a mean.
"""
x = diff - draw_margin
denom = self.cdf(x)
return (self.pdf(x) / denom) if denom else -x
def v_draw(self, diff, draw_margin):
"""The draw version of "V" function."""
abs_diff = abs(diff)
a, b = draw_margin - abs_diff, -draw_margin - abs_diff
denom = self.cdf(a) - self.cdf(b)
numer = self.pdf(b) - self.pdf(a)
return ((numer / denom) if denom else a) * (-1 if diff < 0 else +1)
def w_win(self, diff, draw_margin):
"""The non-draw version of "W" function. "W" calculates a variation of
a standard deviation.
"""
x = diff - draw_margin
v = self.v_win(diff, draw_margin)
w = v * (v + x)
if 0 < w < 1:
return w
raise _floating_point_error(self)
def w_draw(self, diff, draw_margin):
"""The draw version of "W" function."""
abs_diff = abs(diff)
a, b = draw_margin - abs_diff, -draw_margin - abs_diff
denom = self.cdf(a) - self.cdf(b)
if not denom:
raise _floating_point_error(self)
v = self.v_draw(abs_diff, draw_margin)
return (v ** 2) + (a * self.pdf(a) - b * self.pdf(b)) / denom
def validate_rating_groups(self, rating_groups):
"""Validates a ``rating_groups`` argument. It should contain more than
2 groups and all groups must not be empty.
>>> env = TrueSkill()
>>> env.validate_rating_groups([])
Traceback (most recent call last):
...
ValueError: need multiple rating groups
>>> env.validate_rating_groups([(Rating(),)])
Traceback (most recent call last):
...
ValueError: need multiple rating groups
>>> env.validate_rating_groups([(Rating(),), ()])
Traceback (most recent call last):
...
ValueError: each group must contain multiple ratings
>>> env.validate_rating_groups([(Rating(),), (Rating(),)])
... #doctest: +ELLIPSIS
        [(trueskill.Rating(...),), (trueskill.Rating(...),)]
"""
# check group sizes
if len(rating_groups) < 2:
            raise ValueError('need multiple rating groups')
elif not all(rating_groups):
            raise ValueError('each group must contain multiple ratings')
# check group types
group_types = set(map(type, rating_groups))
if len(group_types) != 1:
raise TypeError('All groups should be same type')
elif group_types.pop() is Rating:
raise TypeError('Rating cannot be a rating group')
# normalize rating_groups
if isinstance(rating_groups[0], dict):
dict_rating_groups = rating_groups
rating_groups = []
keys = []
for dict_rating_group in dict_rating_groups:
rating_group, key_group = [], []
for key, rating in iteritems(dict_rating_group):
rating_group.append(rating)
key_group.append(key)
rating_groups.append(tuple(rating_group))
keys.append(tuple(key_group))
else:
rating_groups = list(rating_groups)
keys = None
return rating_groups, keys
def validate_weights(self, weights, rating_groups, keys=None):
if weights is None:
weights = [(1,) * len(g) for g in rating_groups]
elif isinstance(weights, dict):
weights_dict, weights = weights, []
for x, group in enumerate(rating_groups):
w = []
weights.append(w)
for y, rating in enumerate(group):
if keys is not None:
y = keys[x][y]
w.append(weights_dict.get((x, y), 1))
return weights
def factor_graph_builders(self, rating_groups, ranks, weights):
"""Makes nodes for the TrueSkill factor graph.
        Here's an example of a TrueSkill factor graph for a 1 vs 2 vs 1 match::
rating_layer: O O O O (PriorFactor)
| | | |
| | | |
perf_layer: O O O O (LikelihoodFactor)
| \ / |
| | |
team_perf_layer: O O O (SumFactor)
\ / \ /
| |
team_diff_layer: O O (SumFactor)
| |
| |
trunc_layer: O O (TruncateFactor)
"""
flatten_ratings = sum(map(tuple, rating_groups), ())
flatten_weights = sum(map(tuple, weights), ())
size = len(flatten_ratings)
group_size = len(rating_groups)
# create variables
rating_vars = [Variable() for x in range(size)]
perf_vars = [Variable() for x in range(size)]
team_perf_vars = [Variable() for x in range(group_size)]
team_diff_vars = [Variable() for x in range(group_size - 1)]
team_sizes = _team_sizes(rating_groups)
# layer builders
def build_rating_layer():
for rating_var, rating in zip(rating_vars, flatten_ratings):
yield PriorFactor(rating_var, rating, self.tau)
def build_perf_layer():
for rating_var, perf_var in zip(rating_vars, perf_vars):
yield LikelihoodFactor(rating_var, perf_var, self.beta ** 2)
def build_team_perf_layer():
for team, team_perf_var in enumerate(team_perf_vars):
if team > 0:
start = team_sizes[team - 1]
else:
start = 0
end = team_sizes[team]
child_perf_vars = perf_vars[start:end]
coeffs = flatten_weights[start:end]
yield SumFactor(team_perf_var, child_perf_vars, coeffs)
def build_team_diff_layer():
for team, team_diff_var in enumerate(team_diff_vars):
yield SumFactor(team_diff_var,
team_perf_vars[team:team + 2], [+1, -1])
def build_trunc_layer():
for x, team_diff_var in enumerate(team_diff_vars):
if callable(self.draw_probability):
# dynamic draw probability
team_perf1, team_perf2 = team_perf_vars[x:x + 2]
args = (Rating(team_perf1), Rating(team_perf2), self)
draw_probability = self.draw_probability(*args)
else:
# static draw probability
draw_probability = self.draw_probability
size = sum(map(len, rating_groups[x:x + 2]))
draw_margin = calc_draw_margin(draw_probability, size, self)
if ranks[x] == ranks[x + 1]: # is a tie?
v_func, w_func = self.v_draw, self.w_draw
else:
v_func, w_func = self.v_win, self.w_win
yield TruncateFactor(team_diff_var,
v_func, w_func, draw_margin)
# build layers
return (build_rating_layer, build_perf_layer, build_team_perf_layer,
build_team_diff_layer, build_trunc_layer)
def run_schedule(self, build_rating_layer, build_perf_layer,
build_team_perf_layer, build_team_diff_layer,
build_trunc_layer, min_delta=DELTA):
"""Sends messages within every nodes of the factor graph until the
result is reliable.
"""
if min_delta <= 0:
raise ValueError('min_delta must be greater than 0')
layers = []
def build(builders):
layers_built = [list(build()) for build in builders]
layers.extend(layers_built)
return layers_built
# gray arrows
layers_built = build([build_rating_layer,
build_perf_layer,
build_team_perf_layer])
rating_layer, perf_layer, team_perf_layer = layers_built
for f in chain(*layers_built):
f.down()
# arrow #1, #2, #3
team_diff_layer, trunc_layer = build([build_team_diff_layer,
build_trunc_layer])
team_diff_len = len(team_diff_layer)
for x in range(10):
if team_diff_len == 1:
# only two teams
team_diff_layer[0].down()
delta = trunc_layer[0].up()
else:
# multiple teams
delta = 0
for x in range(team_diff_len - 1):
team_diff_layer[x].down()
delta = max(delta, trunc_layer[x].up())
team_diff_layer[x].up(1) # up to right variable
for x in range(team_diff_len - 1, 0, -1):
team_diff_layer[x].down()
delta = max(delta, trunc_layer[x].up())
team_diff_layer[x].up(0) # up to left variable
            # repeat until the update becomes small enough
if delta <= min_delta:
break
# up both ends
team_diff_layer[0].up(0)
team_diff_layer[team_diff_len - 1].up(1)
# up the remainder of the black arrows
for f in team_perf_layer:
for x in range(len(f.vars) - 1):
f.up(x)
for f in perf_layer:
f.up()
return layers
def rate(self, rating_groups, ranks=None, weights=None, min_delta=DELTA):
"""Recalculates ratings by the ranking table::
env = TrueSkill() # uses default settings
# create ratings
r1 = env.create_rating(42.222)
r2 = env.create_rating(89.999)
# calculate new ratings
rating_groups = [(r1,), (r2,)]
rated_rating_groups = env.rate(rating_groups, ranks=[0, 1])
# save new ratings
(r1,), (r2,) = rated_rating_groups
``rating_groups`` is a list of rating tuples or dictionaries that
        represents each team of the match. You will get a result in the same
structure as this argument. Rating dictionaries for this may be useful
to choose specific player's new rating::
# load players from the database
p1 = load_player_from_database('Arpad Emrick Elo')
p2 = load_player_from_database('Mark Glickman')
p3 = load_player_from_database('Heungsub Lee')
# calculate new ratings
rating_groups = [{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}]
rated_rating_groups = env.rate(rating_groups, ranks=[0, 1])
# save new ratings
for player in [p1, p2, p3]:
player.rating = rated_rating_groups[player.team][player]
:param rating_groups: a list of tuples or dictionaries containing
:class:`Rating` objects.
        :param ranks: a ranking table. By default, it is the same as the order of
the ``rating_groups``.
:param weights: weights of each players for "partial play".
:param min_delta: each loop checks a delta of changes and the loop
                          will stop if the delta is less than this argument.
:returns: recalculated ratings same structure as ``rating_groups``.
        :raises: :exc:`FloatingPointError` occurs when the winners have a much
                 lower rating than the losers. Higher floating-point precision
                 could solve this error; set the backend to "mpmath".
.. versionadded:: 0.2
"""
rating_groups, keys = self.validate_rating_groups(rating_groups)
weights = self.validate_weights(weights, rating_groups, keys)
group_size = len(rating_groups)
if ranks is None:
ranks = range(group_size)
elif len(ranks) != group_size:
raise ValueError('Wrong ranks')
# sort rating groups by rank
by_rank = lambda x: x[1][1]
sorting = sorted(enumerate(zip(rating_groups, ranks, weights)),
key=by_rank)
sorted_rating_groups, sorted_ranks, sorted_weights = [], [], []
for x, (g, r, w) in sorting:
sorted_rating_groups.append(g)
sorted_ranks.append(r)
# make weights to be greater than 0
sorted_weights.append(max(min_delta, w_) for w_ in w)
# build factor graph
args = (sorted_rating_groups, sorted_ranks, sorted_weights)
builders = self.factor_graph_builders(*args)
args = builders + (min_delta,)
layers = self.run_schedule(*args)
# make result
rating_layer, team_sizes = layers[0], _team_sizes(sorted_rating_groups)
transformed_groups = []
for start, end in zip([0] + team_sizes[:-1], team_sizes):
group = []
for f in rating_layer[start:end]:
group.append(Rating(float(f.var.mu), float(f.var.sigma)))
transformed_groups.append(tuple(group))
by_hint = lambda x: x[0]
unsorting = sorted(zip((x for x, __ in sorting), transformed_groups),
key=by_hint)
if keys is None:
return [g for x, g in unsorting]
# restore the structure with input dictionary keys
return [dict(zip(keys[x], g)) for x, g in unsorting]
def quality(self, rating_groups, weights=None):
"""Calculates the match quality of the given rating groups. A result
is the draw probability in the association::
env = TrueSkill()
if env.quality([team1, team2, team3]) < 0.50:
print('This match seems to be not so fair')
:param rating_groups: a list of tuples or dictionaries containing
:class:`Rating` objects.
:param weights: weights of each players for "partial play".
.. versionadded:: 0.2
"""
rating_groups, keys = self.validate_rating_groups(rating_groups)
weights = self.validate_weights(weights, rating_groups, keys)
flatten_ratings = sum(map(tuple, rating_groups), ())
flatten_weights = sum(map(tuple, weights), ())
length = len(flatten_ratings)
# a vector of all of the skill means
mean_matrix = Matrix([[r.mu] for r in flatten_ratings])
# a matrix whose diagonal values are the variances (sigma ** 2) of each
# of the players.
def variance_matrix(height, width):
variances = (r.sigma ** 2 for r in flatten_ratings)
for x, variance in enumerate(variances):
yield (x, x), variance
variance_matrix = Matrix(variance_matrix, length, length)
# the player-team assignment and comparison matrix
def rotated_a_matrix(set_height, set_width):
t = 0
for r, (cur, next) in enumerate(zip(rating_groups[:-1],
rating_groups[1:])):
for x in range(t, t + len(cur)):
yield (r, x), flatten_weights[x]
t += 1
x += 1
for x in range(x, x + len(next)):
yield (r, x), -flatten_weights[x]
set_height(r + 1)
set_width(x + 1)
rotated_a_matrix = Matrix(rotated_a_matrix)
a_matrix = rotated_a_matrix.transpose()
# match quality further derivation
_ata = (self.beta ** 2) * rotated_a_matrix * a_matrix
_atsa = rotated_a_matrix * variance_matrix * a_matrix
start = mean_matrix.transpose() * a_matrix
middle = _ata + _atsa
end = rotated_a_matrix * mean_matrix
# make result
e_arg = (-0.5 * start * middle.inverse() * end).determinant()
s_arg = _ata.determinant() / middle.determinant()
return math.exp(e_arg) * math.sqrt(s_arg)
def expose(self, rating):
"""Returns the value of the rating exposure. It starts from 0 and
converges to the mean. Use this as a sort key in a leaderboard::
leaderboard = sorted(ratings, key=env.expose, reverse=True)
.. versionadded:: 0.4
"""
k = self.mu / self.sigma
return rating.mu - k * rating.sigma
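    # Worked example (added for illustration): with the defaults mu = 25 and
    # sigma = 25/3, k = mu / sigma = 3, so a fresh Rating() exposes
    # 25 - 3 * (25/3) = 0, matching the docstring's note that the exposure
    # starts from 0 and converges to the mean as sigma shrinks.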
def make_as_global(self):
"""Registers the environment as the global environment.
>>> env = TrueSkill(mu=50)
>>> Rating()
trueskill.Rating(mu=25.000, sigma=8.333)
>>> env.make_as_global() #doctest: +ELLIPSIS
trueskill.TrueSkill(mu=50.000, ...)
>>> Rating()
trueskill.Rating(mu=50.000, sigma=8.333)
But if you need just one environment, :func:`setup` is better to use.
"""
return setup(env=self)
def __repr__(self):
c = type(self)
if callable(self.draw_probability):
f = self.draw_probability
draw_probability = '.'.join([f.__module__, f.__name__])
else:
draw_probability = '%.1f%%' % (self.draw_probability * 100)
if self.backend is None:
backend = ''
elif isinstance(self.backend, tuple):
backend = ', backend=...'
else:
backend = ', backend=%r' % self.backend
args = ('.'.join([c.__module__, c.__name__]), self.mu, self.sigma,
self.beta, self.tau, draw_probability, backend)
return ('%s(mu=%.3f, sigma=%.3f, beta=%.3f, tau=%.3f, '
'draw_probability=%s%s)' % args)
def rate_1vs1(rating1, rating2, drawn=False, min_delta=DELTA, env=None):
"""A shortcut to rate just 2 players in a head-to-head match::
alice, bob = Rating(25), Rating(30)
alice, bob = rate_1vs1(alice, bob)
alice, bob = rate_1vs1(alice, bob, drawn=True)
:param rating1: the winner's rating if they didn't draw.
:param rating2: the loser's rating if they didn't draw.
:param drawn: if the players drew, set this to ``True``. Defaults to
``False``.
:param min_delta: will be passed to :meth:`rate`.
:param env: the :class:`TrueSkill` object. Defaults to the global
environment.
:returns: a tuple containing recalculated 2 ratings.
.. versionadded:: 0.2
"""
if env is None:
env = global_env()
ranks = [0, 0 if drawn else 1]
teams = env.rate([(rating1,), (rating2,)], ranks, min_delta=min_delta)
return teams[0][0], teams[1][0]
def quality_1vs1(rating1, rating2, env=None):
"""A shortcut to calculate the match quality between just 2 players in
a head-to-head match::
if quality_1vs1(alice, bob) < 0.50:
print('This match seems to be not so fair')
:param rating1: the rating.
    :param rating2: the other rating.
:param env: the :class:`TrueSkill` object. Defaults to the global
environment.
.. versionadded:: 0.2
"""
if env is None:
env = global_env()
return env.quality([(rating1,), (rating2,)])
def global_env():
"""Gets the :class:`TrueSkill` object which is the global environment."""
try:
global_env.__trueskill__
except AttributeError:
# setup the default environment
setup()
return global_env.__trueskill__
def setup(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
draw_probability=DRAW_PROBABILITY, backend=None, env=None):
"""Setups the global environment.
:param env: the specific :class:`TrueSkill` object to be the global
environment. It is optional.
>>> Rating()
trueskill.Rating(mu=25.000, sigma=8.333)
>>> setup(mu=50) #doctest: +ELLIPSIS
trueskill.TrueSkill(mu=50.000, ...)
>>> Rating()
trueskill.Rating(mu=50.000, sigma=8.333)
"""
if env is None:
env = TrueSkill(mu, sigma, beta, tau, draw_probability, backend)
global_env.__trueskill__ = env
return env
def rate(rating_groups, ranks=None, weights=None, min_delta=DELTA):
"""A proxy function for :meth:`TrueSkill.rate` of the global environment.
.. versionadded:: 0.2
"""
return global_env().rate(rating_groups, ranks, weights, min_delta)
def quality(rating_groups, weights=None):
"""A proxy function for :meth:`TrueSkill.quality` of the global
environment.
.. versionadded:: 0.2
"""
return global_env().quality(rating_groups, weights)
def expose(rating):
"""A proxy function for :meth:`TrueSkill.expose` of the global environment.
.. versionadded:: 0.4
"""
return global_env().expose(rating)
# Append deprecated methods into :class:`TrueSkill` and :class:`Rating`
from . import deprecated # noqa
from .deprecated import ( # noqa
dynamic_draw_probability, match_quality, transform_ratings)
deprecated.ensure_backward_compatibility(TrueSkill, Rating)
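# --- Hedged usage sketch (added for illustration; not part of the upstream
# module). End-to-end use of the module-level API under the default
# environment; the quoted numbers are approximate.
def _demo_trueskill():  # pragma: no cover
    alice, bob = Rating(), Rating()    # both start at mu=25.000, sigma=8.333
    print('quality: %.3f' % quality_1vs1(alice, bob))  # ~0.447 for two fresh ratings
    alice, bob = rate_1vs1(alice, bob)  # alice beats bob
    print('%r %r' % (alice, bob))       # alice.mu rises above 25, bob.mu drops below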
|
Ninjakow/TrueSkill
|
lib/trueskill/__init__.py
|
Python
|
gpl-3.0
| 27,585
|
[
"Gaussian"
] |
5172cb3abb1a2de21a501dc6bb74b7f7d1b593f39f0d19bd1a6c0134f5825980
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("osmchadjango.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^', include("osmchadjango.changeset.urls", namespace="changeset")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
JadsonReis/osmcha-django
|
config/urls.py
|
Python
|
gpl-3.0
| 1,228
|
[
"VisIt"
] |
dfc5574e676b125dfab7a098ed9c8bf63c83d174a80a8a1556df248cbf584e87
|
import os
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
import pprint
def avg (values):
return (sum(values) / float(len(values)))
# performance report object
class PerformanceReport(object):
GOLDEN_NORMAL = 1
GOLDEN_FAIL = 2
GOLDEN_BETTER = 3
def __init__ (self,
scenario,
machine_name,
core_count,
avg_cpu,
avg_gbps,
avg_mpps,
avg_gbps_per_core,
avg_mpps_per_core,
):
self.scenario = scenario
self.machine_name = machine_name
self.core_count = core_count
self.avg_cpu = avg_cpu
self.avg_gbps = avg_gbps
self.avg_mpps = avg_mpps
self.avg_gbps_per_core = avg_gbps_per_core
self.avg_mpps_per_core = avg_mpps_per_core
def show (self):
print("\n")
print("scenario: {0}".format(self.scenario))
print("machine name: {0}".format(self.machine_name))
print("DP core count: {0}".format(self.core_count))
print("average CPU: {0}".format(self.avg_cpu))
print("average Gbps: {0}".format(self.avg_gbps))
print("average Mpps: {0}".format(self.avg_mpps))
print("average pkt size (bytes): {0}".format( (self.avg_gbps * 1000 / 8) / self.avg_mpps))
print("average Gbps per core (at 100% CPU): {0}".format(self.avg_gbps_per_core))
print("average Mpps per core (at 100% CPU): {0}".format(self.avg_mpps_per_core))
def check_golden (self, golden_mpps):
if self.avg_mpps_per_core < golden_mpps['min']:
return self.GOLDEN_FAIL
if self.avg_mpps_per_core > golden_mpps['max']:
return self.GOLDEN_BETTER
return self.GOLDEN_NORMAL
def report_to_analytics(self, ga, golden_mpps):
print("\n* Reporting to GA *\n")
ga.gaAddTestQuery(TestName = self.scenario,
TRexMode = 'stl',
SetupName = self.machine_name,
TestType = 'performance',
Mppspc = self.avg_mpps_per_core,
ActionNumber = os.getenv("BUILD_NUM","n/a"),
GoldenMin = golden_mpps['min'],
GoldenMax = golden_mpps['max'])
ga.emptyAndReportQ()
    def norm_senario (self):
        # normalize the scenario name: spaces, dashes and commas are treated as
        # separators and the non-empty tokens are re-joined with '-'
        s = self.scenario
        for sep in (' ', '-', ','):
            s = s.replace(sep, '+')
        tokens = [tok for tok in s.split('+') if tok]
        return '-'.join(tokens)
def report_to_elk(self, elk,elk_obj, golden_mpps):
print("\n* Reporting to elk *\n")
elk_obj['test']={ "name" : self.norm_senario(),
"type" : "stateless",
"cores" : self.core_count,
"cpu%" : self.avg_cpu,
"mpps" : self.avg_mpps,
"streams_count" : 1,
"mpps_pc" : self.avg_mpps_per_core,
"gbps_pc" : self.avg_gbps_per_core,
"gbps" : self.avg_gbps,
"avg-pktsize" : ((1000.0*self.avg_gbps/(8.0*self.avg_mpps))),
"latecny" : { "min" : -1.0,
"max" : -1.0,
"avr" : -1.0
}
};
#pprint.pprint(elk_obj);
# push to elk
elk.perf.push_data(elk_obj)
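# Illustrative sketch (not part of the original test suite): how a
# PerformanceReport could be built and checked against a golden range. The
# numbers below are made-up assumptions, purely for demonstration.
def _example_performance_report_usage():
    report = PerformanceReport(scenario          = "VM - 64 bytes, single CPU",
                               machine_name      = "example-setup",
                               core_count        = 1,
                               avg_cpu           = 95.0,
                               avg_gbps          = 10.0,
                               avg_mpps          = 14.0,
                               avg_gbps_per_core = 10.0 * (100.0 / 95.0),
                               avg_mpps_per_core = 14.0 * (100.0 / 95.0))
    report.show()
    # golden range in Mpps per core: GOLDEN_FAIL below 'min', GOLDEN_BETTER above
    # 'max', GOLDEN_NORMAL otherwise
    return report.check_golden({'min': 12.0, 'max': 20.0})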
class STLPerformance_Test(CStlGeneral_Test):
"""Tests for stateless client"""
def setUp(self):
CStlGeneral_Test.setUp(self)
self.c = CTRexScenario.stl_trex
self.c.connect()
self.c.reset()
def tearDown (self):
CStlGeneral_Test.tearDown(self)
def build_perf_profile_vm (self, pkt_size, cache_size = None):
size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
src_ip = '16.0.0.1'
dst_ip = '48.0.0.1'
base_pkt = Ether()/IP(src=src_ip,dst=dst_ip)/UDP(dport=12,sport=1025)
pad = max(0, size - len(base_pkt)) * 'x'
vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1", max_value="10.0.0.255", size=4, step=1,op="inc"),
STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ),
STLVmFixIpv4(offset = "IP")
],
cache_size = cache_size
);
pkt = STLPktBuilder(pkt = base_pkt/pad, vm = vm)
return STLStream(packet = pkt, mode = STLTXCont())
def build_perf_profile_syn_attack (self, pkt_size):
size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
# TCP SYN
base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
pad = max(0, size - len(base_pkt)) * 'x'
# vm
vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
min_value="16.0.0.0",
max_value="18.0.0.254",
size=4, op="random"),
STLVmFlowVar(name="src_port",
min_value=1025,
max_value=65000,
size=2, op="random"),
STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
STLVmFixIpv4(offset = "IP"), # fix checksum
STLVmWrFlowVar(fv_name="src_port",
pkt_offset= "TCP.sport") # fix udp len
]
)
pkt = STLPktBuilder(pkt = base_pkt,
vm = vm)
return STLStream(packet = pkt,
                         random_seed = 0x1234, # can be removed; a fixed seed gives the same random values on every run
mode = STLTXCont())
# single CPU, VM, no cache, 64 bytes
def test_performance_vm_single_cpu (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
scenario_cfg['name'] = "VM - 64 bytes, single CPU"
scenario_cfg['streams'] = self.build_perf_profile_vm(64)
scenario_cfg['core_count'] = 1
scenario_cfg['mult'] = setup_cfg['mult']
scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
self.execute_single_scenario(scenario_cfg)
# single CPU, VM, cached, 64 bytes
def test_performance_vm_single_cpu_cached (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
scenario_cfg['name'] = "VM - 64 bytes, single CPU, cache size 1024"
scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
scenario_cfg['core_count'] = 1
scenario_cfg['mult'] = setup_cfg['mult']
scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
self.execute_single_scenario(scenario_cfg)
# single CPU, syn attack, 64 bytes
def test_performance_syn_attack_single_cpu (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
scenario_cfg['name'] = "syn attack - 64 bytes, single CPU"
scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
scenario_cfg['core_count'] = 1
scenario_cfg['mult'] = setup_cfg['mult']
scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
self.execute_single_scenario(scenario_cfg)
# two CPUs, VM, no cache, 64 bytes
def test_performance_vm_multi_cpus (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
scenario_cfg['name'] = "VM - 64 bytes, multi CPUs"
scenario_cfg['streams'] = self.build_perf_profile_vm(64)
scenario_cfg['core_count'] = setup_cfg['core_count']
scenario_cfg['mult'] = setup_cfg['mult']
scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
self.execute_single_scenario(scenario_cfg)
# multi CPUs, VM, cached, 64 bytes
def test_performance_vm_multi_cpus_cached (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
scenario_cfg['name'] = "VM - 64 bytes, multi CPU, cache size 1024"
scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
scenario_cfg['core_count'] = setup_cfg['core_count']
scenario_cfg['mult'] = setup_cfg['mult']
scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
self.execute_single_scenario(scenario_cfg)
# multi CPUs, syn attack, 64 bytes
def test_performance_syn_attack_multi_cpus (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
scenario_cfg['name'] = "syn attack - 64 bytes, multi CPUs"
scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
scenario_cfg['core_count'] = setup_cfg['core_count']
scenario_cfg['mult'] = setup_cfg['mult']
scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
self.execute_single_scenario(scenario_cfg)
############################################# test's infra functions ###########################################
def execute_single_scenario (self, scenario_cfg):
golden = scenario_cfg['mpps_per_core_golden']
report = self.execute_single_scenario_iteration(scenario_cfg)
if self.GAManager:
report.report_to_analytics(self.GAManager, golden)
#report to elk
if self.elk:
elk_obj = self.get_elk_obj()
report.report_to_elk(self.elk,elk_obj, golden)
rc = report.check_golden(golden)
if rc == PerformanceReport.GOLDEN_NORMAL or rc == PerformanceReport.GOLDEN_BETTER:
return
print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1}'".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden']))
assert 0, "performance failure"
def execute_single_scenario_iteration (self, scenario_cfg):
print("\nExecuting performance scenario: '{0}'\n".format(scenario_cfg['name']))
self.c.reset(ports = [0])
self.c.add_streams(ports = [0], streams = scenario_cfg['streams'])
# use one core
cores_per_port = self.c.system_info.get('dp_core_count_per_port', 0)
if cores_per_port < scenario_cfg['core_count']:
assert 0, "test configuration requires {0} cores but only {1} per port are available".format(scenario_cfg['core_count'], cores_per_port)
core_mask = (2 ** scenario_cfg['core_count']) - 1
self.c.start(ports = [0], mult = scenario_cfg['mult'], core_mask = [core_mask])
        # stabilize
print("Step 1 - waiting for stabilization... (10 seconds)")
for _ in range(10):
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
print("\n")
samples = {'cpu' : [], 'bps': [], 'pps': []}
# let the server gather samples
print("Step 2 - Waiting for samples... (60 seconds)")
for i in range(0, 3):
# sample bps/pps
for _ in range(0, 20):
stats = self.c.get_stats(ports = 0)
max_queue_full = 100000 if self.is_VM else 10000
if stats['global'][ 'queue_full'] > max_queue_full:
assert 0, "Queue is full need to tune the multiplier"
# CPU results are not valid cannot use them
samples['bps'].append(stats[0]['tx_bps'])
samples['pps'].append(stats[0]['tx_pps'])
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
# sample CPU per core
rc = self.c._transmit('get_utilization')
if not rc:
raise Exception(rc)
data = rc.data()['cpu']
# filter
data = [s for s in data if s['ports'][0] == 0]
assert len(data) == scenario_cfg['core_count'] , "sampling info does not match core count"
for s in data:
samples['cpu'] += s['history']
stats = self.c.get_stats(ports = 0)
self.c.stop(ports = [0])
avg_values = {k:avg(v) for k, v in samples.items()}
avg_cpu = avg_values['cpu'] * scenario_cfg['core_count']
avg_gbps = avg_values['bps'] / 1e9
avg_mpps = avg_values['pps'] / 1e6
avg_gbps_per_core = avg_gbps * (100.0 / avg_cpu)
avg_mpps_per_core = avg_mpps * (100.0 / avg_cpu)
report = PerformanceReport(scenario = scenario_cfg['name'],
machine_name = CTRexScenario.setup_name,
core_count = scenario_cfg['core_count'],
avg_cpu = avg_cpu,
avg_gbps = avg_gbps,
avg_mpps = avg_mpps,
avg_gbps_per_core = avg_gbps_per_core,
avg_mpps_per_core = avg_mpps_per_core)
report.show()
print("")
golden = scenario_cfg['mpps_per_core_golden']
print("golden Mpps per core (at 100% CPU): min: {0}, max {1}".format(golden['min'], golden['max']))
return report
| kisel/trex-core | scripts/automation/regression/stateless_tests/stl_performance_test.py | Python | apache-2.0 | 14,351 | ["Elk"] | 5f037992447dcb94528e6e980809d28b2c0fee97a4ca1eea6f319d21d1b37305 |
#! /usr/bin/env python
import re
import math
import collections
import numpy as np
import time
import operator
from scipy.io import mmread, mmwrite
from random import randint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing as pp
from sklearn.svm import SVR
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.decomposition import ProbabilisticPCA, KernelPCA
from sklearn.decomposition import NMF
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet
import scipy.stats as stats
from sklearn import tree
from sklearn.feature_selection import f_regression
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc, f1_score
from sklearn.gaussian_process import GaussianProcess
import features
# working directory
dir = '.'
label_index = 770
# load train data
def load_train_fs():
    # During validation the training data was randomly shuffled first.
    # For the prediction process there is no need to shuffle the dataset.
    # Because of memory limits the Gaussian process uses only part of the training data,
    # so its predictions may differ slightly from a model trained on the shuffled data.
train_fs = np.genfromtxt(open(dir + '/train_v2_svm_5000.csv','rb'), delimiter=',', skip_header=1)
col_mean = stats.nanmean(train_fs, axis=0)
inds = np.where(np.isnan(train_fs))
train_fs[inds] = np.take(col_mean, inds[1])
train_fs[np.isinf(train_fs)] = 0
return train_fs
# load test data
def load_test_fs():
test_fs = np.genfromtxt(open(dir + '/train_v2.csv','rb'), delimiter=',', skip_header = 1)
col_mean = stats.nanmean(test_fs, axis=0)
inds = np.where(np.isnan(test_fs))
test_fs[inds] = np.take(col_mean, inds[1])
test_fs[np.isinf(test_fs)] = 0
return test_fs
# extract features from test data
def test_type(test_fs):
x_Test = test_fs[:,range(1, label_index)]
return x_Test
# extract features from train data
def train_type(train_fs):
train_x = train_fs[:,range(1, label_index)]
train_y= train_fs[:,-1]
return train_x, train_y
# transform the loss to the binary form
def toLabels(train_y):
labels = np.zeros(len(train_y))
labels[train_y>0] = 1
return labels
# generate the output file based on the predictions, clipping losses to the [0, 100] range
def output_preds(preds):
out_file = dir + '/output_svm_5000.csv'
fs = open(out_file,'w')
fs.write('id,loss\n')
for i in range(len(preds)):
if preds[i] > 100:
preds[i] = 100
elif preds[i] < 0:
preds[i] = 0
strs = str(i+105472) + ',' + str(np.float(preds[i]))
fs.write(strs + '\n');
fs.close()
return
# get the top feature indexes by invoking f_regression
def getTopFeatures(train_x, train_y, n_features=100):
f_val, p_val = f_regression(train_x,train_y)
f_val_dict = {}
p_val_dict = {}
for i in range(len(f_val)):
if math.isnan(f_val[i]):
f_val[i] = 0.0
f_val_dict[i] = f_val[i]
if math.isnan(p_val[i]):
p_val[i] = 0.0
p_val_dict[i] = p_val[i]
sorted_f = sorted(f_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
sorted_p = sorted(p_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
feature_indexs = []
for i in range(0,n_features):
feature_indexs.append(sorted_f[i][0])
return feature_indexs
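# Illustrative sketch (not part of the original pipeline): selecting the top
# univariate features with getTopFeatures on a synthetic matrix. The toy data
# below is made up purely for demonstration.
def _example_top_features():
    toy_x = np.random.rand(50, 10)
    toy_y = 2.0 * toy_x[:, 3] + 0.1 * np.random.rand(50)    # column 3 carries the signal
    return getTopFeatures(toy_x, toy_y, n_features=3)       # indexes of the 3 strongest columns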
# build the feature matrix: the selected base columns plus engineered pairwise feature columns
def get_data(train_x, feature_indexs, feature_minus_pair_list=[], feature_plus_pair_list=[],
feature_mul_pair_list=[], feature_divide_pair_list = [], feature_pair_sub_mul_list=[],
feature_pair_plus_mul_list = [],feature_pair_sub_divide_list = [], feature_minus2_pair_list = [],feature_mul2_pair_list=[],
feature_sub_square_pair_list=[], feature_square_sub_pair_list=[],feature_square_plus_pair_list=[]):
sub_train_x = train_x[:,feature_indexs]
for i in range(len(feature_minus_pair_list)):
ind_i = feature_minus_pair_list[i][0]
ind_j = feature_minus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i]-train_x[:,ind_j]))
for i in range(len(feature_plus_pair_list)):
ind_i = feature_plus_pair_list[i][0]
ind_j = feature_plus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] + train_x[:,ind_j]))
for i in range(len(feature_mul_pair_list)):
ind_i = feature_mul_pair_list[i][0]
ind_j = feature_mul_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] * train_x[:,ind_j]))
for i in range(len(feature_divide_pair_list)):
ind_i = feature_divide_pair_list[i][0]
ind_j = feature_divide_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] / train_x[:,ind_j]))
for i in range(len(feature_pair_sub_mul_list)):
ind_i = feature_pair_sub_mul_list[i][0]
ind_j = feature_pair_sub_mul_list[i][1]
ind_k = feature_pair_sub_mul_list[i][2]
sub_train_x = np.column_stack((sub_train_x, (train_x[:,ind_i]-train_x[:,ind_j]) * train_x[:,ind_k]))
return sub_train_x
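# Illustrative sketch (not part of the original pipeline): get_data stacks the
# selected base columns with engineered pairwise columns. The toy matrix and
# index pairs below are made-up assumptions to show the resulting shape.
def _example_get_data():
    toy_x = np.arange(12, dtype=float).reshape(3, 4)     # 3 samples, 4 raw features
    base_cols = [0, 2]                                   # keep raw columns 0 and 2
    minus_pairs = [(1, 3)]                               # append (column 1 - column 3)
    out = get_data(toy_x, base_cols, feature_minus_pair_list=minus_pairs)
    return out.shape                                     # (3, 3): two base + one engineered column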
# use gbm classifier to predict whether the loan defaults or not
def gbc_classify(train_x, train_y):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20],
features.feature_pair_sub_mul_list[:20])
labels = toLabels(train_y)
gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=8)
gbc.fit(sub_x_Train, labels)
return gbc
# use svm to predict the loss, based on the result of gbm classifier
def gbc_svr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
feature_pair_sub_mul_list, feature_pair_sub_list_sf, feature_pair_plus_list2):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
pred_labels = gbc.predict(sub_x_Test)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
ind_train = np.where(train_y > 0)[0]
ind_train0 = np.where(train_y == 0)[0]
preds_all = np.zeros([len(sub_x_Test)])
flag = (sub_x_Test[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Train = get_data(train_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Test = get_data(test_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Train[:,101] = np.log(1-sub_x_Train[:,101])
sub_x_Test[ind_tmp,101] = np.log(1-sub_x_Test[ind_tmp,101])
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
svr = SVR(C=16, kernel='rbf', gamma = 0.000122)
svr.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
preds = svr.predict(sub_x_Test[ind_test])
preds_all[ind_test] = np.power(np.e, preds)
preds_all[ind_tmp0] = 0
return preds_all
# use gbm regression to predict the loss, based on the result of gbm classifier
def gbc_gbr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
feature_pair_sub_mul_list, feature_pair_sub_list2):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20],feature_pair_sub_mul_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
pred_labels = gbc.predict(sub_x_Test)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
ind_train = np.where(train_y > 0)[0]
ind_train0 = np.where(train_y == 0)[0]
preds_all = np.zeros([len(sub_x_Test)])
flag = (sub_x_Test[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list2[:70]
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list2[:70]
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
gbr1000 = GradientBoostingRegressor(n_estimators=1300, max_depth=4, subsample=0.5, learning_rate=0.05)
gbr1000.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
preds = gbr1000.predict(sub_x_Test[ind_test])
preds_all[ind_test] = np.power(np.e, preds)
preds_all[ind_tmp0] = 0
return preds_all
# predict the loss with the trained Gaussian process regressor, predicting in chunks to limit memory use
def gp_predict(clf, x_Test):
size = len(x_Test)
part_size = 3000
cnt = (size-1) / part_size + 1
preds = []
for i in range(cnt):
if i < cnt - 1:
pred_part = clf.predict(x_Test[i*part_size: (i+1) * part_size])
else:
pred_part = clf.predict(x_Test[i*part_size: size])
preds.extend(pred_part)
return np.power(np.e,preds)
# train the gaussian process regressor
def gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test_part):
    # Because of memory limits the model is trained on only part of the training data.
    # Note: this step was run on a machine with more than 96 GB of RAM.
sub_x_Train[:,16] = np.log(1-sub_x_Train[:,16])
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
ind_train = np.where(train_y>0)[0]
part_size= int(0.7 * len(ind_train))
gp = GaussianProcess(theta0=1e-3, thetaL=1e-5, thetaU=10, corr= 'absolute_exponential')
gp.fit(sub_x_Train[ind_train[:part_size]], np.log(train_y[ind_train[:part_size]]))
flag = (sub_x_Test_part[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Test_part[ind_tmp,16] = np.log(1-sub_x_Test_part[ind_tmp,16])
sub_x_Test_part[ind_tmp] = scaler.transform(sub_x_Test_part[ind_tmp])
gp_preds_tmp = gp_predict(gp, sub_x_Test_part[ind_tmp])
gp_preds = np.zeros(len(sub_x_Test_part))
gp_preds[ind_tmp] = gp_preds_tmp
return gp_preds
# use gbm classifier to predict whether the loan defaults or not, then invoke the function gbc_gp_predict_part
def gbc_gp_predict(train_x, train_y, test_x):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20])
labels = toLabels(train_y)
gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=9)
gbc.fit(sub_x_Train, labels)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
gp_preds_part = gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test[ind_test])
gp_preds = np.zeros(len(test_x))
gp_preds[ind_test] = gp_preds_part
return gp_preds
# invoke the function gbc_svr_predict_part
def gbc_svr_predict(gbc, train_x, train_y, test_x):
svr_preds = gbc_svr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list, features.feature_pair_plus_list,
features.feature_pair_mul_list, features.feature_pair_divide_list,
features.feature_pair_sub_mul_list, features.feature_pair_sub_list_sf,
features.feature_pair_plus_list2)
return svr_preds
# invoke the function gbc_gbr_predict_part
def gbc_gbr_predict(gbc, train_x, train_y, test_x):
gbr_preds = gbc_gbr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list,
features.feature_pair_plus_list, features.feature_pair_mul_list,
features.feature_pair_divide_list, features.feature_pair_sub_mul_list,
features.feature_pair_sub_list2)
return gbr_preds
# the main function
if __name__ == '__main__':
train_fs = load_train_fs()
test_fs = load_test_fs()
train_x, train_y = train_type(train_fs)
test_x = test_type(test_fs)
gbc = gbc_classify(train_x, train_y)
svr_preds = gbc_svr_predict(gbc, train_x, train_y, test_x)
# gbr_preds = gbc_gbr_predict(gbc, train_x, train_y, test_x)
# gp_preds = gbc_gp_predict(train_x, train_y, test_x)
#preds_all = svr_preds * 0.4 + gp_preds * 0.25 + gbr_preds * 0.35
output_preds(svr_preds)
| Goodideax/CS249 | predict_svm_combine_1000.py | Python | bsd-3-clause | 14,603 | ["Gaussian"] | bb3bb566631be81e639ce523d48fb8bcef8454075d8c5a5b4e59ca8006002908 |
# coding: utf-8
from __future__ import division, unicode_literals
'''
Created on Oct 24, 2012
@author: shyue
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Oct 24, 2012"
import unittest
import os
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Specie
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class BVAnalyzerTest(PymatgenTest):
def setUp(self):
self.analyzer = BVAnalyzer()
def test_get_valence(self):
s = Structure.from_file(os.path.join(test_dir, "LiMn2O4.json"))
ans = [1, 1, 3, 3, 4, 4, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = self.get_structure("LiFePO4")
ans = [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = self.get_structure("Li3V2(PO4)3")
ans = [1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = Structure.from_file(os.path.join(test_dir, "Li4Fe3Mn1(PO4)4.json"))
ans = [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
s = self.get_structure("NaFePO4")
ans = [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2, -2, -2, -2,
- 2, -2, -2, -2, -2, -2, -2, -2, -2]
self.assertEqual(self.analyzer.get_valences(s), ans)
def test_get_oxi_state_structure(self):
s = Structure.from_file(os.path.join(test_dir, "LiMn2O4.json"))
news = self.analyzer.get_oxi_state_decorated_structure(s)
self.assertIn(Specie("Mn", 3), news.composition.elements)
self.assertIn(Specie("Mn", 4), news.composition.elements)
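# Illustrative sketch (not one of the test cases): how BVAnalyzer is used on its
# own, with the same LiMn2O4 fixture exercised above.
def _example_bv_analysis():
    s = Structure.from_file(os.path.join(test_dir, "LiMn2O4.json"))
    analyzer = BVAnalyzer()
    valences = analyzer.get_valences(s)                        # one oxidation state per site
    decorated = analyzer.get_oxi_state_decorated_structure(s)  # Structure decorated with Specie objects
    return valences, decorated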
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| Dioptas/pymatgen | pymatgen/analysis/tests/test_bond_valence.py | Python | mit | 2,450 | ["pymatgen"] | 2f4b2c86133d411dc54140c09db1b1121b3397a9d2ec6e9d04da2dd57fcc0f1c |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import py
import pytest
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import EXIT_OK
from _pytest.main import EXIT_USAGEERROR
def ConftestWithSetinitial(path):
conftest = PytestPluginManager()
conftest_setinitial(conftest, [path])
return conftest
def conftest_setinitial(conftest, args, confcutdir=None):
class Namespace(object):
def __init__(self):
self.file_or_dir = args
self.confcutdir = str(confcutdir)
self.noconftest = False
self.pyargs = False
conftest._set_initial_conftests(Namespace())
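# Minimal sketch (not one of the tests below): the two helpers above let a bare
# PytestPluginManager be pointed at a directory without invoking pytest itself.
# The path argument is a hypothetical placeholder.
def _example_manual_conftest_loading(some_dir="some/test/dir"):
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [some_dir])
    return conftest._getconftestmodules(py.path.local(some_dir))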
@pytest.mark.usefixtures("_sys_snapshot")
class TestConftestValueAccessGlobal(object):
@pytest.fixture(scope="module", params=["global", "inpackage"])
def basedir(self, request, tmpdir_factory):
tmpdir = tmpdir_factory.mktemp("basedir", numbered=True)
tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
if request.param == "inpackage":
tmpdir.ensure("adir/__init__.py")
tmpdir.ensure("adir/b/__init__.py")
yield tmpdir
def test_basic_init(self, basedir):
conftest = PytestPluginManager()
p = basedir.join("adir")
assert conftest._rget_with_confmod("a", p)[1] == 1
    def test_immediate_initialization_and_incremental_are_the_same(self, basedir):
conftest = PytestPluginManager()
assert not len(conftest._dirpath2confmods)
conftest._getconftestmodules(basedir)
snap1 = len(conftest._dirpath2confmods)
assert snap1 == 1
conftest._getconftestmodules(basedir.join("adir"))
assert len(conftest._dirpath2confmods) == snap1 + 1
conftest._getconftestmodules(basedir.join("b"))
assert len(conftest._dirpath2confmods) == snap1 + 2
def test_value_access_not_existing(self, basedir):
conftest = ConftestWithSetinitial(basedir)
with pytest.raises(KeyError):
conftest._rget_with_confmod("a", basedir)
def test_value_access_by_path(self, basedir):
conftest = ConftestWithSetinitial(basedir)
adir = basedir.join("adir")
assert conftest._rget_with_confmod("a", adir)[1] == 1
assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5
def test_value_access_with_confmod(self, basedir):
startdir = basedir.join("adir", "b")
startdir.ensure("xx", dir=True)
conftest = ConftestWithSetinitial(startdir)
mod, value = conftest._rget_with_confmod("a", startdir)
assert value == 1.5
path = py.path.local(mod.__file__)
assert path.dirpath() == basedir.join("adir", "b")
assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir, _sys_snapshot):
tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
tmpdir.ensure("adir-1.0/b/__init__.py")
tmpdir.ensure("adir-1.0/__init__.py")
ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
def test_doubledash_considered(testdir):
conf = testdir.mkdir("--option")
conf.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [conf.basename, conf.basename])
values = conftest._getconftestmodules(conf)
assert len(values) == 1
def test_issue151_load_all_conftests(testdir):
names = "code proj src".split()
for name in names:
p = testdir.mkdir(name)
p.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, names)
d = list(conftest._conftestpath2mod.values())
assert len(d) == len(names)
def test_conftest_global_import(testdir):
testdir.makeconftest("x=3")
p = testdir.makepyfile(
"""
import py, pytest
from _pytest.config import PytestPluginManager
conf = PytestPluginManager()
mod = conf._importconftest(py.path.local("conftest.py"))
assert mod.x == 3
import conftest
assert conftest is mod, (conftest, mod)
subconf = py.path.local().ensure("sub", "conftest.py")
subconf.write("y=4")
mod2 = conf._importconftest(subconf)
assert mod != mod2
assert mod2.y == 4
import conftest
assert conftest is mod2, (conftest, mod)
"""
)
res = testdir.runpython(p)
assert res.ret == 0
def test_conftestcutdir(testdir):
conf = testdir.makeconftest("")
p = testdir.mkdir("x")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
values = conftest._getconftestmodules(p)
assert len(values) == 0
values = conftest._getconftestmodules(conf.dirpath())
assert len(values) == 0
assert conf not in conftest._conftestpath2mod
# but we can still import a conftest directly
conftest._importconftest(conf)
values = conftest._getconftestmodules(conf.dirpath())
assert values[0].__file__.startswith(str(conf))
# and all sub paths get updated properly
values = conftest._getconftestmodules(p)
assert len(values) == 1
assert values[0].__file__.startswith(str(conf))
def test_conftestcutdir_inplace_considered(testdir):
conf = testdir.makeconftest("")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
values = conftest._getconftestmodules(conf.dirpath())
assert len(values) == 1
assert values[0].__file__.startswith(str(conf))
@pytest.mark.parametrize("name", "test tests whatever .dotdir".split())
def test_setinitial_conftest_subdirs(testdir, name):
sub = testdir.mkdir(name)
subconftest = sub.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
if name not in ("whatever", ".dotdir"):
assert subconftest in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 1
else:
assert subconftest not in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 0
def test_conftest_confcutdir(testdir):
testdir.makeconftest("assert 0")
x = testdir.mkdir("x")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""
)
)
result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
result.stdout.fnmatch_lines(["*--xyz*"])
assert "warning: could not load initial" not in result.stdout.str()
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_conftest_symlink(testdir):
"""Ensure that conftest.py is used for resolved symlinks."""
real = testdir.tmpdir.mkdir("real")
realtests = real.mkdir("app").mkdir("tests")
testdir.tmpdir.join("symlinktests").mksymlinkto(realtests)
testdir.tmpdir.join("symlink").mksymlinkto(real)
testdir.makepyfile(
**{
"real/app/tests/test_foo.py": "def test1(fixture): pass",
"real/conftest.py": textwrap.dedent(
"""
import pytest
print("conftest_loaded")
@pytest.fixture
def fixture():
print("fixture_used")
"""
),
}
)
result = testdir.runpytest("-vs", "symlinktests")
result.stdout.fnmatch_lines(
[
"*conftest_loaded*",
"real/app/tests/test_foo.py::test1 fixture_used",
"PASSED",
]
)
assert result.ret == EXIT_OK
# Should not cause "ValueError: Plugin already registered" (#4174).
result = testdir.runpytest("-vs", "symlink")
assert result.ret == EXIT_OK
realtests.ensure("__init__.py")
result = testdir.runpytest("-vs", "symlinktests/test_foo.py::test1")
result.stdout.fnmatch_lines(
[
"*conftest_loaded*",
"real/app/tests/test_foo.py::test1 fixture_used",
"PASSED",
]
)
assert result.ret == EXIT_OK
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_conftest_symlink_files(testdir):
"""Check conftest.py loading when running in directory with symlinks."""
real = testdir.tmpdir.mkdir("real")
source = {
"app/test_foo.py": "def test1(fixture): pass",
"app/__init__.py": "",
"app/conftest.py": textwrap.dedent(
"""
import pytest
print("conftest_loaded")
@pytest.fixture
def fixture():
print("fixture_used")
"""
),
}
testdir.makepyfile(**{"real/%s" % k: v for k, v in source.items()})
# Create a build directory that contains symlinks to actual files
# but doesn't symlink actual directories.
build = testdir.tmpdir.mkdir("build")
build.mkdir("app")
for f in source:
build.join(f).mksymlinkto(real.join(f))
build.chdir()
result = testdir.runpytest("-vs", "app/test_foo.py")
result.stdout.fnmatch_lines(["*conftest_loaded*", "PASSED"])
assert result.ret == EXIT_OK
def test_no_conftest(testdir):
testdir.makeconftest("assert 0")
result = testdir.runpytest("--noconftest")
assert result.ret == EXIT_NOTESTSCOLLECTED
result = testdir.runpytest()
assert result.ret == EXIT_USAGEERROR
def test_conftest_existing_resultlog(testdir):
x = testdir.mkdir("tests")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""
)
)
testdir.makefile(ext=".log", result="") # Writes result.log
result = testdir.runpytest("-h", "--resultlog", "result.log")
result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_existing_junitxml(testdir):
x = testdir.mkdir("tests")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""
)
)
testdir.makefile(ext=".xml", junit="") # Writes junit.xml
result = testdir.runpytest("-h", "--junitxml", "junit.xml")
result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_import_order(testdir, monkeypatch):
ct1 = testdir.makeconftest("")
sub = testdir.mkdir("sub")
ct2 = sub.join("conftest.py")
ct2.write("")
def impct(p):
return p
conftest = PytestPluginManager()
conftest._confcutdir = testdir.tmpdir
monkeypatch.setattr(conftest, "_importconftest", impct)
assert conftest._getconftestmodules(sub) == [ct1, ct2]
def test_fixture_dependency(testdir, monkeypatch):
ct1 = testdir.makeconftest("")
ct1 = testdir.makepyfile("__init__.py")
ct1.write("")
sub = testdir.mkdir("sub")
sub.join("__init__.py").write("")
sub.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def not_needed():
assert False, "Should not be called!"
@pytest.fixture
def foo():
assert False, "Should not be called!"
@pytest.fixture
def bar(foo):
return 'bar'
"""
)
)
subsub = sub.mkdir("subsub")
subsub.join("__init__.py").write("")
subsub.join("test_bar.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def bar():
return 'sub bar'
def test_event_fixture(bar):
assert bar == 'sub bar'
"""
)
)
result = testdir.runpytest("sub")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_conftest_found_with_double_dash(testdir):
sub = testdir.mkdir("sub")
sub.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--hello-world", action="store_true")
"""
)
)
p = sub.join("test_hello.py")
p.write("def test_hello(): pass")
result = testdir.runpytest(str(p) + "::test_hello", "-h")
result.stdout.fnmatch_lines(
"""
*--hello-world*
"""
)
class TestConftestVisibility(object):
def _setup_tree(self, testdir): # for issue616
# example mostly taken from:
# https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
runner = testdir.mkdir("empty")
package = testdir.mkdir("package")
package.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fxtr():
return "from-package"
"""
)
)
package.join("test_pkgroot.py").write(
textwrap.dedent(
"""\
def test_pkgroot(fxtr):
assert fxtr == "from-package"
"""
)
)
swc = package.mkdir("swc")
swc.join("__init__.py").ensure()
swc.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fxtr():
return "from-swc"
"""
)
)
swc.join("test_with_conftest.py").write(
textwrap.dedent(
"""\
def test_with_conftest(fxtr):
assert fxtr == "from-swc"
"""
)
)
snc = package.mkdir("snc")
snc.join("__init__.py").ensure()
snc.join("test_no_conftest.py").write(
textwrap.dedent(
"""\
def test_no_conftest(fxtr):
assert fxtr == "from-package" # No local conftest.py, so should
# use value from parent dir's
"""
)
)
print("created directory structure:")
for x in testdir.tmpdir.visit():
print(" " + x.relto(testdir.tmpdir))
return {"runner": runner, "package": package, "swc": swc, "snc": snc}
# N.B.: "swc" stands for "subdir with conftest.py"
# "snc" stands for "subdir no [i.e. without] conftest.py"
@pytest.mark.parametrize(
"chdir,testarg,expect_ntests_passed",
[
# Effective target: package/..
("runner", "..", 3),
("package", "..", 3),
("swc", "../..", 3),
("snc", "../..", 3),
# Effective target: package
("runner", "../package", 3),
("package", ".", 3),
("swc", "..", 3),
("snc", "..", 3),
# Effective target: package/swc
("runner", "../package/swc", 1),
("package", "./swc", 1),
("swc", ".", 1),
("snc", "../swc", 1),
# Effective target: package/snc
("runner", "../package/snc", 1),
("package", "./snc", 1),
("swc", "../snc", 1),
("snc", ".", 1),
],
)
def test_parsefactories_relative_node_ids(
self, testdir, chdir, testarg, expect_ntests_passed
):
"""#616"""
dirs = self._setup_tree(testdir)
print("pytest run in cwd: %s" % (dirs[chdir].relto(testdir.tmpdir)))
print("pytestarg : %s" % (testarg))
print("expected pass : %s" % (expect_ntests_passed))
with dirs[chdir].as_cwd():
reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize(
"confcutdir,passed,error", [(".", 2, 0), ("src", 1, 1), (None, 1, 1)]
)
def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
"""Test that conftest files are detected only up to an ini file, unless
an explicit --confcutdir option is given.
"""
root = testdir.tmpdir
src = root.join("src").ensure(dir=1)
src.join("pytest.ini").write("[pytest]")
src.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fix1(): pass
"""
)
)
src.join("test_foo.py").write(
textwrap.dedent(
"""\
def test_1(fix1):
pass
def test_2(out_of_reach):
pass
"""
)
)
root.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def out_of_reach(): pass
"""
)
)
args = [str(src)]
if confcutdir:
args = ["--confcutdir=%s" % root.join(confcutdir)]
result = testdir.runpytest(*args)
match = ""
if passed:
match += "*%d passed*" % passed
if error:
match += "*%d error*" % error
result.stdout.fnmatch_lines(match)
def test_issue1073_conftest_special_objects(testdir):
testdir.makeconftest(
"""\
class DontTouchMe(object):
def __getattr__(self, x):
raise Exception('cant touch me')
x = DontTouchMe()
"""
)
testdir.makepyfile(
"""\
def test_some():
pass
"""
)
res = testdir.runpytest()
assert res.ret == 0
def test_conftest_exception_handling(testdir):
testdir.makeconftest(
"""\
raise ValueError()
"""
)
testdir.makepyfile(
"""\
def test_some():
pass
"""
)
res = testdir.runpytest()
assert res.ret == 4
assert "raise ValueError()" in [line.strip() for line in res.errlines]
def test_hook_proxy(testdir):
"""Session's gethookproxy() would cache conftests incorrectly (#2016).
It was decided to remove the cache altogether.
"""
testdir.makepyfile(
**{
"root/demo-0/test_foo1.py": "def test1(): pass",
"root/demo-a/test_foo2.py": "def test1(): pass",
"root/demo-a/conftest.py": """\
def pytest_ignore_collect(path, config):
return True
""",
"root/demo-b/test_foo3.py": "def test1(): pass",
"root/demo-c/test_foo4.py": "def test1(): pass",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*test_foo1.py*", "*test_foo3.py*", "*test_foo4.py*", "*3 passed*"]
)
def test_required_option_help(testdir):
testdir.makeconftest("assert 0")
x = testdir.mkdir("x")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true", required=True)
"""
)
)
result = testdir.runpytest("-h", x)
assert "argument --xyz is required" not in result.stdout.str()
assert "general:" in result.stdout.str()
| cloudera/hue | desktop/core/ext-py/pytest-4.6.11/testing/test_conftest.py | Python | apache-2.0 | 19,666 | ["VisIt"] | 8f964641b09ac376cb669e34c22dbebec369f601aca2edf5f8055604d239b99d |
import os, sys, re
import optparse
import shutil
import pandas
import numpy
import subprocess
#import time
#####################################
#This is the wrapper for luciphor2.
#It is responsible for taking command line arguments and building a
#configuration file for running luciphor2, which helps localize PTMs on
#identified modified peptides and ultimately produces an estimate of the
#false localization rate of the modifications.
#
#VERSION 1.10A
version="1.20A"
#DATE: 12/22/2015
date="12/22/2015"
#####################################
print "-----------------------------------------------------------------------"
print "Welcome to the lucphor2 wrapper for Galaxy, Wohlschlegel Lab UCLA"
print "Written by William Barshop"
print "Version: ",version
print "Date: ",date
####################################
#Argument parsing! So much fun!
#We'll use OptParse even though some
#people really rave about argparse...
#
#
# NB: With Optparse, if an option is
# not specified, it will take a
# value of None
####################################
#print sys.argv,"THESE ARE THE ARGS"
parser = optparse.OptionParser()
parser.add_option("--pout",action="store",type="string",dest="operation_folder")
parser.add_option("--hcd",action="store_true",dest="hcd")
parser.add_option("--cid",action="store_true",dest="cid")
parser.add_option("--ppm",action="store_true",dest="ppm")
parser.add_option("--ms2tol",action="store", type="float", dest="ms2tol")
parser.add_option("--minmz",action="store", type="float", dest="min_mz")
parser.add_option("--fixed",action="store", type="string", dest="fixed_mods")
parser.add_option("--target",action="store",type="string",dest="target_mods")
parser.add_option("--variable",action="store",type="string",dest="variable_mods")
parser.add_option("--nl",action="store",type="string",dest="neutral_loss")
parser.add_option("--dm",action="store",type="float",dest="decoy_mass")
parser.add_option("--dnl",action="store",type="string",dest="decoy_neutral_loss")
parser.add_option("--mc",action="store",type="int",dest="max_charge")
parser.add_option("--min_psms",action="store",type="int",dest="min_psms")
parser.add_option("--ml",action="store",type="int",dest="max_length")
parser.add_option("--mp",action="store",type="int",dest="max_num_permutation")
parser.add_option("--mst",action="store",type="float",dest="model_score_threshold")
parser.add_option("--st",action="store",type="float",dest="score_threshold")
parser.add_option("--nt",action="store",type="int",dest="num_threads")
parser.add_option("--mzml",action="store",type="string",dest="mzml_files")
parser.add_option("--expgroups",action="store",type="string",dest="exp_group_file")
(options,args) = parser.parse_args()
aa_masses = {'A' : 71.037114, 'R' : 156.101111, 'N' : 114.042927, 'D' : 115.026943, 'C' : 103.009185, 'E' : 129.042593, 'Q' : 128.058578, 'G' : 57.021464, 'H' : 137.058912, 'I' : 113.084064, 'L' : 113.084064, 'K' : 128.094963, 'M' : 131.040485, 'F' : 147.068414, 'P' : 97.052764, 'S' : 87.032028, 'T' : 101.047679, 'U' : 150.95363, 'W' : 186.079313, 'Y' : 163.06332, 'V' : 99.068414 }
######### I scraped these AA masses from Matrix, who proudly bring us mascot....
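# For illustration: further below, a modified residue is reported to Luciphor as
# residue mass + modification delta, e.g. a serine carrying an (assumed) +79.966331
# phospho delta is written as aa_masses['S'] + 79.966331.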
#########HERE I NEED TO
#########CRAWL THROUGH THE POUT FOLDER AND
#########GET A LIST OF ALL THE SPECTRAL FILES AND SUUUCH
#OKKKKKAYYYYY Let's get a list of all the MZML files we got!
mzml_dict={}
for eachfile in options.mzml_files.split(","):
if eachfile in mzml_dict:
print "Somehow you gave me the same file twice.... Just a warning, but you might want to take a look at what's going on!"
else:
mzml_dict[eachfile]={}
#### Okay, now we'll need to read in the percolator experimental groups so that we
#### can build the input files for Luciphor2 appropriately! (that means get the groups right...)
if options.exp_group_file is None:
print "I need to know how to group the experiments to properly build my Luciphor input files... Gimmie the file!"
sys.exit(2)
# I'm lazy, so let's just read the file into a dataframe.
#print options.exp_group_file,type(options.exp_group_file)
#with open(options.exp_group_file,'r') as expfile:
group_information = pandas.read_csv(options.exp_group_file,sep='\t')
#print group_information,"this was group info...."
#print type(group_information)
run_dict={}
#And then for easy access, we'll make a dict of it.
for index,row in group_information.iterrows():
run_dict[row['Crux File Integer']]=row
#for each in run_dict.keys():
# print type(each)
infiles = []
for root, subFolders, files in os.walk(options.operation_folder):
for eachfile in files:
if 'target.psms.txt' in eachfile:
infiles.append(str(os.path.join(root,eachfile)))
dataframe_vector=[]
for eachfile in infiles:
newdf=pandas.DataFrame.from_csv(eachfile,sep='\t',index_col=False)
dataframe_vector.append(newdf)
del newdf
combined_results=pandas.concat(dataframe_vector)
#del dataframe_vector
#######Okay, so now we have all our percolator PSMs loaded into a
#######single pandas dataframe. We'll take that concatenated dataframe,
#######iterate over all the results to fill in the file names,
#######and construct the Luciphor input files.
theruns=[]
#for each_run in group_information['Original File Name']:
# thisrun=each_run+".mzML"
# theruns.append(thisrun)
combined_results['file_idx']=combined_results['file_idx'].astype(str)
combined_results['file']=combined_results['file'].astype(str)
#for each_run in group_information['Original File Name']:
# thisrun=str(each_run)+".mzML"
new_results=[]
for each_idx in group_information['Crux File Integer']:
#print type(each_idx),type(combined_results['file_idx'])
mask = combined_results[(combined_results.file_idx == str(each_idx))]
mask['file']=run_dict[int(each_idx)]['Original File Name']+".mzML"
new_results.append(mask)
#print run_dict[int(each_idx)]['Original File Name']+".mzML"
combined_results=pandas.concat(new_results)
output_results=pandas.concat(new_results)
####### Okay, now we've taken care of file names, let's just go ahead and build us some luciphor inputs!
luci_input_writers={}
luci_input_files=[] #This is just a list of the files we'll have to run luciphor on!
for eachgroup in set(group_information['Fractionation Group ID String']):
mywriter=open(options.operation_folder+eachgroup+".pin_out/crux-output/"+"luciphor_spectra_"+eachgroup+".tsv",'w')
luci_input_files.append(options.operation_folder+eachgroup+".pin_out/crux-output/"+"luciphor_spectra_"+eachgroup+".tsv")
mywriter.write("srcFile\tscanNum\tcharge\tPSMscore\tpeptide\tmodSites\n")
luci_input_writers[eachgroup]=mywriter
run_to_group={}
for index,row in group_information.iterrows(): #['Original File Name']:
thisrun = row['Original File Name']+".mzML"
thisgroup = row['Fractionation Group ID String']
run_to_group[thisrun]=thisgroup
target_masses = []
non_decimal = re.compile(r'[^\d.]+')
for eachmod in options.target_mods.split(","):
if eachmod is not "":
target_masses.append(non_decimal.sub('',eachmod))
target_masses=list(set(target_masses))
global target_mass_conversion #CRAPPY WORKAROUND....
target_mass_conversion={}
for each in target_masses:
target_mass_conversion[float(each)]=str(each)
#We're also going to actually put the rest of our mod masses in there, too...
for each in options.fixed_mods.split(","):
#print each
if each is not "":
target_mass_conversion[float(non_decimal.sub('',each))]=str(non_decimal.sub('',each))
for each in options.variable_mods.split(","):
#print each
if each is not "":
target_mass_conversion[float(non_decimal.sub('',each))]=str(non_decimal.sub('',each))
rounded_target_masses=[str(float(each)) for each in target_masses]
#print "These are the rounded targets...",rounded_target_masses
combined_results['percolator q-value']=1.0-combined_results['percolator q-value'] #Luciphor expects PSM scores to increase... So we'll go ahead and give the score as 1-q.
print target_mass_conversion,"this is target mass conv."
regex = re.compile('[^a-zA-Z]')
for each_idx in group_information['Crux File Integer']:
mask = combined_results[(combined_results['file_idx'] == str(each_idx))]
#print '|'.join(target_masses),"This is target masses...."
mask2=mask[(mask['sequence'].str.contains('|'.join(rounded_target_masses)))] #numpy.any(mods in mask.sequence for mods in target_masses)]
#mask=mask.mask(applyme)
#print mask,"THIS IS MASK"
#mask2 = mask[mask.sequence.contains('|'.join(target_masses))]
#print mask2,"this is mask2<------------------"
### OKAY, NOW YOU'LL NEED TO WRITE THE OUTPUT FOR THIS FILE
for index,eachrow in mask2.iterrows():
thispep=eachrow['sequence']
fullseqlen=len(thispep)
unmodpeplen=len(regex.sub('',thispep))
newpep=[]
moddict={}
adjustment=0
actualindex=0
nummodsadded=0
#print "---------------------------"
#print "Working on :",thispep
i=0
while i < len(thispep):
#print "now...",thispep[i]
#print "i is...",i
#print "actual index is...",actualindex
if thispep[i].isalpha():
newpep.append(thispep[i])
actualindex+=1
else:
buf=""
while (i < len(thispep) and (not thispep[i].isalpha())):
buf+=thispep[i]
i+=1
#print target_mass_conversion
for eachmod in target_mass_conversion:
if str(eachmod) in buf:
buf=buf.replace(str(eachmod),target_mass_conversion[eachmod])
break
#print "Adding ",buf,"at",actualindex-1+nummodsadded
#print str(float(buf.replace('[','').replace(']',''))),"and",+aa_masses[regex.sub('',thispep)[actualindex-1+nummodsadded]]
moddict[actualindex-1+nummodsadded]=str(float(buf.replace('[','').replace(']',''))+aa_masses[regex.sub('',thispep)[actualindex-1+nummodsadded]]) #,regex.sub('',thispep)[actualindex-1+nummodsadded]) #### I REPLACED realstart with ACTUALINDEX here
nummodsadded+=1
i+=1
#print "---------------------------"
modstr=""
for eachmodposition in sorted(moddict.keys()):
modseq=moddict[eachmodposition]
modstr+=str(eachmodposition)+"="+modseq+","
modstr=modstr[:-1]
luci_input_writers[run_to_group[eachrow['file']]].write(str(eachrow['file'])+'\t'+str(eachrow['scan'])+'\t'+str(eachrow['charge'])+'\t'+str(eachrow['percolator q-value'])+'\t'+regex.sub('',eachrow['sequence'])+'\t'+modstr+"\n")
#mask = combined_results
#for eachgroup in set(group_information['Fractionation Group ID String']):
# myreader=open(options.operation_folder+"luciphor_spectra_"+eachgroup+".tsv",'rb')
# i=0
# for eachline in myreader:
# i+=1
# #print eachline
# if i>50:
# break
##### THIS WILL WRITE THE COMMON SECTION OF THE CONFIGURATION FILE
with open("shared_config.txt",'w') as configfile:
configfile.write("SPECTRUM_SUFFIX = mzML\n")
configfile.write("INPUT_TYPE = 1\n")
#### HCD OR CID ####
if options.hcd:
configfile.write("ALGORITHM = 1\n")
elif options.cid:
configfile.write("ALGORITHM = 0\n")
else:
print "You gotta tell me if it's HCD or CID data..."
sys.exit(2)
####################
configfile.write("TSV_HEADER = 1\n")
#### MODEL PSMS REQUIREMENTS ####
if options.min_psms:
configfile.write("MIN_NUM_PSMS_MODEL = "+str(options.min_psms)+"\n")
#################################
#### MASS ACCURACY STUFFS ####
configfile.write("MS2_TOL = "+str(options.ms2tol)+"\n")
if options.ppm:
configfile.write("MS2_TOL_UNITS = 1\n")
else:
configfile.write("MS2_TOL_UNITS = 0\n")
##############################
#### MIN MZ ####
configfile.write("MIN_MZ = "+str(options.min_mz)+"\n")
#################################
#configfile.write("WRITE_MATCHED_PEAKS_FILE = 1\n")
#### FIXED MODS ####
for eachmod in options.fixed_mods.split(","):
if eachmod is not "":
configfile.write("FIXED MOD = "+eachmod.strip()+"\n")
####################
#### VARIABLE MODS, UNSCORED ####
for eachmod in options.variable_mods.split(","):
if eachmod is not "":
configfile.write("VAR_MOD = "+eachmod.strip()+"\n")
#################################
#### TARGET MODS, SCORED ####
notargets=True
for eachmod in options.target_mods.split(","):
if eachmod is not "":
notargets=False
configfile.write("TARGET_MOD = "+eachmod.strip()+"\n")
if notargets:
print "What? No target modifications!? Give me something to score!"
sys.exit(2)
#############################
#### NEUTRAL LOSSES, SCORED ####
for eachnl in options.neutral_loss.split(","):
if eachnl is not "":
configfile.write("NL = "+eachnl.strip()+"\n")
################################
#### DECOY MASS SETTING ####
configfile.write("DECOY_MASS = "+str(options.decoy_mass)+"\n")
if options.decoy_mass is None:
print "No decoy mass? How am I supposed to calculate an FLR without a decoy mass!?"
sys.exit(2)
############################
#### DECOY NEUTRAL LOSSES ####
for eachnl in options.decoy_neutral_loss.split(","):
if eachnl is not "":
configfile.write("DECOY_NL = "+eachnl+"\n")
##############################
configfile.write("MAX_CHARGE_STATE = "+str(options.max_charge)+"\n")
configfile.write("MAX_PEP_LEN = "+str(options.max_length)+"\n")
configfile.write("MAX_NUM_PERM ="+str(options.max_num_permutation)+"\n")
configfile.write("SELECTION_METHOD = 0\n") #THIS SCRIPT IS HARD CODED FOR USE WITH PERCOLATOR SCORES...
configfile.write("MODELING_SCORE_THRESHOLD = "+str(1.0-float(options.model_score_threshold))+"\n")
configfile.write("SCORING_THRESHOLD = "+str(1.0-float(options.score_threshold))+"\n")
configfile.write("MIN_NUM_PSMS_MODEL = 50\n")
configfile.write("MOD_PEP_REP = 0\n")
configfile.write("NUM_THREADS = "+str(options.num_threads)+"\n")
configfile.write("RUN_MODE = 0\n")
##### ABOVE THIS, FOR EACH RUN, WE WILL HAVE TO APPEND THE
##### SPECTRUM_PATH AND INPUT_DATA SECTIONS AND OUTPUT_FILE
##### now: maybe just input_data and output_file?
## the plan:
#foreachgroup in thegroups
## 1chdir to the folder
## 2write full config
## 3run
## 4fix psms output file
basedir=os.getcwd()
#We'll copy the mzML files to the proper folders...
#files = [f for f in os.listdir('.')]
#for f in files:
# print f
for eachrun in run_to_group:
#thislink=os.readlink(eachrun)
#shutil.copy(thislink,options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
#thislink=os.readlink(eachrun)
shutil.copy(eachrun,options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
#And we'll go ahead and start the ball rolling!
os.chdir(basedir)
for eachgroup in set(group_information['Fractionation Group ID String']):
os.chdir(basedir)
shutil.copy("shared_config.txt",options.operation_folder+eachgroup+".pin_out/crux-output/luciphor_cfg_"+eachgroup+".txt")
for eachwriter in luci_input_writers:
luci_input_writers[eachwriter].close()
for eachgroup in set(group_information['Fractionation Group ID String']):
os.chdir(basedir)
shutil.copy("shared_config.txt",options.operation_folder+eachgroup+".pin_out/crux-output/luciphor_cfg_"+eachgroup+".txt")
os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
with open("luciphor_cfg_"+eachgroup+".txt",'a') as file_writer:
filestr="luciphor_spectra_"+eachgroup+".tsv"
outstr="luciphor_output_"+eachgroup+".tsv"
file_writer.write("SPECTRUM_PATH="+str(os.getcwd())+"\n")
file_writer.write("INPUT_DATA="+filestr+"\n")
file_writer.write("OUTPUT_FILE="+outstr+"\n")
processes=[]
#for eachgroup in set(group_information['Fractionation Group ID String']):
# os.chdir(basedir)
# #shutil.copy("shared_config.txt",options.operation_folder+eachgroup+".pin_out/crux-output/luciphor_cfg_"+eachgroup+".txt")
# os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
# #with open("luciphor_cfg_"+eachgroup+".txt",'a') as file_writer:
# # filestr="luciphor_spectra_"+eachgroup+".tsv"
# # outstr="luciphor_output_"+eachgroup+".tsv"
# # file_writer.write("SPECTRUM_PATH="+str(os.getcwd())+"\n")
# # file_writer.write("INPUT_DATA="+filestr+"\n")
# # file_writer.write("OUTPUT_FILE="+outstr+"\n")
# command = "java -jar /galaxy-central/tools/wohl-proteomics/luciphor2/lucXor.jar "+"luciphor_cfg_"+eachgroup+".txt"
# print "RUNNING: ",command,"in folder",str(os.getcwd())
# #os.system(command)
# p=(subprocess.Popen(command,shell=True)
# p.wait()
# if p is not 0:
# p=subprocess.Popen(command,shell=True)
# p.wait()
# if p is not 0:
# p=subprocess.Popen(command,shell=True)
# p.wait()
with open(os.devnull, "w") as fnull:
for eachgroup in set(group_information['Fractionation Group ID String']):
os.chdir(basedir)
os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
#processes.append(subprocess.Popen(command,shell=True)
command = "java -jar /galaxy-central/tools/wohl-proteomics/luciphor2/lucXor.jar "+"luciphor_cfg_"+eachgroup+".txt"
processes.append(subprocess.Popen(command.split()))
#processes.append(subprocess.Popen(command.split(),stdout=fnull, stderr=fnull)) # These pipes will hide the output from luciphor... this is only for DEBUG DEBUG DEBUG purposes...
#out, err = p.communicate()
#p=os.system(command)
#print "Luciphor exited with a status of ",str(os.system(command))
#time.sleep(20)
for each_proc in processes:
each_proc.wait()
###### INPUT-FILE CLEANUP ######
os.chdir(basedir)
for eachrun in run_to_group:
## #thislink = os.readlink(eachrun)
## os.unlink(options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
os.remove(options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
##### READ IN OUTPUT FILES #####
output_results['file_idx']=output_results['file_idx'].astype(str)
output_results['file']=output_results['file'].astype(str)
def extractScanNum(x):
return str(x['specId'].split(".")[1])
def extractFileName(x):
return str(x['specId'].split(".")[0])
group_to_luci_results={}
for eachgroup in set(group_information['Fractionation Group ID String']):
os.chdir(basedir)
os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
luci_result=pandas.read_csv("luciphor_output_"+str(eachgroup)+".tsv",sep='\t')
#scan_extracted=luci_result['specId'].split(".")[1]
scan_extracted=luci_result.apply(extractScanNum,axis=1)
fname_extracted=luci_result.apply(extractFileName,axis=1)
luci_result['scan']=scan_extracted
luci_result['file']=fname_extracted+".mzML"
#luci_result.assign(scan = lambda x: x['specId'].split(".")[1])
#for each,row in luci_result.iterrows():
# print each,row
#sys.exit(2)###################################################
group_to_luci_results[eachgroup]=luci_result
group_to_runids={}
for index,row in group_information.iterrows():
if row['Fractionation Group ID String'] in group_to_runids:
group_to_runids[row['Fractionation Group ID String']].append(str(row['Crux File Integer']))
else:
#print row,"THIS IS ROW!2"
group_to_runids[row['Fractionation Group ID String']]=[str(row['Crux File Integer'])]
##### We'll read in the raw inputs, and then append extra luciphor information to them! #####
groups_output={}
for each_group in group_to_runids:
groups_output[each_group]=output_results[(output_results['file_idx'].str.contains('|'.join(group_to_runids[each_group])))]
def replaceMods(input):
modDict=target_mass_conversion
#print "input",input
output=input
#print modDict,"This is modDict!"
for eachmod in modDict:
if str(eachmod) in output and str(modDict[eachmod]) not in output:
if float(eachmod)>0.0:
output=output.replace(str(eachmod),"+"+str(modDict[eachmod]))
else:
output=output.replace(str(eachmod),str(modDict[eachmod]))
#print "output",output
return output
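#A hedged worked example of the substitution above (the mapping values are
#illustrative only; the real ones come from target_mass_conversion built earlier):
#with target_mass_conversion = {"79.96633": "79.966331"},
#    replaceMods("AAS[79.96633]K")  -->  "AAS[+79.966331]K"
#Negative mass keys are substituted without the extra "+" prefix.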
##### We'll use this dict to convert fileidx to file names
fileidx_to_file={}
for index,row in group_information.iterrows():
thisrun = row['Original File Name']
thisidx = str(row['Crux File Integer'])
fileidx_to_file[thisidx]=thisrun
modsite_to_modmass={}
modsite_to_modmass_fixed={}
modsite_to_modmass_variable={}
for eachmod in options.target_mods.split(","):
modsite_to_modmass[eachmod[:1]]=non_decimal.sub('',eachmod)
for eachmod in options.fixed_mods.split(","):
modsite_to_modmass_fixed[eachmod[:1]]=non_decimal.sub('',eachmod)
for eachmod in options.variable_mods.split(","): ###### NB: At the moment, I'm not sure how LuciPhor handles these...
modsite_to_modmass_variable[eachmod[:1]]=non_decimal.sub('',eachmod)###### So I'll make this for later, but I don't know what to do with it... yet...
#print "This is the modsite dict!",modsite_to_modmass
##### We'll have to cram the luciphor results into the appropriate files... and modify the outputs!
modified_output={}
#### DEBUG LOGGING:
#logwriter=open(options.operation_folder+"luci_log.txt",'wb')
##########################################NEWNEWNEWNEWNEWNEWNEWNEW
def combineFileNameAndScanLuci(x):
return str(x['specId'].split(".")[0]+".mzML."+x['specId'].split(".")[1])
def combineFileNameAndScanPerco(x):
return str(x['file']+"."+str(x['scan']))
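#Hedged illustration of how these two key builders are expected to line up
#(run and scan values here are hypothetical):
#    LuciPhor row:   specId = "run01.12345.12345.2"     --> unique_name "run01.mzML.12345"
#    Percolator row: file = "run01.mzML", scan = 12345  --> unique_name "run01.mzML.12345"
#Matching unique_name keys are what the outer merge further below joins on.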
newlist=[]
for eachgroup in groups_output:
newlist.append(groups_output[eachgroup])
combined_groups=pandas.concat(newlist)#This is the combined percolator results...
combined_fname_scans=combined_groups.apply(combineFileNameAndScanPerco,axis=1)
combined_groups['unique_name']=combined_fname_scans
del newlist
newlist=[]
for eachgroup in group_to_luci_results:
newlist.append(group_to_luci_results[eachgroup])
combined_luci_groups=pandas.concat(newlist)#This is the combined luciphor results...
combined_fname_scans_luci=combined_luci_groups.apply(combineFileNameAndScanLuci,axis=1)
combined_luci_groups['unique_name']=combined_fname_scans_luci
del newlist
combined_luci_groups=combined_luci_groups.drop('scan',1)
combined_luci_groups=combined_luci_groups.drop('file',1)
merged_df=pandas.merge(combined_groups,combined_luci_groups, on="unique_name",sort=True,how="outer",indicator=False)
merged_df['numPPS']=merged_df['numPPS'].fillna(0) #assign back; fillna returns a new Series and does not modify merged_df in place
merged_df['numRPS']=merged_df['numRPS'].fillna(0) #PSMs with no LuciPhor result now fall into the numRPS < 1 branch below
merged_df=merged_df.drop_duplicates(subset='unique_name')
merged_df['luci_sequence']=""
#merge_numPPS_mask=merged_df[merged_df['numPPS']>=1]
for index,eachrow in merged_df.iterrows():
if eachrow['numRPS'] < 1:
zz=0
newPep=[]
while zz<len(eachrow['sequence']):
if not eachrow['sequence'][zz]=='[':
newPep.append(eachrow['sequence'][zz])
else:
buffer=newPep.pop()
while zz<len(eachrow['sequence']) and eachrow['sequence'][zz]!=']':
buffer+=eachrow['sequence'][zz]
zz+=1
buffer+=eachrow['sequence'][zz]
                buffer=buffer[:2]+"+"+str(target_mass_conversion[buffer[2:-1]])+"]" #We'll cram in the full mass accuracy mod as given in the settings...
newPep.append(buffer)
zz+=1
merged_df.loc[index,'luci_sequence']=''.join(newPep)
elif eachrow['numRPS'] >= 1:
zz=0
newPep=[]
while zz<len(eachrow['predictedPep1']):
if not eachrow['predictedPep1'][zz].islower():
site=eachrow['predictedPep1'][zz]
if eachrow['predictedPep1'][zz] in modsite_to_modmass_fixed:
if float(modsite_to_modmass_fixed[site])>0.0:
newPep.append(site+"[+"+modsite_to_modmass_fixed[site]+"]")
else:
newPep.append(site+"["+modsite_to_modmass_fixed[site]+"]")
else:
newPep.append(eachrow['predictedPep1'][zz])
else:
modsite=eachrow['predictedPep1'][zz].upper()
if modsite in modsite_to_modmass:
if float(modsite_to_modmass[modsite])>0.0:
newPep.append(modsite+"[+"+modsite_to_modmass[modsite]+"]")
else:
newPep.append(modsite+"["+modsite_to_modmass[modsite]+"]")
if modsite in modsite_to_modmass_fixed:
if float(modsite_to_modmass_fixed[modsite])>0.0:
newPep.append("[+"+modsite_to_modmass_fixed[modsite]+"]")
else:
newPep.append("["+modsite_to_modmass_fixed[modsite]+"]")
                elif modsite in modsite_to_modmass_variable: #use the uppercased residue here, not a stale 'site' from a previous iteration
                #if modsite in modsite_to_modmass_variable: ##### IS THIS HOW LUCIPHOR HANDLES VARIABLE MODS?
                    if float(modsite_to_modmass_variable[modsite])>0.0:
                        newPep.append("[+"+modsite_to_modmass_variable[modsite]+"]")
                    else:
                        newPep.append("["+modsite_to_modmass_variable[modsite]+"]")
zz+=1
merged_df.loc[index,'luci_sequence'] = ''.join(newPep)
merged_df.rename(columns={"specId":"luci_specId","peptide":"luci_peptide","pepProphet":"q-1","predictedPep1":"luci_predictedPep1","predictedPep2":"luci_predictedPep2","numPPS":"luci_numPPS","numRPS":"luci_numRPS","deltaScore":"luci_deltaScore","pep1score":"luci_pep1score","pep2score":"luci_pep2score","globalFLR":"luci_globalFLR","localFLR":"luci_localFLR"}, inplace=True)
merged_df=merged_df.drop('unique_name',1)
for each_idx in set(merged_df['file_idx']):
modified_output[each_idx]=merged_df[merged_df['file_idx']==str(each_idx)]
##########################################NEWNEWNEWNEWNEWNEWNEWNEW
##########################################NEWNEWNEWNEWNEWNEWNEWNEW
'''
for each_group_df in groups_output:
modify_me=groups_output[each_group_df].copy(deep=True)
fixed_sequences=modify_me['sequence'].apply(replaceMods)
modify_me['sequence']=fixed_sequences
for each_idx in set(modify_me['file_idx']):
one_run_modify=modify_me[modify_me['file_idx']==str(each_idx)].copy(deep=True)#deep=True) # 10/14/2015 added copy ....
one_run_modify['scan']=one_run_modify['scan'].astype(int)
temp=group_to_luci_results[each_group_df].copy(deep=True)
temp['scan']=temp['scan'].astype(int)
print set(temp['file']),"runs in group",each_group_df
one_run_luciphor=temp[temp['file'].str.contains(fileidx_to_file[str(each_idx)])]
print one_run_luciphor,"this is after filtering for<------------"+fileidx_to_file[str(each_idx)]
merged_df=pandas.merge(one_run_modify,one_run_luciphor, on="scan",sort=True,how="outer",indicator=False)
merged_df['numPPS'].fillna("0")
merged_df['numRPS'].fillna("0")
os.chdir(basedir)
#We'll iterate first over every scan in the merged output.
for index,eachrow in merged_df.iterrows():
if eachrow['numPPS'] >= 1:
zz=0
newPep=[]
while zz<len(eachrow['predictedPep1']):
if not eachrow['predictedPep1'][zz].islower():
site=eachrow['predictedPep1'][zz]
if eachrow['predictedPep1'][zz] in modsite_to_modmass_fixed:
if float(modsite_to_modmass_fixed[site])>0.0:
newPep.append(site+"[+"+modsite_to_modmass_fixed[site]+"]")
else:
newPep.append(site+"["+modsite_to_modmass_fixed[site]+"]")
else:
newPep.append(eachrow['predictedPep1'][zz])
else:
modsite=eachrow['predictedPep1'][zz].upper()
if modsite in modsite_to_modmass:
if float(modsite_to_modmass[modsite])>0.0:
newPep.append(modsite+"[+"+modsite_to_modmass[modsite]+"]")
else:
newPep.append(modsite+"["+modsite_to_modmass[modsite]+"]")
if modsite in modsite_to_modmass_fixed:
if float(modsite_to_modmass_fixed[modsite])>0.0:
newPep.append("[+"+modsite_to_modmass_fixed[modsite]+"]")
else:
newPep.append("["+modsite_to_modmass_fixed[modsite]+"]")
elif site in modsite_to_modmass_variable:
#if site in modsite_to_modmass_variable: ##### IS THIS HOW LUCIPHOR HANDLES VARIABLE MODS?
if float(modsite_to_modmass_variable[site])>0.0:
newPep.append("[+"+modsite_to_modmass_variable[site]+"]")
else:
newPep.append("["+modsite_to_modmass_variable[site]+"]")
zz+=1
merged_df.loc[index,'sequence'] = ''.join(newPep)
merged_df.rename(columns={"specId":"luci_specId","peptide":"luci_peptide","pepProphet":"q-1","predictedPep1":"luci_predictedPep1","predictedPep2":"luci_predictedPep2","numPPS":"luci_numPPS","numRPS":"luci_numRPS","deltaScore":"luci_deltaScore","pep1score":"luci_pep1score","pep2score":"luci_pep2score","globalFLR":"luci_globalFLR","localFLR":"luci_localFLR"}, inplace=True)
modified_output[each_idx]=merged_df
'''
###########################################################################
idx_to_group={}
for index,row in group_information.iterrows(): #['Original File Name']:
thisidx = str(row['Crux File Integer'])
thisgroup = row['Fractionation Group ID String']
idx_to_group[thisidx]=thisgroup
assembled_output={}
for eachkey in modified_output:
group=idx_to_group[eachkey]
if group in assembled_output:
assembled_output[group].append(modified_output[eachkey])
else:
assembled_output[group]=[modified_output[eachkey]]
#print assembled_output.keys(),"these are the keys"
###### Now we'll go ahead and write out each group to a file in the approp. location!
for eachgroup in assembled_output:
os.chdir(basedir)
os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
groupdf=pandas.concat(assembled_output[eachgroup])
shutil.move(eachgroup+".percolator.target.psms.txt",eachgroup+".percolator.target.psms_uncorrected.txt")
groupdf.to_csv(eachgroup+".percolator.target.psms.txt",sep="\t",index=False)
#for eachgroup in set(group_information['Fractionation Group ID String']):
# os.chdir(basedir)
# shutil.copy("shared_config.txt",options.operation_folder+eachgroup+".pin_out/crux-output/luciphor_cfg_"+eachgroup+".txt")
# os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
# with open("luciphor_cfg_"+eachgroup+".txt",'a') as file_writer:
#####for eachgroup in set(group_information['Fractionation Group ID String']):
##### pass
#for eachgroup in set(group_information['Fractionation Group ID String']):
# mywriter=open(options.operation_folder+eachgroup+".pin_out/crux-output/"+"luciphor_spectra_"+eachgroup+".tsv",'w')
# luci_input_files.append(options.operation_folder+eachgroup+".pin_out/crux-output/"+"luciphor_spectra_"+eachgroup+".tsv")
#shutil.copy("shared_config.txt","/home/galaxy/shared_config.txt")
#parser.print_usage()
#print options.max_length
#for each in options:
# print each
#logwriter.close()
print "-----------------------------------------------------------------------"
| wohllab/milkyway_proteomics | galaxy_milkyway_files/tools/wohl-proteomics/luciphor2/luciphor_wrapper.py | Python | mit | 32,505 | ["Galaxy"] | c11766ca47439895d1fd65c55e9b0b183b60347cf5d54a716ee60a433dc8d229 |
import ast
import datetime
import re
import time
from collections import defaultdict
from datetime import timedelta
from typing import (
AbstractSet,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
import django.contrib.auth
from bitfield import BitField
from bitfield.types import BitHandler
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, RegexValidator, URLValidator, validate_email
from django.db import models, transaction
from django.db.models import CASCADE, Manager, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext_lazy as _
from confirmation import settings as confirmation_settings
from zerver.lib import cache
from zerver.lib.cache import (
active_non_guest_user_ids_cache_key,
active_user_ids_cache_key,
bot_dict_fields,
bot_dicts_in_realm_cache_key,
bot_profile_cache_key,
bulk_cached_fetch,
cache_delete,
cache_set,
cache_with_key,
flush_message,
flush_realm,
flush_stream,
flush_submessage,
flush_used_upload_space_cache,
flush_user_profile,
get_realm_used_upload_space_cache_key,
get_stream_cache_key,
realm_alert_words_automaton_cache_key,
realm_alert_words_cache_key,
realm_user_dict_fields,
realm_user_dicts_cache_key,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
user_profile_by_id_cache_key,
user_profile_cache_key,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.types import (
DisplayRecipientT,
ExtendedFieldElement,
ExtendedValidator,
FieldElement,
ProfileData,
ProfileDataElementBase,
RealmUserValidator,
UserFieldElement,
Validator,
)
from zerver.lib.utils import generate_random_token, make_safe_digest
from zerver.lib.validator import (
check_date,
check_int,
check_list,
check_long_string,
check_short_string,
check_url,
validate_choice_field,
)
MAX_TOPIC_NAME_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH: int = 50
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[str], AbstractSet[str])
def query_for_ids(query: QuerySet, user_ids: List[int], field: str) -> QuerySet:
'''
This function optimizes searches of the form
`user_profile_id in (1, 2, 3, 4)` by quickly
building the where clauses. Profiling shows significant
speedups over the normal Django-based approach.
Use this very carefully! Also, the caller should
guard against empty lists of user_ids.
'''
assert(user_ids)
clause = f'{field} IN %s'
query = query.extra(
where=[clause], params=(tuple(user_ids),),
)
return query
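# Illustrative use only (the queryset and column name here are hypothetical):
#     query_for_ids(UserProfile.objects.all(), [1, 2, 3], 'zerver_userprofile.id')
# yields SQL roughly of the form
#     ... WHERE zerver_userprofile.id IN (1, 2, 3)
# As the docstring notes, callers must ensure user_ids is non-empty.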
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache cache.
#
# This local cache has a lifetime of just a single request; it is
# cleared inside `flush_per_request_caches` in our middleware. It
# could be replaced with smarter bulk-fetching logic that deduplicates
# queries for the same recipient; this is just a convenient way to
# write that code.
per_request_display_recipient_cache: Dict[int, DisplayRecipientT] = {}
def get_display_recipient_by_id(recipient_id: int, recipient_type: int,
recipient_type_id: Optional[int]) -> DisplayRecipientT:
"""
returns: an object describing the recipient (using a cache).
If the type is a stream, the type_id must be an int; a string is returned.
Otherwise, type_id may be None; an array of recipient dicts is returned.
"""
# Have to import here, to avoid circular dependency.
from zerver.lib.display_recipient import get_display_recipient_remote_cache
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient: 'Recipient') -> DisplayRecipientT:
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id,
)
def get_realm_emoji_cache_key(realm: 'Realm') -> str:
return f'realm_emoji:{realm.id}'
def get_active_realm_emoji_cache_key(realm: 'Realm') -> str:
return f'active_realm_emoji:{realm.id}'
# This simple call-once caching saves ~500us in auth_enabled_helper,
# which is a significant optimization for common_context. Note that
# these values cannot change in a running production system, but do
# regularly change within unit tests; we address the latter by calling
# clear_supported_auth_backends_cache in our standard tearDown code.
supported_backends: Optional[Set[type]] = None
def supported_auth_backends() -> Set[type]:
global supported_backends
    if supported_backends is None:
        supported_backends = django.contrib.auth.get_backends()
    assert supported_backends is not None
    return supported_backends
def clear_supported_auth_backends_cache() -> None:
global supported_backends
supported_backends = None
class Realm(models.Model):
MAX_REALM_NAME_LENGTH = 40
MAX_REALM_SUBDOMAIN_LENGTH = 40
INVITES_STANDARD_REALM_DAILY_MAX = 3000
MESSAGE_VISIBILITY_LIMITED = 10000
AUTHENTICATION_FLAGS = ['Google', 'Email', 'GitHub', 'LDAP', 'Dev',
'RemoteUser', 'AzureAD', 'SAML', 'GitLab', 'Apple']
SUBDOMAIN_FOR_ROOT_DOMAIN = ''
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# User-visible display name and description used on e.g. the organization homepage
name: Optional[str] = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True)
description: str = models.TextField(default="")
# A short, identifier-like name for the organization. Used in subdomains;
# e.g. on a server at example.com, an org with string_id `foo` is reached
# at `foo.example.com`.
string_id: str = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
# See RealmDomain for the domains that apply for a given organization.
emails_restricted_to_domains: bool = models.BooleanField(default=False)
invite_required: bool = models.BooleanField(default=True)
invite_by_admins_only: bool = models.BooleanField(default=False)
_max_invites: Optional[int] = models.IntegerField(null=True, db_column='max_invites')
disallow_disposable_email_addresses: bool = models.BooleanField(default=True)
authentication_methods: BitHandler = BitField(
flags=AUTHENTICATION_FLAGS, default=2**31 - 1,
)
# Whether the organization has enabled inline image and URL previews.
inline_image_preview: bool = models.BooleanField(default=True)
inline_url_embed_preview: bool = models.BooleanField(default=False)
# Whether digest emails are enabled for the organization.
digest_emails_enabled: bool = models.BooleanField(default=False)
# Day of the week on which the digest is sent (default: Tuesday).
digest_weekday: int = models.SmallIntegerField(default=1)
send_welcome_emails: bool = models.BooleanField(default=True)
message_content_allowed_in_email_notifications: bool = models.BooleanField(default=True)
mandatory_topics: bool = models.BooleanField(default=False)
add_emoji_by_admins_only: bool = models.BooleanField(default=False)
name_changes_disabled: bool = models.BooleanField(default=False)
email_changes_disabled: bool = models.BooleanField(default=False)
avatar_changes_disabled: bool = models.BooleanField(default=False)
POLICY_MEMBERS_ONLY = 1
POLICY_ADMINS_ONLY = 2
POLICY_FULL_MEMBERS_ONLY = 3
COMMON_POLICY_TYPES = [
POLICY_MEMBERS_ONLY,
POLICY_ADMINS_ONLY,
POLICY_FULL_MEMBERS_ONLY,
]
# Who in the organization is allowed to create streams.
create_stream_policy: int = models.PositiveSmallIntegerField(
default=POLICY_MEMBERS_ONLY)
# Who in the organization is allowed to invite other users to streams.
invite_to_stream_policy: int = models.PositiveSmallIntegerField(
default=POLICY_MEMBERS_ONLY)
USER_GROUP_EDIT_POLICY_MEMBERS = 1
USER_GROUP_EDIT_POLICY_ADMINS = 2
user_group_edit_policy: int = models.PositiveSmallIntegerField(
default=USER_GROUP_EDIT_POLICY_MEMBERS)
USER_GROUP_EDIT_POLICY_TYPES = [
USER_GROUP_EDIT_POLICY_MEMBERS,
USER_GROUP_EDIT_POLICY_ADMINS,
]
PRIVATE_MESSAGE_POLICY_UNLIMITED = 1
PRIVATE_MESSAGE_POLICY_DISABLED = 2
private_message_policy: int = models.PositiveSmallIntegerField(
default=PRIVATE_MESSAGE_POLICY_UNLIMITED)
PRIVATE_MESSAGE_POLICY_TYPES = [
PRIVATE_MESSAGE_POLICY_UNLIMITED,
PRIVATE_MESSAGE_POLICY_DISABLED,
]
# Who in the organization has access to users' actual email
# addresses. Controls whether the UserProfile.email field is the
# same as UserProfile.delivery_email, or is instead garbage.
EMAIL_ADDRESS_VISIBILITY_EVERYONE = 1
EMAIL_ADDRESS_VISIBILITY_MEMBERS = 2
EMAIL_ADDRESS_VISIBILITY_ADMINS = 3
EMAIL_ADDRESS_VISIBILITY_NOBODY = 4
email_address_visibility: int = models.PositiveSmallIntegerField(
default=EMAIL_ADDRESS_VISIBILITY_EVERYONE,
)
EMAIL_ADDRESS_VISIBILITY_TYPES = [
EMAIL_ADDRESS_VISIBILITY_EVERYONE,
# The MEMBERS level is not yet implemented on the backend.
## EMAIL_ADDRESS_VISIBILITY_MEMBERS,
EMAIL_ADDRESS_VISIBILITY_ADMINS,
EMAIL_ADDRESS_VISIBILITY_NOBODY,
]
# Threshold in days for new users to create streams, and potentially take
# some other actions.
waiting_period_threshold: int = models.PositiveIntegerField(default=0)
allow_message_deleting: bool = models.BooleanField(default=False)
DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
message_content_delete_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS,
)
allow_message_editing: bool = models.BooleanField(default=True)
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
message_content_edit_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS,
)
# Whether users have access to message edit history
allow_edit_history: bool = models.BooleanField(default=True)
DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS = 86400
allow_community_topic_editing: bool = models.BooleanField(default=True)
# Defaults for new users
default_twenty_four_hour_time: bool = models.BooleanField(default=False)
default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
DEFAULT_NOTIFICATION_STREAM_NAME = 'general'
INITIAL_PRIVATE_STREAM_NAME = 'core team'
STREAM_EVENTS_NOTIFICATION_TOPIC = _('stream events')
notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
)
signup_notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
)
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
'forever': -1,
}
# For old messages being automatically deleted
message_retention_days: int = models.IntegerField(null=False, default=-1)
# When non-null, all but the latest this many messages in the organization
# are inaccessible to users (but not deleted).
message_visibility_limit: Optional[int] = models.IntegerField(null=True)
# Messages older than this message ID in the organization are inaccessible.
first_visible_message_id: int = models.IntegerField(default=0)
# Valid org_types are {CORPORATE, COMMUNITY}
CORPORATE = 1
COMMUNITY = 2
org_type: int = models.PositiveSmallIntegerField(default=CORPORATE)
UPGRADE_TEXT_STANDARD = _("Available on Zulip Standard. Upgrade to access.")
# plan_type controls various features around resource/feature
# limitations for a Zulip organization on multi-tenant installations
# like Zulip Cloud.
SELF_HOSTED = 1
LIMITED = 2
STANDARD = 3
STANDARD_FREE = 4
plan_type: int = models.PositiveSmallIntegerField(default=SELF_HOSTED)
# This value is also being used in static/js/settings_bots.bot_creation_policy_values.
# On updating it here, update it there as well.
BOT_CREATION_EVERYONE = 1
BOT_CREATION_LIMIT_GENERIC_BOTS = 2
BOT_CREATION_ADMINS_ONLY = 3
bot_creation_policy: int = models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE)
BOT_CREATION_POLICY_TYPES = [
BOT_CREATION_EVERYONE,
BOT_CREATION_LIMIT_GENERIC_BOTS,
BOT_CREATION_ADMINS_ONLY,
]
# See upload_quota_bytes; don't interpret upload_quota_gb directly.
UPLOAD_QUOTA_LIMITED = 5
UPLOAD_QUOTA_STANDARD = 50
upload_quota_gb: Optional[int] = models.IntegerField(null=True)
VIDEO_CHAT_PROVIDERS = {
'disabled': {
'name': "None",
'id': 0,
},
'jitsi_meet': {
'name': "Jitsi Meet",
'id': 1,
},
# ID 2 was used for the now-deleted Google Hangouts.
# ID 3 reserved for optional Zoom, see below.
}
if settings.VIDEO_ZOOM_CLIENT_ID is not None and settings.VIDEO_ZOOM_CLIENT_SECRET is not None:
VIDEO_CHAT_PROVIDERS['zoom'] = {
'name': "Zoom",
'id': 3,
}
if settings.BIG_BLUE_BUTTON_SECRET is not None and settings.BIG_BLUE_BUTTON_URL is not None:
VIDEO_CHAT_PROVIDERS['big_blue_button'] = {
'name': "Big Blue Button",
'id': 4
}
video_chat_provider: int = models.PositiveSmallIntegerField(default=VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
default_code_block_language: Optional[str] = models.TextField(null=True, default=None)
# Define the types of the various automatically managed properties
property_types: Dict[str, Union[type, Tuple[type, ...]]] = dict(
add_emoji_by_admins_only=bool,
allow_edit_history=bool,
allow_message_deleting=bool,
bot_creation_policy=int,
create_stream_policy=int,
invite_to_stream_policy=int,
default_language=str,
        default_twenty_four_hour_time=bool,
description=str,
digest_emails_enabled=bool,
disallow_disposable_email_addresses=bool,
email_address_visibility=int,
email_changes_disabled=bool,
invite_required=bool,
invite_by_admins_only=bool,
inline_image_preview=bool,
inline_url_embed_preview=bool,
mandatory_topics=bool,
message_retention_days=(int, type(None)),
name=str,
name_changes_disabled=bool,
avatar_changes_disabled=bool,
emails_restricted_to_domains=bool,
send_welcome_emails=bool,
message_content_allowed_in_email_notifications=bool,
video_chat_provider=int,
waiting_period_threshold=int,
digest_weekday=int,
private_message_policy=int,
user_group_edit_policy=int,
default_code_block_language=(str, type(None)),
message_content_delete_limit_seconds=int,
)
DIGEST_WEEKDAY_VALUES = [0, 1, 2, 3, 4, 5, 6]
# Icon is the square mobile icon.
ICON_FROM_GRAVATAR = 'G'
ICON_UPLOADED = 'U'
ICON_SOURCES = (
(ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
(ICON_UPLOADED, 'Uploaded by administrator'),
)
icon_source: str = models.CharField(
default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES, max_length=1,
)
icon_version: int = models.PositiveSmallIntegerField(default=1)
# Logo is the horizontal logo we show in top-left of webapp navbar UI.
LOGO_DEFAULT = 'D'
LOGO_UPLOADED = 'U'
LOGO_SOURCES = (
(LOGO_DEFAULT, 'Default to Zulip'),
(LOGO_UPLOADED, 'Uploaded by administrator'),
)
logo_source: str = models.CharField(
default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
)
logo_version: int = models.PositiveSmallIntegerField(default=1)
night_logo_source: str = models.CharField(
default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
)
night_logo_version: int = models.PositiveSmallIntegerField(default=1)
def authentication_methods_dict(self) -> Dict[str, bool]:
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported from here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret: Dict[str, bool] = {}
supported_backends = [backend.__class__ for backend in supported_auth_backends()]
# `authentication_methods` is a bitfield.types.BitHandler, not
# a true dict; since it is still python2- and python3-compat,
# `iteritems` is its method to iterate over its contents.
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
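    # Hedged example (backend names are hypothetical): if this server only has the
    # Email and Dev backends configured, while the realm's bitfield enables Email
    # and GitHub but not Dev, this returns {'Email': True, 'Dev': False}; GitHub is
    # omitted because its backend is not in supported_auth_backends().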
def __str__(self) -> str:
return f"<Realm: {self.string_id} {self.id}>"
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
return get_realm_emoji_uncached(self)
@cache_with_key(get_active_realm_emoji_cache_key, timeout=3600*24*7)
def get_active_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
return get_active_realm_emoji_uncached(self)
def get_admin_users_and_bots(self) -> Sequence['UserProfile']:
"""Use this in contexts where we want administrative users as well as
bots with administrator privileges, like send_event calls for
notifications to all administrator users.
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
def get_human_admin_users(self) -> QuerySet:
"""Use this in contexts where we want only human users with
administrative privileges, like sending an email to all of a
realm's administrators (bots don't have real email addresses).
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_bot=False, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
def get_human_billing_admin_users(self) -> Sequence['UserProfile']:
return UserProfile.objects.filter(Q(role=UserProfile.ROLE_REALM_OWNER) | Q(is_billing_admin=True),
realm=self, is_bot=False, is_active=True)
def get_active_users(self) -> Sequence['UserProfile']:
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
def get_human_owner_users(self) -> QuerySet:
return UserProfile.objects.filter(realm=self, is_bot=False,
role=UserProfile.ROLE_REALM_OWNER,
is_active=True)
def get_bot_domain(self) -> str:
return get_fake_email_domain()
def get_notifications_stream(self) -> Optional['Stream']:
if self.notifications_stream is not None and not self.notifications_stream.deactivated:
return self.notifications_stream
return None
def get_signup_notifications_stream(self) -> Optional['Stream']:
if self.signup_notifications_stream is not None and not self.signup_notifications_stream.deactivated:
return self.signup_notifications_stream
return None
@property
def max_invites(self) -> int:
if self._max_invites is None:
return settings.INVITES_DEFAULT_REALM_DAILY_MAX
return self._max_invites
@max_invites.setter
def max_invites(self, value: Optional[int]) -> None:
self._max_invites = value
def upload_quota_bytes(self) -> Optional[int]:
if self.upload_quota_gb is None:
return None
# We describe the quota to users in "GB" or "gigabytes", but actually apply
# it as gibibytes (GiB) to be a bit more generous in case of confusion.
return self.upload_quota_gb << 30
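    # Worked example of the conversion above (numbers are illustrative): with
    # upload_quota_gb = 5, the enforced quota is 5 << 30 = 5,368,709,120 bytes
    # (5 GiB), slightly above the 5 GB = 5,000,000,000 bytes described to users.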
@cache_with_key(get_realm_used_upload_space_cache_key, timeout=3600*24*7)
def currently_used_upload_space_bytes(self) -> int:
used_space = Attachment.objects.filter(realm=self).aggregate(Sum('size'))['size__sum']
if used_space is None:
return 0
return used_space
def ensure_not_on_limited_plan(self) -> None:
if self.plan_type == Realm.LIMITED:
raise JsonableError(self.UPGRADE_TEXT_STANDARD)
@property
def subdomain(self) -> str:
return self.string_id
@property
def display_subdomain(self) -> str:
"""Likely to be temporary function to avoid signup messages being sent
to an empty topic"""
if self.string_id == "":
return "."
return self.string_id
@property
def uri(self) -> str:
return settings.EXTERNAL_URI_SCHEME + self.host
@property
def host(self) -> str:
# Use mark sanitized to prevent false positives from Pysa thinking that
# the host is user controlled.
return mark_sanitized(self.host_for_subdomain(self.subdomain))
@staticmethod
def host_for_subdomain(subdomain: str) -> str:
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return settings.EXTERNAL_HOST
default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
return settings.REALM_HOSTS.get(subdomain, default_host)
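    # Hedged example (host names are hypothetical): with EXTERNAL_HOST set to
    # "zulip.example.com" and no REALM_HOSTS override, host_for_subdomain("acme")
    # returns "acme.zulip.example.com", while the root-domain realm ("") maps to
    # "zulip.example.com" itself.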
@property
def is_zephyr_mirror_realm(self) -> bool:
return self.string_id == "zephyr"
@property
def webathena_enabled(self) -> bool:
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self) -> bool:
return self.is_zephyr_mirror_realm
class Meta:
permissions = (
('administer', "Administer a realm"),
('api_super_user', "Can send messages as other users for mirroring"),
)
post_save.connect(flush_realm, sender=Realm)
def get_realm(string_id: str) -> Realm:
return Realm.objects.get(string_id=string_id)
def name_changes_disabled(realm: Optional[Realm]) -> bool:
if realm is None:
return settings.NAME_CHANGES_DISABLED
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
def avatar_changes_disabled(realm: Realm) -> bool:
return settings.AVATAR_CHANGES_DISABLED or realm.avatar_changes_disabled
class RealmDomain(models.Model):
"""For an organization with emails_restricted_to_domains enabled, the list of
allowed domains"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# should always be stored lowercase
domain: str = models.CharField(max_length=80, db_index=True)
allow_subdomains: bool = models.BooleanField(default=False)
class Meta:
unique_together = ("realm", "domain")
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email: str) -> str:
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email: str) -> str:
return email.split("@")[-1].lower()
class DomainNotAllowedForRealmError(Exception):
pass
class DisposableEmailError(Exception):
pass
class EmailContainsPlusError(Exception):
pass
def get_realm_domains(realm: Realm) -> List[Dict[str, str]]:
return list(realm.realmdomain_set.values('domain', 'allow_subdomains'))
class RealmEmoji(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
author: Optional["UserProfile"] = models.ForeignKey(
"UserProfile", blank=True, null=True, on_delete=CASCADE,
)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.TextField(validators=[
MinLengthValidator(1),
# The second part of the regex (negative lookbehind) disallows names
# ending with one of the punctuation characters.
RegexValidator(regex=r'^[0-9a-z.\-_]+(?<![.\-_])$',
message=_("Invalid characters in emoji name"))])
# The basename of the custom emoji's filename; see PATH_ID_TEMPLATE for the full path.
file_name: Optional[str] = models.TextField(db_index=True, null=True, blank=True)
deactivated: bool = models.BooleanField(default=False)
PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"
def __str__(self) -> str:
return f"<RealmEmoji({self.realm.string_id}): {self.id} {self.name} {self.deactivated} {self.file_name}>"
def get_realm_emoji_dicts(realm: Realm,
only_active_emojis: bool=False) -> Dict[str, Dict[str, Any]]:
query = RealmEmoji.objects.filter(realm=realm).select_related('author')
if only_active_emojis:
query = query.filter(deactivated=False)
d = {}
from zerver.lib.emoji import get_emoji_url
for realm_emoji in query.all():
author_id = None
if realm_emoji.author:
author_id = realm_emoji.author_id
emoji_url = get_emoji_url(realm_emoji.file_name, realm_emoji.realm_id)
d[str(realm_emoji.id)] = dict(id=str(realm_emoji.id),
name=realm_emoji.name,
source_url=emoji_url,
deactivated=realm_emoji.deactivated,
author_id=author_id)
return d
def get_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
return get_realm_emoji_dicts(realm)
def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
realm_emojis = get_realm_emoji_dicts(realm, only_active_emojis=True)
d = {}
for emoji_id, emoji_dict in realm_emojis.items():
d[emoji_dict['name']] = emoji_dict
return d
def flush_realm_emoji(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
cache_set(get_active_realm_emoji_cache_key(realm),
get_active_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value: str) -> None:
regex = re.compile(r'^(?:(?:[\w\-#_= /:]*|[+]|[!])(\(\?P<\w+>.+\)))+$')
error_msg = _('Invalid filter pattern. Valid characters are %s.') % (
'[ a-zA-Z_#=/:+!-]',)
if not regex.match(str(value)):
raise ValidationError(error_msg)
try:
re.compile(value)
except re.error:
# Regex is invalid
raise ValidationError(error_msg)
def filter_format_validator(value: str) -> None:
regex = re.compile(r'^([\.\/:a-zA-Z0-9#_?=&;-]+%\(([a-zA-Z0-9_-]+)\)s)+[/a-zA-Z0-9#_?=&;-]*$')
if not regex.match(value):
raise ValidationError(_('Invalid URL format string.'))
class RealmFilter(models.Model):
"""Realm-specific regular expressions to automatically linkify certain
strings inside the Markdown processor. See "Custom filters" in the settings UI.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
pattern: str = models.TextField(validators=[filter_pattern_validator])
url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])
class Meta:
unique_together = ("realm", "pattern")
def __str__(self) -> str:
return f"<RealmFilter({self.realm.string_id}): {self.pattern} {self.url_format_string}>"
def get_realm_filters_cache_key(realm_id: int) -> str:
return f'{cache.KEY_PREFIX}:all_realm_filters:{realm_id}'
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache: Dict[int, List[Tuple[str, str, int]]] = {}
def realm_in_local_realm_filters_cache(realm_id: int) -> bool:
return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id: int) -> List[Tuple[str, str, int]]:
if not realm_in_local_realm_filters_cache(realm_id):
per_request_realm_filters_cache[realm_id] = realm_filters_for_realm_remote_cache(realm_id)
return per_request_realm_filters_cache[realm_id]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id: int) -> List[Tuple[str, str, int]]:
filters = []
for realm_filter in RealmFilter.objects.filter(realm_id=realm_id):
filters.append((realm_filter.pattern, realm_filter.url_format_string, realm_filter.id))
return filters
def all_realm_filters() -> Dict[int, List[Tuple[str, str, int]]]:
filters: DefaultDict[int, List[Tuple[str, str, int]]] = defaultdict(list)
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm_id].append((realm_filter.pattern,
realm_filter.url_format_string,
realm_filter.id))
return filters
def flush_realm_filter(sender: Any, **kwargs: Any) -> None:
realm_id = kwargs['instance'].realm_id
cache_delete(get_realm_filters_cache_key(realm_id))
try:
per_request_realm_filters_cache.pop(realm_id)
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
def flush_per_request_caches() -> None:
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
type_id: int = models.IntegerField(db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta:
unique_together = ("type", "type_id")
# N.B. If we used Django's choice=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle'}
def type_name(self) -> str:
# Raises KeyError if invalid
return self._type_names[self.type]
def __str__(self) -> str:
display_recipient = get_display_recipient(self)
return f"<Recipient: {display_recipient} ({self.type_id}, {self.type})>"
class UserProfile(AbstractBaseUser, PermissionsMixin):
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
MIN_NAME_LENGTH = 2
API_KEY_LENGTH = 32
NAME_INVALID_CHARS = ['*', '`', "\\", '>', '"', '@']
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# This value is also being used in static/js/settings_bots.js.
# On updating it here, update it there as well.
OUTGOING_WEBHOOK_BOT = 3
"""
Embedded bots run within the Zulip server itself; events are added to the
embedded_bots queue and then handled by a QueueProcessingWorker.
"""
EMBEDDED_BOT = 4
BOT_TYPES = {
DEFAULT_BOT: 'Generic bot',
INCOMING_WEBHOOK_BOT: 'Incoming webhook',
OUTGOING_WEBHOOK_BOT: 'Outgoing webhook',
EMBEDDED_BOT: 'Embedded bot',
}
SERVICE_BOT_TYPES = [
OUTGOING_WEBHOOK_BOT,
EMBEDDED_BOT,
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# For historical reasons, Zulip has two email fields. The
# `delivery_email` field is the user's email address, where all
# email notifications will be sent, and is used for all
# authentication use cases.
#
# The `email` field is the same as delivery_email in organizations
# with EMAIL_ADDRESS_VISIBILITY_EVERYONE. For other
    # organizations, it will instead be a unique, dummy value rather than a
    # real address. This field exists for backwards
# compatibility in Zulip APIs where users are referred to by their
# email address, not their ID; it should be used in all API use cases.
#
# Both fields are unique within a realm (in a case-insensitive fashion).
delivery_email: str = models.EmailField(blank=False, db_index=True)
email: str = models.EmailField(blank=False, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# Foreign key to the Recipient object for PERSONAL type messages to this user.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
# The user's name. We prefer the model of a full_name
# over first+last because cultures vary on how many
# names one has, whether the family name is first or last, etc.
# It also allows organizations to encode a bit of non-name data in
# the "name" attribute if desired, like gender pronouns,
# graduation year, etc.
full_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
date_joined: datetime.datetime = models.DateTimeField(default=timezone_now)
tos_version: Optional[str] = models.CharField(null=True, max_length=10)
api_key: str = models.CharField(max_length=API_KEY_LENGTH)
# Whether the user has access to server-level administrator pages, like /activity
is_staff: bool = models.BooleanField(default=False)
# For a normal user, this is True unless the user or an admin has
# deactivated their account. The name comes from Django; this field
# isn't related to presence or to whether the user has recently used Zulip.
#
# See also `long_term_idle`.
is_active: bool = models.BooleanField(default=True, db_index=True)
is_billing_admin: bool = models.BooleanField(default=False, db_index=True)
is_bot: bool = models.BooleanField(default=False, db_index=True)
bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
# Each role has a superset of the permissions of the next higher
# numbered role. When adding new roles, leave enough space for
# future roles to be inserted between currently adjacent
# roles. These constants appear in RealmAuditLog.extra_data, so
# changes to them will require a migration of RealmAuditLog.
ROLE_REALM_OWNER = 100
ROLE_REALM_ADMINISTRATOR = 200
# ROLE_MODERATOR = 300
ROLE_MEMBER = 400
ROLE_GUEST = 600
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
ROLE_TYPES = [
ROLE_REALM_OWNER,
ROLE_REALM_ADMINISTRATOR,
ROLE_MEMBER,
ROLE_GUEST,
]
# Whether the user has been "soft-deactivated" due to weeks of inactivity.
# For these users we avoid doing UserMessage table work, as an optimization
# for large Zulip organizations with lots of single-visit users.
long_term_idle: bool = models.BooleanField(default=False, db_index=True)
# When we last added basic UserMessage rows for a long_term_idle user.
last_active_message_id: Optional[int] = models.IntegerField(null=True)
# Mirror dummies are fake (!is_active) users used to provide
# message senders in our cross-protocol Zephyr<->Zulip content
# mirroring integration, so that we can display mirrored content
# like native Zulip messages (with a name + avatar, etc.).
is_mirror_dummy: bool = models.BooleanField(default=False)
# API super users are allowed to forge messages as sent by another
# user and to send to private streams; also used for Zephyr/Jabber mirroring.
is_api_super_user: bool = models.BooleanField(default=False, db_index=True)
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications: bool = models.BooleanField(default=False)
enable_stream_email_notifications: bool = models.BooleanField(default=False)
enable_stream_push_notifications: bool = models.BooleanField(default=False)
enable_stream_audible_notifications: bool = models.BooleanField(default=False)
notification_sound: str = models.CharField(max_length=20, default='zulip')
wildcard_mentions_notify: bool = models.BooleanField(default=True)
# PM + @-mention notifications.
enable_desktop_notifications: bool = models.BooleanField(default=True)
pm_content_in_desktop_notifications: bool = models.BooleanField(default=True)
enable_sounds: bool = models.BooleanField(default=True)
enable_offline_email_notifications: bool = models.BooleanField(default=True)
message_content_in_email_notifications: bool = models.BooleanField(default=True)
enable_offline_push_notifications: bool = models.BooleanField(default=True)
enable_online_push_notifications: bool = models.BooleanField(default=True)
DESKTOP_ICON_COUNT_DISPLAY_MESSAGES = 1
DESKTOP_ICON_COUNT_DISPLAY_NOTIFIABLE = 2
DESKTOP_ICON_COUNT_DISPLAY_NONE = 3
desktop_icon_count_display: int = models.PositiveSmallIntegerField(
default=DESKTOP_ICON_COUNT_DISPLAY_MESSAGES)
enable_digest_emails: bool = models.BooleanField(default=True)
enable_login_emails: bool = models.BooleanField(default=True)
realm_name_in_notifications: bool = models.BooleanField(default=False)
presence_enabled: bool = models.BooleanField(default=True)
# Used for rate-limiting certain automated messages generated by bots
last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)
# Minutes to wait before warning a bot owner that their bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
# API rate limits, formatted as a comma-separated list of range:max pairs
rate_limits: str = models.CharField(default="", max_length=100)
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Default streams for some deprecated/legacy classes of bot users.
default_sending_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
)
default_events_register_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
)
default_all_public_streams: bool = models.BooleanField(default=False)
# UI vars
enter_sends: Optional[bool] = models.BooleanField(null=True, default=False)
left_side_userlist: bool = models.BooleanField(default=False)
# display settings
default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
dense_mode: bool = models.BooleanField(default=True)
fluid_layout_width: bool = models.BooleanField(default=False)
high_contrast_mode: bool = models.BooleanField(default=False)
translate_emoticons: bool = models.BooleanField(default=False)
twenty_four_hour_time: bool = models.BooleanField(default=False)
starred_message_counts: bool = models.BooleanField(default=False)
COLOR_SCHEME_AUTOMATIC = 1
COLOR_SCHEME_NIGHT = 2
COLOR_SCHEME_LIGHT = 3
COLOR_SCHEME_CHOICES = [
COLOR_SCHEME_AUTOMATIC,
COLOR_SCHEME_NIGHT,
COLOR_SCHEME_LIGHT
]
color_scheme: int = models.PositiveSmallIntegerField(default=COLOR_SCHEME_AUTOMATIC)
# UI setting controlling Zulip's behavior of demoting in the sort
# order and graying out streams with no recent traffic. The
# default behavior, automatic, enables this behavior once a user
# is subscribed to 30+ streams in the webapp.
DEMOTE_STREAMS_AUTOMATIC = 1
DEMOTE_STREAMS_ALWAYS = 2
DEMOTE_STREAMS_NEVER = 3
DEMOTE_STREAMS_CHOICES = [
DEMOTE_STREAMS_AUTOMATIC,
DEMOTE_STREAMS_ALWAYS,
DEMOTE_STREAMS_NEVER,
]
demote_inactive_streams: int = models.PositiveSmallIntegerField(default=DEMOTE_STREAMS_AUTOMATIC)
# A timezone name from the `tzdata` database, as found in pytz.all_timezones.
#
# The longest existing name is 32 characters long, so max_length=40 seems
# like a safe choice.
#
# In Django, the convention is to use an empty string instead of NULL/None
# for text-based fields. For more information, see
# https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
timezone: str = models.CharField(max_length=40, default='')
# Emojisets
GOOGLE_EMOJISET = 'google'
GOOGLE_BLOB_EMOJISET = 'google-blob'
TEXT_EMOJISET = 'text'
TWITTER_EMOJISET = 'twitter'
EMOJISET_CHOICES = ((GOOGLE_EMOJISET, "Google modern"),
(GOOGLE_BLOB_EMOJISET, "Google classic"),
(TWITTER_EMOJISET, "Twitter"),
(TEXT_EMOJISET, "Plain text"))
emojiset: str = models.CharField(default=GOOGLE_BLOB_EMOJISET, choices=EMOJISET_CHOICES, max_length=20)
AVATAR_FROM_GRAVATAR = 'G'
AVATAR_FROM_USER = 'U'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
)
avatar_source: str = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1)
avatar_version: int = models.PositiveSmallIntegerField(default=1)
avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)
TUTORIAL_WAITING = 'W'
TUTORIAL_STARTED = 'S'
TUTORIAL_FINISHED = 'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status: str = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1)
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is if the step has been
# completed.
onboarding_steps: str = models.TextField(default='[]')
zoom_token: Optional[object] = JSONField(default=None, null=True)
objects: UserManager = UserManager()
# Define the types of the various automatically managed properties
property_types = dict(
color_scheme=int,
default_language=str,
demote_inactive_streams=int,
dense_mode=bool,
emojiset=str,
fluid_layout_width=bool,
high_contrast_mode=bool,
left_side_userlist=bool,
starred_message_counts=bool,
timezone=str,
translate_emoticons=bool,
twenty_four_hour_time=bool,
)
notification_setting_types = dict(
enable_desktop_notifications=bool,
enable_digest_emails=bool,
enable_login_emails=bool,
enable_offline_email_notifications=bool,
enable_offline_push_notifications=bool,
enable_online_push_notifications=bool,
enable_sounds=bool,
enable_stream_desktop_notifications=bool,
enable_stream_email_notifications=bool,
enable_stream_push_notifications=bool,
enable_stream_audible_notifications=bool,
wildcard_mentions_notify=bool,
message_content_in_email_notifications=bool,
notification_sound=str,
pm_content_in_desktop_notifications=bool,
desktop_icon_count_display=int,
realm_name_in_notifications=bool,
presence_enabled=bool,
)
ROLE_ID_TO_NAME_MAP = {
ROLE_REALM_OWNER: _("Organization owner"),
ROLE_REALM_ADMINISTRATOR: _("Organization administrator"),
ROLE_MEMBER: _("Member"),
ROLE_GUEST: _("Guest"),
}
def get_role_name(self) -> str:
return self.ROLE_ID_TO_NAME_MAP[self.role]
@property
def profile_data(self) -> ProfileData:
values = CustomProfileFieldValue.objects.filter(user_profile=self)
user_data = {v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values}
data: ProfileData = []
for field in custom_profile_fields_for_realm(self.realm_id):
field_values = user_data.get(field.id, None)
if field_values:
value, rendered_value = field_values.get("value"), field_values.get("rendered_value")
else:
value, rendered_value = None, None
field_type = field.field_type
if value is not None:
converter = field.FIELD_CONVERTERS[field_type]
value = converter(value)
field_data = field.as_dict()
data.append({
'id': field_data['id'],
'name': field_data['name'],
'type': field_data['type'],
'hint': field_data['hint'],
'field_data': field_data['field_data'],
'order': field_data['order'],
'value': value,
'rendered_value': rendered_value,
})
return data
def can_admin_user(self, target_user: 'UserProfile') -> bool:
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __str__(self) -> str:
return f"<UserProfile: {self.email} {self.realm}>"
@property
def is_new_member(self) -> bool:
diff = (timezone_now() - self.date_joined).days
if diff < self.realm.waiting_period_threshold:
return True
return False
@property
def is_realm_admin(self) -> bool:
return self.role == UserProfile.ROLE_REALM_ADMINISTRATOR or \
self.role == UserProfile.ROLE_REALM_OWNER
@is_realm_admin.setter
def is_realm_admin(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
elif self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
# We need to be careful to not accidentally change
# ROLE_GUEST to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def has_billing_access(self) -> bool:
return self.is_realm_owner or self.is_billing_admin
@property
def is_realm_owner(self) -> bool:
return self.role == UserProfile.ROLE_REALM_OWNER
@property
def is_guest(self) -> bool:
return self.role == UserProfile.ROLE_GUEST
@is_guest.setter
def is_guest(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_GUEST
elif self.role == UserProfile.ROLE_GUEST:
# We need to be careful to not accidentally change
# ROLE_REALM_ADMINISTRATOR to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def is_incoming_webhook(self) -> bool:
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@property
def allowed_bot_types(self) -> List[int]:
allowed_bot_types = []
if self.is_realm_admin or \
                self.realm.bot_creation_policy != Realm.BOT_CREATION_LIMIT_GENERIC_BOTS:
allowed_bot_types.append(UserProfile.DEFAULT_BOT)
allowed_bot_types += [
UserProfile.INCOMING_WEBHOOK_BOT,
UserProfile.OUTGOING_WEBHOOK_BOT,
]
if settings.EMBEDDED_BOTS_ENABLED:
allowed_bot_types.append(UserProfile.EMBEDDED_BOT)
return allowed_bot_types
@staticmethod
def emojiset_choices() -> List[Dict[str, str]]:
return [dict(key=emojiset[0], text=emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES]
@staticmethod
def emails_from_ids(user_ids: Sequence[int]) -> Dict[int, str]:
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def email_address_is_realm_public(self) -> bool:
if self.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
return True
if self.is_bot:
return True
return False
def has_permission(self, policy_name: str) -> bool:
if policy_name not in ['create_stream_policy', 'invite_to_stream_policy']:
raise AssertionError("Invalid policy")
if self.is_realm_admin:
return True
policy_value = getattr(self.realm, policy_name)
if policy_value == Realm.POLICY_ADMINS_ONLY:
return False
if self.is_guest:
return False
if policy_value == Realm.POLICY_MEMBERS_ONLY:
return True
return not self.is_new_member
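    # Illustrative reading of the policy cascade above (a sketch, not a
    # guarantee about every Realm.POLICY_* value): with a "full members only"
    # style policy, an admin always passes, a guest never does, and an
    # ordinary member passes only once
    # (timezone_now() - date_joined).days >= realm.waiting_period_threshold,
    # i.e. once is_new_member becomes False.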
def can_create_streams(self) -> bool:
return self.has_permission('create_stream_policy')
def can_subscribe_other_users(self) -> bool:
return self.has_permission('invite_to_stream_policy')
def can_access_public_streams(self) -> bool:
return not (self.is_guest or self.realm.is_zephyr_mirror_realm)
def can_access_all_realm_members(self) -> bool:
return not (self.realm.is_zephyr_mirror_realm or self.is_guest)
def major_tos_version(self) -> int:
if self.tos_version is not None:
return int(self.tos_version.split('.')[0])
else:
return -1
def format_requestor_for_logs(self) -> str:
return "{}@{}".format(self.id, self.realm.string_id or 'root')
def set_password(self, password: Optional[str]) -> None:
if password is None:
self.set_unusable_password()
return
from zproject.backends import check_password_strength
if not check_password_strength(password):
raise PasswordTooWeakError
super().set_password(password)
class PasswordTooWeakError(Exception):
pass
class UserGroup(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=100)
members: Manager = models.ManyToManyField(UserProfile, through='UserGroupMembership')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
description: str = models.TextField(default='')
class Meta:
unique_together = (('realm', 'name'),)
class UserGroupMembership(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_group: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
class Meta:
unique_together = (('user_group', 'user_profile'),)
def receives_offline_push_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_offline_push_notifications and
not user_profile.is_bot)
def receives_offline_email_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_offline_email_notifications and
not user_profile.is_bot)
def receives_online_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_online_push_notifications and
not user_profile.is_bot)
def receives_stream_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_stream_push_notifications and
not user_profile.is_bot)
def remote_user_to_email(remote_user: str) -> str:
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
# Data on a partially created user, before the completion of
# registration. This is used in at least three major code paths:
# * Realm creation, in which case realm is None.
#
# * Invitations, in which case referred_by will always be set.
#
# * Social authentication signup, where it's used to store data
# from the authentication step and pass it to the registration
# form.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
email: str = models.EmailField()
# If the pre-registration process provides a suggested full name for this user,
    # store it here so we can prepopulate the Full Name field in the registration form:
full_name: Optional[str] = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH, null=True)
full_name_validated: bool = models.BooleanField(default=False)
referred_by: Optional[UserProfile] = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
invited_at: datetime.datetime = models.DateTimeField(auto_now=True)
realm_creation: bool = models.BooleanField(default=False)
# Indicates whether the user needs a password. Users who were
# created via SSO style auth (e.g. GitHub/Google) generally do not.
password_required: bool = models.BooleanField(default=True)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status: int = models.IntegerField(default=0)
# The realm should only ever be None for PreregistrationUser
# objects created as part of realm creation.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
# Changes to INVITED_AS should also be reflected in
# settings_invites.invited_as_values in
# static/js/settings_invites.js
INVITE_AS = dict(
REALM_OWNER = 100,
REALM_ADMIN = 200,
MEMBER = 400,
GUEST_USER = 600,
)
invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS['MEMBER'])
def filter_to_valid_prereg_users(query: QuerySet) -> QuerySet:
days_to_activate = settings.INVITATION_LINK_VALIDITY_DAYS
active_value = confirmation_settings.STATUS_ACTIVE
revoked_value = confirmation_settings.STATUS_REVOKED
lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
return query.exclude(status__in=[active_value, revoked_value]).filter(
invited_at__gte=lowest_datetime)
class MultiuseInvite(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
referred_by: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE) # Optional[UserProfile]
streams: Manager = models.ManyToManyField('Stream')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
invited_as: int = models.PositiveSmallIntegerField(default=PreregistrationUser.INVITE_AS['MEMBER'])
class EmailChangeStatus(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
new_email: str = models.EmailField()
old_email: str = models.EmailField()
updated_at: datetime.datetime = models.DateTimeField(auto_now=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status: int = models.IntegerField(default=0)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class AbstractPushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind: int = models.PositiveSmallIntegerField(choices=KINDS)
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token: str = models.CharField(max_length=4096, db_index=True)
# TODO: last_updated should be renamed date_created, since it is
# no longer maintained as a last_updated value.
last_updated: datetime.datetime = models.DateTimeField(auto_now=True)
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id: Optional[str] = models.TextField(null=True)
class Meta:
abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# The user whose device this is
user: UserProfile = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)
class Meta:
unique_together = ("user", "kind", "token")
def generate_email_token_for_stream() -> str:
return generate_random_token(32)
class Stream(models.Model):
MAX_NAME_LENGTH = 60
MAX_DESCRIPTION_LENGTH = 1024
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
description: str = models.CharField(max_length=MAX_DESCRIPTION_LENGTH, default='')
rendered_description: str = models.TextField(default='')
# Foreign key to the Recipient object for STREAM type messages to this stream.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
invite_only: Optional[bool] = models.BooleanField(null=True, default=False)
history_public_to_subscribers: bool = models.BooleanField(default=False)
# Whether this stream's content should be published by the web-public archive features
is_web_public: bool = models.BooleanField(default=False)
STREAM_POST_POLICY_EVERYONE = 1
STREAM_POST_POLICY_ADMINS = 2
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS = 3
# TODO: Implement policy to restrict posting to a user group or admins.
# Who in the organization has permission to send messages to this stream.
stream_post_policy: int = models.PositiveSmallIntegerField(default=STREAM_POST_POLICY_EVERYONE)
STREAM_POST_POLICY_TYPES = [
STREAM_POST_POLICY_EVERYONE,
STREAM_POST_POLICY_ADMINS,
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS,
]
# The unique thing about Zephyr public streams is that we never list their
# users. We may try to generalize this concept later, but for now
# we just use a concrete field. (Zephyr public streams aren't exactly like
# invite-only streams--while both are private in terms of listing users,
# for Zephyr we don't even list users to stream members, yet membership
    # is more public in the sense that you don't need a Zulip invite to join.)
    # This field is populated directly from UserProfile.is_zephyr_mirror_realm,
    # and the reason for denormalizing this field is performance.
is_in_zephyr_realm: bool = models.BooleanField(default=False)
# Used by the e-mail forwarder. The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream name length is 60 (MAX_NAME_LENGTH), so we
# have plenty of room for the token.
email_token: str = models.CharField(
max_length=32, default=generate_email_token_for_stream, unique=True,
)
# For old messages being automatically deleted.
# Value NULL means "use retention policy of the realm".
# Value -1 means "disable retention policy for this stream unconditionally".
# Non-negative values have the natural meaning of "archive messages older than <value> days".
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
'forever': -1,
'realm_default': None,
}
message_retention_days: Optional[int] = models.IntegerField(null=True, default=None)
# The very first message ID in the stream. Used to help clients
# determine whether they might need to display "more topics" for a
# stream based on what messages they have cached.
first_message_id: Optional[int] = models.IntegerField(null=True, db_index=True)
def __str__(self) -> str:
return f"<Stream: {self.name}>"
def is_public(self) -> bool:
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.is_in_zephyr_realm
def is_history_realm_public(self) -> bool:
return self.is_public()
def is_history_public_to_subscribers(self) -> bool:
return self.history_public_to_subscribers
class Meta:
unique_together = ("name", "realm")
# Stream fields included whenever a Stream object is provided to
# Zulip clients via the API. A few details worth noting:
# * "id" is represented as "stream_id" in most API interfaces.
# * "email_token" is not realm-public and thus is not included here.
# * is_in_zephyr_realm is a backend-only optimization.
# * "deactivated" streams are filtered from the API entirely.
# * "realm" and "recipient" are not exposed to clients via the API.
API_FIELDS = [
"name",
"id",
"description",
"rendered_description",
"invite_only",
"is_web_public",
"stream_post_policy",
"history_public_to_subscribers",
"first_message_id",
"message_retention_days",
"date_created",
]
@staticmethod
def get_client_data(query: QuerySet) -> List[Dict[str, Any]]:
query = query.only(*Stream.API_FIELDS)
return [row.to_dict() for row in query]
def to_dict(self) -> Dict[str, Any]:
result = {}
for field_name in self.API_FIELDS:
if field_name == "id":
result['stream_id'] = self.id
continue
elif field_name == "date_created":
result['date_created'] = datetime_to_timestamp(self.date_created)
continue
result[field_name] = getattr(self, field_name)
result['is_announcement_only'] = self.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS
return result
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
class MutedTopic(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
topic_name: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
# The default value for date_muted is a few weeks before tracking
# of when topics were muted was first introduced. It's designed
# to be obviously incorrect so that users can tell it's backfilled data.
date_muted: datetime.datetime = models.DateTimeField(default=datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc))
class Meta:
unique_together = ('user_profile', 'stream', 'topic_name')
def __str__(self) -> str:
return (f"<MutedTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.date_muted})>")
class Client(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=30, db_index=True, unique=True)
def __str__(self) -> str:
return f"<Client: {self.name}>"
get_client_cache: Dict[str, Client] = {}
def get_client(name: str) -> Client:
# Accessing KEY_PREFIX through the module is necessary
# because we need the updated value of the variable.
cache_name = cache.KEY_PREFIX + name
if cache_name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[cache_name] = result
return get_client_cache[cache_name]
def get_client_cache_key(name: str) -> str:
return f'get_client:{make_safe_digest(name)}'
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name: str) -> Client:
(client, _) = Client.objects.get_or_create(name=name)
return client
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
return Stream.objects.select_related().get(
name__iexact=stream_name.strip(), realm_id=realm_id)
def stream_name_in_use(stream_name: str, realm_id: int) -> bool:
return Stream.objects.filter(
name__iexact=stream_name.strip(),
realm_id=realm_id,
).exists()
def get_active_streams(realm: Optional[Realm]) -> QuerySet:
# TODO: Change return type to QuerySet[Stream]
    # NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[Stream]
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name: str, realm: Realm) -> Stream:
'''
Callers that don't have a Realm object already available should use
get_realm_stream directly, to avoid unnecessarily fetching the
Realm object.
'''
return get_realm_stream(stream_name, realm.id)
def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream:
return Stream.objects.select_related().get(id=stream_id, realm=realm)
def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]:
def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]:
#
# This should be just
#
# Stream.objects.select_related().filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
where_clause = "upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)"
return get_active_streams(realm).select_related().extra(
where=[where_clause],
params=(list(stream_names),))
def stream_name_to_cache_key(stream_name: str) -> str:
return get_stream_cache_key(stream_name, realm.id)
def stream_to_lower_name(stream: Stream) -> str:
return stream.name.lower()
return bulk_cached_fetch(
stream_name_to_cache_key,
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=stream_to_lower_name,
)
def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient:
# The caller should ensure that user_profile_ids includes
# the sender. Note that get_huddle hits the cache, and then
# we hit another cache to get the recipient. We may want to
# unify our caching strategy here.
huddle = get_huddle(list(user_profile_ids))
return huddle.recipient
def get_huddle_user_ids(recipient: Recipient) -> List[int]:
assert(recipient.type == Recipient.HUDDLE)
return Subscription.objects.filter(
recipient=recipient,
).order_by('user_profile_id').values_list('user_profile_id', flat=True)
def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]:
"""
Takes a list of huddle-type recipients, returns a dict
mapping recipient id to list of user ids in the huddle.
"""
assert all(recipient.type == Recipient.HUDDLE for recipient in recipients)
if not recipients:
return {}
subscriptions = Subscription.objects.filter(
recipient__in=recipients,
).order_by('user_profile_id')
result_dict: Dict[int, List[int]] = {}
for recipient in recipients:
result_dict[recipient.id] = [subscription.user_profile_id
for subscription in subscriptions
if subscription.recipient_id == recipient.id]
return result_dict
class AbstractMessage(models.Model):
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# The message's topic.
#
# Early versions of Zulip called this concept a "subject", as in an email
# "subject line", before changing to "topic" in 2013 (commit dac5a46fa).
# UI and user documentation now consistently say "topic". New APIs and
# new code should generally also say "topic".
#
# See also the `topic_name` method on `Message`.
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField()
rendered_content: Optional[str] = models.TextField(null=True)
rendered_content_version: Optional[int] = models.IntegerField(null=True)
date_sent: datetime.datetime = models.DateTimeField('date sent', db_index=True)
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True)
# A JSON-encoded list of objects describing any past edits to this
# message, oldest first.
edit_history: Optional[str] = models.TextField(null=True)
has_attachment: bool = models.BooleanField(default=False, db_index=True)
has_image: bool = models.BooleanField(default=False, db_index=True)
has_link: bool = models.BooleanField(default=False, db_index=True)
class Meta:
abstract = True
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>"
class ArchiveTransaction(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True)
# Marks if the data archived in this transaction has been restored:
restored: bool = models.BooleanField(default=False, db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types:
RETENTION_POLICY_BASED = 1 # Archiving was executed due to automated retention policies
MANUAL = 2 # Archiving was run manually, via move_messages_to_archive function
# ForeignKey to the realm with which objects archived in this transaction are associated.
# If type is set to MANUAL, this should be null.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
def __str__(self) -> str:
return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format(
id=self.id,
type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED",
realm=self.realm.string_id if self.realm else None,
timestamp=self.timestamp,
)
class ArchivedMessage(AbstractMessage):
"""Used as a temporary holding place for deleted messages before they
are permanently deleted. This is an important part of a robust
'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
archive_transaction: ArchiveTransaction = models.ForeignKey(ArchiveTransaction, on_delete=CASCADE)
class Message(AbstractMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
def topic_name(self) -> str:
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def is_stream_message(self) -> bool:
'''
Find out whether a message is a stream message by
looking up its recipient.type. TODO: Make this
an easier operation by denormalizing the message
type onto Message, either explicitly (message.type)
or implicitly (message.stream_id is not None).
'''
return self.recipient.type == Recipient.STREAM
def get_realm(self) -> Realm:
return self.sender.realm
def save_rendered_content(self) -> None:
self.save(update_fields=["rendered_content", "rendered_content_version"])
@staticmethod
def need_to_render_content(rendered_content: Optional[str],
rendered_content_version: Optional[int],
markdown_version: int) -> bool:
return (rendered_content is None or
rendered_content_version is None or
rendered_content_version < markdown_version)
def sent_by_human(self) -> bool:
"""Used to determine whether a message was sent by a full Zulip UI
style client (and thus whether the message should be treated
as sent by a human and automatically marked as read for the
sender). The purpose of this distinction is to ensure that
        messages sent to the user by e.g. a Google Calendar integration
using the user's own API key don't get marked as read
automatically.
"""
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'zulipmobile', 'zulipelectron', 'zulipterminal', 'snipe',
'website', 'ios', 'android')) or (
'desktop app' in sending_client)
@staticmethod
def is_status_message(content: str, rendered_content: str) -> bool:
"""
"status messages" start with /me and have special rendering:
/me loves chocolate -> Full Name loves chocolate
"""
if content.startswith('/me '):
return True
return False
def get_context_for_message(message: Message) -> Sequence[Message]:
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
date_sent__gt=message.date_sent - timedelta(minutes=15),
).order_by('-id')[:10]
post_save.connect(flush_message, sender=Message)
class AbstractSubMessage(models.Model):
# We can send little text messages that are associated with a regular
# Zulip message. These can be used for experimental widgets like embedded
# games, surveys, mini threads, etc. These are designed to be pretty
# generic in purpose.
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
msg_type: str = models.TextField()
content: str = models.TextField()
class Meta:
abstract = True
class SubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = ['id', 'message_id', 'sender_id', 'msg_type', 'content']
query = SubMessage.objects.filter(message_id__in=needed_ids).values(*fields)
query = query.order_by('message_id', 'id')
return list(query)
class ArchivedSubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
post_save.connect(flush_submessage, sender=SubMessage)
class Draft(models.Model):
""" Server-side storage model for storing drafts so that drafts can be synced across
multiple clients/devices.
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
recipient: Optional[Recipient] = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
topic: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField() # Length should not exceed MAX_MESSAGE_LENGTH
last_edit_time: datetime.datetime = models.DateTimeField(db_index=True)
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.user_profile.email} / {self.id} / {self.last_edit_time}>"
def to_dict(self) -> Dict[str, Any]: # nocoverage # Will be added in a later commit.
if self.recipient is None:
_type = ""
to = []
elif self.recipient.type == Recipient.STREAM:
_type = "stream"
to = [self.recipient.type_id]
else:
_type = "private"
if self.recipient.type == Recipient.PERSONAL:
to = [self.recipient.type_id]
else:
to = []
for r in get_display_recipient(self.recipient):
assert(not isinstance(r, str)) # It will only be a string for streams
if not r["id"] == self.user_profile_id:
to.append(r["id"])
return {
"type": _type,
"to": to,
"topic": self.topic,
"content": self.content,
"timestamp": self.last_edit_time.timestamp(),
}
class AbstractReaction(models.Model):
"""For emoji reactions to messages (and potentially future reaction types).
Emoji are surprisingly complicated to implement correctly. For details
on how this subsystem works, see:
https://zulip.readthedocs.io/en/latest/subsystems/emoji.html
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The user-facing name for an emoji reaction. With emoji aliases,
# there may be multiple accepted names for a given emoji; this
# field encodes which one the user selected.
emoji_name: str = models.TextField()
UNICODE_EMOJI = 'unicode_emoji'
REALM_EMOJI = 'realm_emoji'
ZULIP_EXTRA_EMOJI = 'zulip_extra_emoji'
REACTION_TYPES = ((UNICODE_EMOJI, _("Unicode emoji")),
(REALM_EMOJI, _("Custom emoji")),
(ZULIP_EXTRA_EMOJI, _("Zulip extra emoji")))
reaction_type: str = models.CharField(default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30)
# A string that uniquely identifies a particular emoji. The format varies
# by type:
#
# * For Unicode emoji, a dash-separated hex encoding of the sequence of
# Unicode codepoints that define this emoji in the Unicode
# specification. For examples, see "non_qualified" or "unified" in the
# following data, with "non_qualified" taking precedence when both present:
# https://raw.githubusercontent.com/iamcal/emoji-data/master/emoji_pretty.json
#
# * For realm emoji (aka user uploaded custom emoji), the ID
# (in ASCII decimal) of the RealmEmoji object.
#
# * For "Zulip extra emoji" (like :zulip:), the filename of the emoji.
emoji_code: str = models.TextField()
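    # Illustrative values (the exact names depend on the emoji data bundled
    # with the server, so treat these as examples rather than guarantees):
    #   * Unicode emoji:     emoji_name="+1",    emoji_code="1f44d"  (U+1F44D)
    #   * Realm emoji:       emoji_name="logo",  emoji_code="42"     (RealmEmoji id 42)
    #   * Zulip extra emoji: emoji_name="zulip", emoji_code="zulip"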
class Meta:
abstract = True
unique_together = (("user_profile", "message", "emoji_name"),
("user_profile", "message", "reaction_type", "emoji_code"))
class Reaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = ['message_id', 'emoji_name', 'emoji_code', 'reaction_type',
'user_profile__email', 'user_profile__id', 'user_profile__full_name']
return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
def __str__(self) -> str:
return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"
class ArchivedReaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table indicating that that user received that message. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred or collapsed the message, was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
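# As a rough sketch (not the exact production query), fetching a user's most
# recent messages for the home view amounts to something like:
#   UserMessage.objects.filter(user_profile=user_profile)
#       .order_by('-message_id')[:1000]
# which is why this table is so central to read performance.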
class AbstractUserMessage(models.Model):
id: int = models.BigAutoField(primary_key=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The order here is important! It's the order of fields in the bitfield.
ALL_FLAGS = [
'read',
'starred',
'collapsed',
'mentioned',
'wildcard_mentioned',
# These next 4 flags are from features that have since been removed.
'summarize_in_home',
'summarize_in_stream',
'force_expand',
'force_collapse',
# Whether the message contains any of the user's alert words.
'has_alert_word',
# The historical flag is used to mark messages which the user
# did not receive when they were sent, but later added to
# their history via e.g. starring the message. This is
# important accounting for the "Subscribed to stream" dividers.
'historical',
# Whether the message is a private message; this flag is a
# denormalization of message.recipient.type to support an
# efficient index on UserMessage for a user's private messages.
'is_private',
# Whether we've sent a push notification to the user's mobile
# devices for this message that has not been revoked.
'active_mobile_push_notification',
]
# Certain flags are used only for internal accounting within the
# Zulip backend, and don't make sense to expose to the API.
NON_API_FLAGS = {"is_private", "active_mobile_push_notification"}
# Certain additional flags are just set once when the UserMessage
# row is created.
NON_EDITABLE_FLAGS = {
# These flags are bookkeeping and don't make sense to edit.
"has_alert_word",
"mentioned",
"wildcard_mentioned",
"historical",
# Unused flags can't be edited.
"force_expand",
"force_collapse",
"summarize_in_home",
"summarize_in_stream",
}
flags: BitHandler = BitField(flags=ALL_FLAGS, default=0)
class Meta:
abstract = True
unique_together = ("user_profile", "message")
@staticmethod
def where_unread() -> str:
        # Use this for Django ORM queries to access unread messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
return 'flags & 1 = 0'
@staticmethod
def where_starred() -> str:
# Use this for Django ORM queries to access starred messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
#
# The key detail is that e.g.
# UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
# will generate a query involving `flags & 2 = 2`, which doesn't match our index.
return 'flags & 2 <> 0'
@staticmethod
def where_active_push_notification() -> str:
# See where_starred for documentation.
return 'flags & 4096 <> 0'
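    # How the magic numbers in the raw SQL above line up with ALL_FLAGS
    # (bit i gets mask 2**i, counting from the start of the list):
    #   'read'                            -> bit 0  -> mask 1     (where_unread)
    #   'starred'                         -> bit 1  -> mask 2     (where_starred)
    #   'active_mobile_push_notification' -> bit 12 -> mask 4096  (where_active_push_notification)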
def flags_list(self) -> List[str]:
flags = int(self.flags)
return self.flags_list_for_flags(flags)
@staticmethod
def flags_list_for_flags(val: int) -> List[str]:
'''
This function is highly optimized, because it actually slows down
sending messages in a naive implementation.
'''
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if (val & mask) and flag not in AbstractUserMessage.NON_API_FLAGS:
flags.append(flag)
mask <<= 1
return flags
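    # For example, flags_list_for_flags(1 | 2) returns ['read', 'starred'],
    # while the is_private bit (2**11) would be filtered out via NON_API_FLAGS.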
def __str__(self) -> str:
display_recipient = get_display_recipient(self.message.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.user_profile.email} ({self.flags_list()})>"
class UserMessage(AbstractUserMessage):
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
def get_usermessage_by_message_id(user_profile: UserProfile, message_id: int) -> Optional[UserMessage]:
try:
return UserMessage.objects.select_related().get(user_profile=user_profile,
message__id=message_id)
except UserMessage.DoesNotExist:
return None
class ArchivedUserMessage(AbstractUserMessage):
"""Used as a temporary holding place for deleted UserMessages objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
class AbstractAttachment(models.Model):
file_name: str = models.TextField(db_index=True)
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id: str = models.TextField(db_index=True, unique=True)
owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)
create_time: datetime.datetime = models.DateTimeField(
default=timezone_now, db_index=True,
)
# Size of the uploaded file, in bytes
size: int = models.IntegerField()
    # The two fields below let us avoid looking up the corresponding
# messages/streams to check permissions before serving these files.
# Whether this attachment has been posted to a public stream, and
# thus should be available to all non-guest users in the
# organization (even if they weren't a recipient of a message
# linking to it).
is_realm_public: bool = models.BooleanField(default=False)
# Whether this attachment has been posted to a web-public stream,
# and thus should be available to everyone on the internet, even
# if the person isn't logged in.
is_web_public: bool = models.BooleanField(default=False)
class Meta:
abstract = True
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
"""Used as a temporary holding place for deleted Attachment objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
messages: Manager = models.ManyToManyField(Message)
def is_claimed(self) -> bool:
return self.messages.count() > 0
def to_dict(self) -> Dict[str, Any]:
return {
'id': self.id,
'name': self.file_name,
'path_id': self.path_id,
'size': self.size,
# convert to JavaScript-style UNIX timestamp so we can take
# advantage of client timezones.
'create_time': int(time.mktime(self.create_time.timetuple()) * 1000),
'messages': [{
'id': m.id,
'date_sent': int(time.mktime(m.date_sent.timetuple()) * 1000),
} for m in self.messages.all()],
}
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
try:
attachment = Attachment.objects.get(path_id=path_id)
except Attachment.DoesNotExist:
return None
if user_profile == attachment.owner:
# If you own the file, you can access it.
return True
if (attachment.is_realm_public and attachment.realm == user_profile.realm and
user_profile.can_access_public_streams()):
# Any user in the realm can access realm-public files
return True
messages = attachment.messages.all()
if UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
# If it was sent in a private message or private stream
# message, then anyone who received that message can access it.
return True
# The user didn't receive any of the messages that included this
# attachment. But they might still have access to it, if it was
# sent to a stream they are on where history is public to
# subscribers.
# These are subscriptions to a stream one of the messages was sent to
relevant_stream_ids = Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__in=[m.recipient_id for m in messages]).values_list("recipient__type_id", flat=True)
if len(relevant_stream_ids) == 0:
return False
return Stream.objects.filter(id__in=relevant_stream_ids,
history_public_to_subscribers=True).exists()
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone_now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# Whether the user has since unsubscribed. We mark Subscription
# objects as inactive, rather than deleting them, when a user
# unsubscribes, so we can preserve user customizations like
# notification settings, stream color, etc., if the user later
# resubscribes.
active: bool = models.BooleanField(default=True)
ROLE_STREAM_ADMINISTRATOR = 20
ROLE_MEMBER = 50
ROLE_TYPES = [
ROLE_STREAM_ADMINISTRATOR,
ROLE_MEMBER,
]
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
# Whether this user had muted this stream.
is_muted: Optional[bool] = models.BooleanField(null=True, default=False)
DEFAULT_STREAM_COLOR = "#c2c2c2"
color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
pin_to_top: bool = models.BooleanField(default=False)
# These fields are stream-level overrides for the user's default
# configuration for notification, configured in UserProfile. The
# default, None, means we just inherit the user-level default.
desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)
class Meta:
unique_together = ("user_profile", "recipient")
def __str__(self) -> str:
return f"<Subscription: {self.user_profile} -> {self.recipient}>"
@property
def is_stream_admin(self) -> bool:
return self.role == Subscription.ROLE_STREAM_ADMINISTRATOR
# Subscription fields included whenever a Subscription object is provided to
# Zulip clients via the API. A few details worth noting:
# * These fields will generally be merged with Stream.API_FIELDS
# data about the stream.
# * "user_profile" is usually implied as full API access to Subscription
# is primarily done for the current user; API access to other users'
# subscriptions is generally limited to boolean yes/no.
# * "id" and "recipient_id" are not included as they are not used
# in the Zulip API; it's an internal implementation detail.
# Subscription objects are always looked up in the API via
# (user_profile, stream) pairs.
# * "active" is often excluded in API use cases where it is implied.
# * "is_muted" often needs to be copied to not "in_home_view" for
# backwards-compatibility.
API_FIELDS = [
"active",
"color",
"is_muted",
"pin_to_top",
"audible_notifications",
"desktop_notifications",
"email_notifications",
"push_notifications",
"wildcard_mentions_notify",
"role",
]
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid: int) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email: str) -> UserProfile:
"""This function is intended to be used by our unit tests and for
manual manage.py shell work; robust code must use get_user or
get_user_by_delivery_email instead, because Zulip supports
multiple users with a given (delivery) email address existing on a
single server (in different realms).
"""
return UserProfile.objects.select_related().get(delivery_email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600*24*7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
try:
return UserProfile.objects.select_related().get(api_key=api_key)
except UserProfile.DoesNotExist:
# We will cache failed lookups with None. The
# use case here is that broken API clients may
# continually ask for the same wrong API key, and
# we want to handle that as quickly as possible.
return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
user_profile = maybe_get_user_profile_by_api_key(api_key)
if user_profile is None:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
"""Fetches a user given their delivery email. For use in
authentication/registration contexts. Do not use for user-facing
views (e.g. Zulip API endpoints) as doing so would violate the
EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
those code paths.
"""
return UserProfile.objects.select_related().get(
delivery_email__iexact=email.strip(), realm=realm)
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
"""This is similar to get_user_by_delivery_email, and
it has the same security caveats. It gets multiple
users and returns a QuerySet, since most callers
will only need two or three fields.
If you are using this to get large UserProfile objects, you are
probably making a mistake, but if you must,
then use `select_related`.
"""
    # Django doesn't support delivery_email__iexact__in, so
    # we simply OR all the filters that we'd do for the
    # one-email case.
email_filter = Q()
for email in emails:
email_filter |= Q(delivery_email__iexact=email.strip())
return UserProfile.objects.filter(realm=realm).filter(email_filter)
@cache_with_key(user_profile_cache_key, timeout=3600*24*7)
def get_user(email: str, realm: Realm) -> UserProfile:
"""Fetches the user by its visible-to-other users username (in the
`email` field). For use in API contexts; do not use in
authentication/registration contexts as doing so will break
authentication in organizations using
EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
get_user_by_delivery_email.
"""
return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
"""Variant of get_user_by_email that excludes deactivated users.
See get_user docstring for important usage notes."""
user_profile = get_user(email, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
user_profile = get_user_profile_by_id_in_realm(uid, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_including_cross_realm(email: str, realm: Optional[Realm]=None) -> UserProfile:
if is_cross_realm_bot_email(email):
return get_system_bot(email)
assert realm is not None
return get_user(email, realm)
@cache_with_key(bot_profile_cache_key, timeout=3600*24*7)
def get_system_bot(email: str) -> UserProfile:
return UserProfile.objects.select_related().get(email__iexact=email.strip())
def get_user_by_id_in_realm_including_cross_realm(
uid: int,
realm: Optional[Realm],
) -> UserProfile:
user_profile = get_user_profile_by_id(uid)
if user_profile.realm == realm:
return user_profile
# Note: This doesn't validate whether the `realm` passed in is
# None/invalid for the CROSS_REALM_BOT_EMAILS case.
if user_profile.delivery_email in settings.CROSS_REALM_BOT_EMAILS:
return user_profile
raise UserProfile.DoesNotExist()
@cache_with_key(realm_user_dicts_cache_key, timeout=3600*24*7)
def get_realm_user_dicts(realm_id: int) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(
realm_id=realm_id,
).values(*realm_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600*24*7)
def active_user_ids(realm_id: int) -> List[int]:
query = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).values_list('id', flat=True)
return list(query)
@cache_with_key(active_non_guest_user_ids_cache_key, timeout=3600*24*7)
def active_non_guest_user_ids(realm_id: int) -> List[int]:
query = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).exclude(
role=UserProfile.ROLE_GUEST,
).values_list('id', flat=True)
return list(query)
def get_source_profile(email: str, string_id: str) -> Optional[UserProfile]:
try:
return get_user_by_delivery_email(email, get_realm(string_id))
except (Realm.DoesNotExist, UserProfile.DoesNotExist):
return None
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm: Realm) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields)
def is_cross_realm_bot_email(email: str) -> bool:
return email.lower() in settings.CROSS_REALM_BOT_EMAILS
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash: str = models.CharField(max_length=40, db_index=True, unique=True)
# Foreign key to the Recipient object for this Huddle.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
def get_huddle_hash(id_list: List[int]) -> str:
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
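# For example, get_huddle_hash([3, 1, 2, 2]) hashes the canonical key "1,2,3",
# so the resulting hash is independent of ordering and duplicate ids.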
def huddle_hash_cache_key(huddle_hash: str) -> str:
return f"huddle_by_hash:{huddle_hash}"
def get_huddle(id_list: List[int]) -> Huddle:
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash: str, id_list: List[int]) -> Huddle:
with transaction.atomic():
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
subs_to_create = [Subscription(recipient=recipient,
user_profile_id=user_profile_id)
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
class UserActivity(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
query: str = models.CharField(max_length=50, db_index=True)
count: int = models.IntegerField()
last_visit: datetime.datetime = models.DateTimeField('last visit')
class Meta:
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
start: datetime.datetime = models.DateTimeField('start time', db_index=True)
end: datetime.datetime = models.DateTimeField('end time', db_index=True)
class UserPresence(models.Model):
"""A record from the last time we heard from a given user on a given client.
This is a tricky subsystem, because it is highly optimized. See the docs:
https://zulip.readthedocs.io/en/latest/subsystems/presence.html
"""
class Meta:
unique_together = ("user_profile", "client")
index_together = [
("realm", "timestamp"),
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
# The time we heard this update from the client.
timestamp: datetime.datetime = models.DateTimeField('presence changed')
# The user was actively using this Zulip client as of `timestamp` (i.e.,
# they had interacted with the client recently). When the timestamp is
# itself recent, this is the green "active" status in the webapp.
ACTIVE = 1
# There had been no user activity (keyboard/mouse/etc.) on this client
# recently. So the client was online at the specified time, but it
# could be the user's desktop which they were away from. Displayed as
# orange/idle if the timestamp is current.
IDLE = 2
# Information from the client about the user's recent interaction with
# that client, as of `timestamp`. Possible values above.
#
# There is no "inactive" status, because that is encoded by the
# timestamp being old.
status: int = models.PositiveSmallIntegerField(default=ACTIVE)
@staticmethod
def status_to_string(status: int) -> str:
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
else: # nocoverage # TODO: Add a presence test to cover this.
raise ValueError(f'Unknown status: {status}')
@staticmethod
def to_presence_dict(client_name: str, status: int, dt: datetime.datetime, push_enabled: bool=False,
has_push_devices: bool=False) -> Dict[str, Any]:
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
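    # A minimal sketch of the resulting payload (field values are examples only):
    #   {'client': 'website', 'status': 'active',
    #    'timestamp': 1594815892, 'pushable': False}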
def to_dict(self) -> Dict[str, Any]:
return UserPresence.to_presence_dict(
self.client.name,
self.status,
self.timestamp,
)
@staticmethod
def status_from_string(status: str) -> Optional[int]:
if status == 'active':
status_val: Optional[int] = UserPresence.ACTIVE # See https://github.com/python/mypy/issues/2611
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
class UserStatus(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.OneToOneField(UserProfile, on_delete=CASCADE)
timestamp: datetime.datetime = models.DateTimeField()
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
NORMAL = 0
AWAY = 1
status: int = models.PositiveSmallIntegerField(default=NORMAL)
status_text: str = models.CharField(max_length=255, default='')
class DefaultStream(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
class Meta:
unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
MAX_NAME_LENGTH = 60
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
description: str = models.CharField(max_length=1024, default='')
class Meta:
unique_together = ("realm", "name")
def to_dict(self) -> Dict[str, Any]:
return dict(name=self.name,
id=self.id,
description=self.description,
streams=[stream.to_dict() for stream in self.streams.all()])
def get_default_stream_groups(realm: Realm) -> List[DefaultStreamGroup]:
return DefaultStreamGroup.objects.filter(realm=realm)
class AbstractScheduledJob(models.Model):
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
# JSON representation of arguments to consumer
data: str = models.TextField()
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class Meta:
abstract = True
class ScheduledEmail(AbstractScheduledJob):
# Exactly one of users or address should be set. These are
# duplicate values, used to efficiently filter the set of
# ScheduledEmails for use in clear_scheduled_emails; the
# recipients used for actually sending messages are stored in the
# data field of AbstractScheduledJob.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
users: Manager = models.ManyToManyField(UserProfile)
# Just the address part of a full "name <address>" email address
address: Optional[str] = models.EmailField(null=True, db_index=True)
# Valid types are below
WELCOME = 1
DIGEST = 2
INVITATION_REMINDER = 3
type: int = models.PositiveSmallIntegerField()
def __str__(self) -> str:
return f"<ScheduledEmail: {self.type} {self.address or list(self.users.all())} {self.scheduled_timestamp}>"
class MissedMessageEmailAddress(models.Model):
EXPIRY_SECONDS = 60 * 60 * 24 * 5
ALLOWED_USES = 1
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
email_token: str = models.CharField(max_length=34, unique=True, db_index=True)
    # Timestamp of when the missed-message address was generated.
# The address is valid until timestamp + EXPIRY_SECONDS.
timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
times_used: int = models.PositiveIntegerField(default=0, db_index=True)
def __str__(self) -> str:
return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)
def is_usable(self) -> bool:
not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
has_uses_left = self.times_used < self.ALLOWED_USES
return has_uses_left and not_expired
def increment_times_used(self) -> None:
self.times_used += 1
self.save(update_fields=["times_used"])
class ScheduledMessage(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
content: str = models.TextField()
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
delivered: bool = models.BooleanField(default=False)
SEND_LATER = 1
REMIND = 2
DELIVERY_TYPES = (
(SEND_LATER, 'send_later'),
(REMIND, 'remind'),
)
delivery_type: int = models.PositiveSmallIntegerField(
choices=DELIVERY_TYPES, default=SEND_LATER,
)
def topic_name(self) -> str:
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<ScheduledMessage: {display_recipient} {self.subject} {self.sender} {self.scheduled_timestamp}>"
EMAIL_TYPES = {
'followup_day1': ScheduledEmail.WELCOME,
'followup_day2': ScheduledEmail.WELCOME,
'digest': ScheduledEmail.DIGEST,
'invitation_reminder': ScheduledEmail.INVITATION_REMINDER,
}
class AbstractRealmAuditLog(models.Model):
"""Defines fields common to RealmAuditLog and RemoteRealmAuditLog."""
event_time: datetime.datetime = models.DateTimeField(db_index=True)
# If True, event_time is an overestimate of the true time. Can be used
# by migrations when introducing a new event_type.
backfilled: bool = models.BooleanField(default=False)
# Keys within extra_data, when extra_data is a json dict. Keys are strings because
# json keys must always be strings.
OLD_VALUE = '1'
NEW_VALUE = '2'
ROLE_COUNT = '10'
ROLE_COUNT_HUMANS = '11'
ROLE_COUNT_BOTS = '12'
extra_data: Optional[str] = models.TextField(null=True)
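    # Illustrative (hypothetical) extra_data payload for a property change,
    # using the OLD_VALUE / NEW_VALUE keys above:
    #   '{"1": "Old realm name", "2": "New realm name"}'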
# Event types
USER_CREATED = 101
USER_ACTIVATED = 102
USER_DEACTIVATED = 103
USER_REACTIVATED = 104
USER_ROLE_CHANGED = 105
USER_SOFT_ACTIVATED = 120
USER_SOFT_DEACTIVATED = 121
USER_PASSWORD_CHANGED = 122
USER_AVATAR_SOURCE_CHANGED = 123
USER_FULL_NAME_CHANGED = 124
USER_EMAIL_CHANGED = 125
USER_TOS_VERSION_CHANGED = 126
USER_API_KEY_CHANGED = 127
USER_BOT_OWNER_CHANGED = 128
USER_DEFAULT_SENDING_STREAM_CHANGED = 129
USER_DEFAULT_REGISTER_STREAM_CHANGED = 130
USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED = 131
USER_NOTIFICATION_SETTINGS_CHANGED = 132
REALM_DEACTIVATED = 201
REALM_REACTIVATED = 202
REALM_SCRUBBED = 203
REALM_PLAN_TYPE_CHANGED = 204
REALM_LOGO_CHANGED = 205
REALM_EXPORTED = 206
REALM_PROPERTY_CHANGED = 207
REALM_ICON_SOURCE_CHANGED = 208
SUBSCRIPTION_CREATED = 301
SUBSCRIPTION_ACTIVATED = 302
SUBSCRIPTION_DEACTIVATED = 303
SUBSCRIPTION_PROPERTY_CHANGED = 304
STRIPE_CUSTOMER_CREATED = 401
STRIPE_CARD_CHANGED = 402
STRIPE_PLAN_CHANGED = 403
STRIPE_PLAN_QUANTITY_RESET = 404
CUSTOMER_CREATED = 501
CUSTOMER_PLAN_CREATED = 502
CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN = 503
STREAM_CREATED = 601
STREAM_DEACTIVATED = 602
STREAM_NAME_CHANGED = 603
event_type: int = models.PositiveSmallIntegerField()
# event_types synced from on-prem installations to Zulip Cloud when
# billing for mobile push notifications is enabled. Every billing
# event_type should have ROLE_COUNT populated in extra_data.
SYNCED_BILLING_EVENTS = [
USER_CREATED, USER_ACTIVATED, USER_DEACTIVATED, USER_REACTIVATED, USER_ROLE_CHANGED,
REALM_DEACTIVATED, REALM_REACTIVATED]
class Meta:
abstract = True
class RealmAuditLog(AbstractRealmAuditLog):
"""
RealmAuditLog tracks important changes to users, streams, and
realms in Zulip. It is intended to support both
    debugging/introspection (e.g. determining when a user left a
    given stream) as well as helping with some database migrations where
we might be able to do a better data backfill with it. Here are a
few key details about how this works:
* acting_user is the user who initiated the state change
* modified_user (if present) is the user being modified
* modified_stream (if present) is the stream being modified
For example:
* When a user subscribes another user to a stream, modified_user,
acting_user, and modified_stream will all be present and different.
* When an administrator changes an organization's realm icon,
acting_user is that administrator and both modified_user and
modified_stream will be None.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
acting_user: Optional[UserProfile] = models.ForeignKey(
UserProfile, null=True, related_name="+", on_delete=CASCADE,
)
modified_user: Optional[UserProfile] = models.ForeignKey(
UserProfile, null=True, related_name="+", on_delete=CASCADE,
)
modified_stream: Optional[Stream] = models.ForeignKey(
Stream, null=True, on_delete=CASCADE,
)
event_last_message_id: Optional[int] = models.IntegerField(null=True)
def __str__(self) -> str:
if self.modified_user is not None:
return f"<RealmAuditLog: {self.modified_user} {self.event_type} {self.event_time} {self.id}>"
if self.modified_stream is not None:
return f"<RealmAuditLog: {self.modified_stream} {self.event_type} {self.event_time} {self.id}>"
return f"<RealmAuditLog: {self.realm} {self.event_type} {self.event_time} {self.id}>"
class UserHotspot(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
hotspot: str = models.CharField(max_length=30)
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now)
class Meta:
unique_together = ("user", "hotspot")
def check_valid_user_ids(realm_id: int, val: object,
allow_deactivated: bool=False) -> List[int]:
user_ids = check_list(check_int)("User IDs", val)
realm = Realm.objects.get(id=realm_id)
for user_id in user_ids:
# TODO: Structurally, we should be doing a bulk fetch query to
# get the users here, not doing these in a loop. But because
# this is a rarely used feature and likely to never have more
# than a handful of users, it's probably mostly OK.
try:
user_profile = get_user_profile_by_id_in_realm(user_id, realm)
except UserProfile.DoesNotExist:
raise ValidationError(_('Invalid user ID: %d') % (user_id))
if not allow_deactivated:
if not user_profile.is_active:
raise ValidationError(_('User with ID %d is deactivated') % (user_id))
if (user_profile.is_bot):
raise ValidationError(_('User with ID %d is a bot') % (user_id))
return user_ids
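# Hedged usage sketch (not in the original source): how a caller might validate
# the raw value submitted for a USER custom profile field. The realm_id and raw
# value are assumed to come from the request.
def _example_validate_user_ids(realm_id: int, raw_value: object) -> List[int]:
    # Deactivated users and bots are rejected by check_valid_user_ids above.
    return check_valid_user_ids(realm_id, raw_value, allow_deactivated=False)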
class CustomProfileField(models.Model):
"""Defines a form field for the per-realm custom profile fields feature.
See CustomProfileFieldValue for an individual user's values for one of
these fields.
"""
HINT_MAX_LENGTH = 80
NAME_MAX_LENGTH = 40
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.CharField(max_length=NAME_MAX_LENGTH)
hint: Optional[str] = models.CharField(max_length=HINT_MAX_LENGTH, default='', null=True)
order: int = models.IntegerField(default=0)
SHORT_TEXT = 1
LONG_TEXT = 2
CHOICE = 3
DATE = 4
URL = 5
USER = 6
EXTERNAL_ACCOUNT = 7
    # These are the fields whose validators require more than the var_name
    # and value arguments, i.e. CHOICE requires field_data and USER requires
    # a realm as an argument.
CHOICE_FIELD_TYPE_DATA: List[ExtendedFieldElement] = [
(CHOICE, str(_('List of options')), validate_choice_field, str, "CHOICE"),
]
USER_FIELD_TYPE_DATA: List[UserFieldElement] = [
(USER, str(_('Person picker')), check_valid_user_ids, ast.literal_eval, "USER"),
]
CHOICE_FIELD_VALIDATORS: Dict[int, ExtendedValidator] = {
item[0]: item[2] for item in CHOICE_FIELD_TYPE_DATA
}
USER_FIELD_VALIDATORS: Dict[int, RealmUserValidator] = {
item[0]: item[2] for item in USER_FIELD_TYPE_DATA
}
FIELD_TYPE_DATA: List[FieldElement] = [
# Type, Display Name, Validator, Converter, Keyword
(SHORT_TEXT, str(_('Short text')), check_short_string, str, "SHORT_TEXT"),
(LONG_TEXT, str(_('Long text')), check_long_string, str, "LONG_TEXT"),
(DATE, str(_('Date picker')), check_date, str, "DATE"),
(URL, str(_('Link')), check_url, str, "URL"),
(EXTERNAL_ACCOUNT, str(_('External account')), check_short_string, str, "EXTERNAL_ACCOUNT"),
]
ALL_FIELD_TYPES = [*FIELD_TYPE_DATA, *CHOICE_FIELD_TYPE_DATA, *USER_FIELD_TYPE_DATA]
FIELD_VALIDATORS: Dict[int, Validator[Union[int, str, List[int]]]] = {item[0]: item[2] for item in FIELD_TYPE_DATA}
FIELD_CONVERTERS: Dict[int, Callable[[Any], Any]] = {item[0]: item[3] for item in ALL_FIELD_TYPES}
FIELD_TYPE_CHOICES: List[Tuple[int, str]] = [(item[0], item[1]) for item in ALL_FIELD_TYPES]
FIELD_TYPE_CHOICES_DICT: Dict[str, Dict[str, Union[str, int]]] = {
item[4]: {"id": item[0], "name": item[1]} for item in ALL_FIELD_TYPES
}
field_type: int = models.PositiveSmallIntegerField(
choices=FIELD_TYPE_CHOICES, default=SHORT_TEXT,
)
# A JSON blob of any additional data needed to define the field beyond
# type/name/hint.
#
# The format depends on the type. Field types SHORT_TEXT, LONG_TEXT,
# DATE, URL, and USER leave this null. Fields of type CHOICE store the
# choices' descriptions.
#
# Note: There is no performance overhead of using TextField in PostgreSQL.
# See https://www.postgresql.org/docs/9.0/static/datatype-character.html
field_data: Optional[str] = models.TextField(default='', null=True)
class Meta:
unique_together = ('realm', 'name')
def as_dict(self) -> ProfileDataElementBase:
return {
'id': self.id,
'name': self.name,
'type': self.field_type,
'hint': self.hint,
'field_data': self.field_data,
'order': self.order,
}
def is_renderable(self) -> bool:
if self.field_type in [CustomProfileField.SHORT_TEXT, CustomProfileField.LONG_TEXT]:
return True
return False
def __str__(self) -> str:
return f"<CustomProfileField: {self.realm} {self.name} {self.field_type} {self.order}>"
def custom_profile_fields_for_realm(realm_id: int) -> List[CustomProfileField]:
return CustomProfileField.objects.filter(realm=realm_id).order_by('order')
class CustomProfileFieldValue(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
field: CustomProfileField = models.ForeignKey(CustomProfileField, on_delete=CASCADE)
value: str = models.TextField()
rendered_value: Optional[str] = models.TextField(null=True, default=None)
class Meta:
unique_together = ('user_profile', 'field')
def __str__(self) -> str:
return f"<CustomProfileFieldValue: {self.user_profile} {self.field} {self.value}>"
# Interfaces for services
# They provide additional functionality, like parsing the message to obtain the query URL and the data to be
# sent to that URL, and parsing the response.
GENERIC_INTERFACE = 'GenericService'
SLACK_INTERFACE = 'SlackOutgoingWebhookService'
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
# Bot user corresponding to the Service. The bot_type of this user
    # determines the type of service. If non-bot services are added later,
# user_profile can also represent the owner of the Service.
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
base_url: str = models.TextField()
token: str = models.TextField()
# Interface / API version of the service.
interface: int = models.PositiveSmallIntegerField(default=1)
# Valid interfaces are {generic, zulip_bot_service, slack}
GENERIC = 1
SLACK = 2
ALLOWED_INTERFACE_TYPES = [
GENERIC,
SLACK,
]
# N.B. If we used Django's choice=... we would get this for free (kinda)
_interfaces: Dict[int, str] = {
GENERIC: GENERIC_INTERFACE,
SLACK: SLACK_INTERFACE,
}
def interface_name(self) -> str:
# Raises KeyError if invalid
return self._interfaces[self.interface]
def get_bot_services(user_profile_id: int) -> List[Service]:
return list(Service.objects.filter(user_profile__id=user_profile_id))
def get_service_profile(user_profile_id: int, service_name: str) -> Service:
return Service.objects.get(user_profile__id=user_profile_id, name=service_name)
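# Illustrative only (not in the original source): listing the interface names
# of all Services attached to a bot, using the helpers and constants above.
def _example_bot_interface_names(user_profile_id: int) -> List[str]:
    return [service.interface_name() for service in get_bot_services(user_profile_id)]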
class BotStorageData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class BotConfigData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class InvalidFakeEmailDomain(Exception):
pass
def get_fake_email_domain() -> str:
try:
# Check that the fake email domain can be used to form valid email addresses.
validate_email("bot@" + settings.FAKE_EMAIL_DOMAIN)
except ValidationError:
raise InvalidFakeEmailDomain(settings.FAKE_EMAIL_DOMAIN + ' is not a valid domain. '
'Consider setting the FAKE_EMAIL_DOMAIN setting.')
return settings.FAKE_EMAIL_DOMAIN
class AlertWord(models.Model):
# Realm isn't necessary, but it's a nice denormalization. Users
# never move to another realm, so it's static, and having Realm
# here optimizes the main query on this table, which is fetching
# all the alert words in a realm.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# Case-insensitive name for the alert word.
word: str = models.TextField()
class Meta:
unique_together = ("user_profile", "word")
def flush_realm_alert_words(realm: Realm) -> None:
cache_delete(realm_alert_words_cache_key(realm))
cache_delete(realm_alert_words_automaton_cache_key(realm))
def flush_alert_word(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance'].realm
flush_realm_alert_words(realm)
post_save.connect(flush_alert_word, sender=AlertWord)
post_delete.connect(flush_alert_word, sender=AlertWord)
| brainwane/zulip | zerver/models.py | Python | apache-2.0 | 128,981 | ["VisIt"] | f86c70a5bb374a148889db0b2bf2a7d994902dbb43bbcb186099b9372beb13c7 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Yambo(AutotoolsPackage):
"""Yambo is a FORTRAN/C code for Many-Body calculations in solid
state and molecular physics.
Yambo relies on the Kohn-Sham wavefunctions generated by two DFT
public codes: abinit, and PWscf. The code was originally developed
in the Condensed Matter Theoretical Group of the Physics Department
at the University of Rome "Tor Vergata" by Andrea Marini. Previous
to its release under the GPL license, yambo was known as SELF.
"""
homepage = "http://www.yambo-code.org/index.php"
url = "https://github.com/yambo-code/yambo/archive/4.2.2.tar.gz"
version('4.2.2', sha256='86b4ebe679387233266aba49948246c85a32b1e6840d024f162962bd0112448c')
version('4.2.1', sha256='8ccd0ca75cc32d9266d4a37edd2a7396cf5038f3a68be07c0f0f77d1afc72bdc')
version('4.2.0', sha256='9f78c4237ff363ff4e9ea5eeea671b6fff783d9a6078cc31b0b1abeb1f040f4d')
variant('dp', default=False, description='Enable double precision')
variant(
'profile', values=any_combination_of('time', 'memory'),
description='Activate profiling of specific sections'
)
variant(
'io', values=any_combination_of('iotk', 'etsf-io'),
description='Activate support for different io formats (requires network access)', # noqa
)
# MPI + OpenMP parallelism
variant('mpi', default=True, description='Enable MPI support')
variant('openmp', default=False, description='Enable OpenMP support')
depends_on('blas')
depends_on('lapack')
# MPI dependencies are forced, until we have proper forwarding of variants
#
# Note that yambo is used as an application, and not linked as a library,
# thus there will be no case where another package pulls-in e.g.
# netcdf-c+mpi and wants to depend on yambo~mpi.
depends_on('mpi', when='+mpi')
depends_on('netcdf-c+mpi', when='+mpi')
depends_on('hdf5+mpi', when='+mpi')
depends_on('fftw+mpi', when='+mpi')
depends_on('scalapack', when='+mpi')
depends_on('netcdf-c~mpi', when='~mpi')
depends_on('hdf5~mpi', when='~mpi')
depends_on('fftw~mpi', when='~mpi')
depends_on('hdf5+fortran')
depends_on('netcdf-c')
depends_on('netcdf-fortran')
depends_on('[email protected]:')
build_targets = ['all']
parallel = False
    # The configure script in the package has the string 'cat config/report'
# hard-coded, which causes a failure at configure time due to the
# current working directory in Spack. Fix this by using the absolute
# path to the file.
@run_before('configure')
def filter_configure(self):
report_abspath = join_path(self.build_directory, 'config', 'report')
filter_file('config/report', report_abspath, 'configure')
def enable_or_disable_time(self, activated):
return '--enable-time-profile' if activated else '--disable-time-profile' # noqa: E501
def enable_or_disable_memory(self, activated):
return '--enable-memory-profile' if activated else '--disable-memory-profile' # noqa: E501
def enable_or_disable_openmp(self, activated):
return '--enable-open-mp' if activated else '--disable-open-mp'
def configure_args(self):
args = [
# As of version 4.2.1 there are hard-coded paths that make
# the build process fail if the target prefix is not the
# configure directory
'--prefix={0}'.format(self.stage.source_path),
'--disable-keep-objects',
'--with-editor=none'
]
spec = self.spec
# Double precision
args.extend(self.enable_or_disable('dp'))
# Application profiling
args.extend(self.enable_or_disable('profile'))
# MPI + threading
args.extend(self.enable_or_disable('mpi'))
args.extend(self.enable_or_disable('openmp'))
# LAPACK
if '+mpi' in spec:
args.append('--with-scalapack-libs={0}'.format(
spec['scalapack'].libs +
spec['lapack'].libs +
spec['blas'].libs
))
args.extend([
'--with-blas-libs={0}'.format(spec['blas'].libs),
'--with-lapack-libs={0}'.format(spec['lapack'].libs)
])
# Netcdf
args.extend([
'--enable-netcdf-hdf5',
'--enable-hdf5-compression',
'--with-hdf5-libs={0}'.format(spec['hdf5'].libs),
'--with-netcdf-path={0}'.format(spec['netcdf-c'].prefix),
'--with-netcdff-path={0}'.format(spec['netcdf-fortran'].prefix)
])
args.extend(self.enable_or_disable('io'))
# Other dependencies
args.append('--with-fft-path={0}'.format(spec['fftw'].prefix))
args.append('--with-libxc-path={0}'.format(spec['libxc'].prefix))
return args
def install(self, spec, prefix):
        # As of version 4.2.1 an 'install' target is advertised,
# but not present
install_tree('bin', prefix.bin)
install_tree('lib', prefix.lib)
install_tree('include', prefix.include)
install_tree('driver', prefix.driver)
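    # Illustrative usage (not part of the package recipe): example Spack specs
    # exercising the variants defined above.
    #
    #   spack install yambo@4.2.2 +mpi +openmp io=iotk,etsf-io
    #   spack install yambo ~mpi +dp profile=time,memory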
| iulian787/spack | var/spack/repos/builtin/packages/yambo/package.py | Python | lgpl-2.1 | 5,358 | ["ABINIT", "NetCDF", "Yambo"] | 5f239bf882fe25bd770360bdcf2731f28e943368043fb4e18e0bffd9cceb7020 |
import numpy as np
from meshio import Mesh
from meshio.vtk_io import vtk_to_meshio_type
def read(filetype, filename):
import vtk
from vtk.util import numpy as numpy_support
def _read_data(data):
"""Extract numpy arrays from a VTK data set."""
# Go through all arrays, fetch data.
out = {}
for k in range(data.GetNumberOfArrays()):
array = data.GetArray(k)
if array:
array_name = array.GetName()
out[array_name] = np.copy(vtk.util.numpy_support.vtk_to_numpy(array))
return out
def _read_cells(vtk_mesh):
data = np.copy(
vtk.util.numpy_support.vtk_to_numpy(vtk_mesh.GetCells().GetData())
)
offsets = np.copy(
vtk.util.numpy_support.vtk_to_numpy(vtk_mesh.GetCellLocationsArray())
)
types = np.copy(
vtk.util.numpy_support.vtk_to_numpy(vtk_mesh.GetCellTypesArray())
)
# `data` is a one-dimensional vector with
# (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ...
# Translate it into the cells dictionary.
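        # Worked example (illustrative values): for two triangles with point
        # ids (0, 1, 2) and (1, 2, 3),
        #   data    = [3, 0, 1, 2, 3, 1, 2, 3]
        #   offsets = [0, 4]
        #   types   = [5, 5]        # 5 is the VTK triangle type
        # and the loop below produces {"triangle": [[0, 1, 2], [1, 2, 3]]}.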
cells = {}
for vtk_type, meshio_type in vtk_to_meshio_type.items():
# Get all offsets for vtk_type
os = offsets[np.argwhere(types == vtk_type).transpose()[0]]
num_cells = len(os)
if num_cells > 0:
if meshio_type == "polygon":
for idx_cell in range(num_cells):
num_pts = data[os[idx_cell]]
cell = data[os[idx_cell] + 1 : os[idx_cell] + 1 + num_pts]
key = meshio_type + str(num_pts)
if key in cells:
cells[key] = np.vstack([cells[key], cell])
else:
cells[key] = cell
else:
num_pts = data[os[0]]
# instantiate the array
arr = np.empty((num_cells, num_pts), dtype=int)
# store the num_pts entries after the offsets into the columns
# of arr
for k in range(num_pts):
arr[:, k] = data[os + k + 1]
cells[meshio_type] = arr
return cells
if filetype in ["vtk", "vtk-ascii", "vtk-binary"]:
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filename)
reader.SetReadAllNormals(1)
reader.SetReadAllScalars(1)
reader.SetReadAllTensors(1)
reader.SetReadAllVectors(1)
reader.Update()
vtk_mesh = reader.GetOutput()
elif filetype in ["vtu", "vtu-ascii", "vtu-binary"]:
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
vtk_mesh = reader.GetOutput()
elif filetype in ["xdmf", "xdmf2"]:
reader = vtk.vtkXdmfReader()
reader.SetFileName(filename)
reader.SetReadAllColorScalars(1)
reader.SetReadAllFields(1)
reader.SetReadAllNormals(1)
reader.SetReadAllScalars(1)
reader.SetReadAllTCoords(1)
reader.SetReadAllTensors(1)
reader.SetReadAllVectors(1)
reader.Update()
vtk_mesh = reader.GetOutputDataObject(0)
elif filetype == "xdmf3":
reader = vtk.vtkXdmf3Reader()
reader.SetFileName(filename)
reader.SetReadAllColorScalars(1)
reader.SetReadAllFields(1)
reader.SetReadAllNormals(1)
reader.SetReadAllScalars(1)
reader.SetReadAllTCoords(1)
reader.SetReadAllTensors(1)
reader.SetReadAllVectors(1)
reader.Update()
vtk_mesh = reader.GetOutputDataObject(0)
else:
assert filetype == "exodus", f"Unknown file type '{filename}'."
reader = vtk.vtkExodusIIReader()
reader.SetFileName(filename)
vtk_mesh = _read_exodusii_mesh(reader)
# Explicitly extract points, cells, point data, field data
points = np.copy(numpy_support.vtk_to_numpy(vtk_mesh.GetPoints().GetData()))
cells = _read_cells(vtk_mesh)
point_data = _read_data(vtk_mesh.GetPointData())
field_data = _read_data(vtk_mesh.GetFieldData())
cell_data = _read_data(vtk_mesh.GetCellData())
# split cell_data by the cell type
cd = {}
index = 0
for cell_type in cells:
num_cells = len(cells[cell_type])
cd[cell_type] = {}
for name, array in cell_data.items():
cd[cell_type][name] = array[index : index + num_cells]
index += num_cells
cell_data = cd
return Mesh(
points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data
)
def _read_exodusii_mesh(reader, timestep=None):
"""Uses a vtkExodusIIReader to return a vtkUnstructuredGrid."""
# Fetch metadata.
reader.UpdateInformation()
# Set time step to read.
if timestep:
reader.SetTimeStep(timestep)
# Make sure the point data are read during Update().
for k in range(reader.GetNumberOfPointResultArrays()):
arr_name = reader.GetPointResultArrayName(k)
reader.SetPointResultArrayStatus(arr_name, 1)
# Make sure the cell data are read during Update().
for k in range(reader.GetNumberOfElementResultArrays()):
arr_name = reader.GetElementResultArrayName(k)
reader.SetElementResultArrayStatus(arr_name, 1)
# Make sure all field data is read.
for k in range(reader.GetNumberOfGlobalResultArrays()):
arr_name = reader.GetGlobalResultArrayName(k)
reader.SetGlobalResultArrayStatus(arr_name, 1)
# Read the file.
reader.Update()
out = reader.GetOutput()
# Loop through the blocks and search for a vtkUnstructuredGrid.
    # In Exodus, different element types are stored in different meshes, with
# point information possibly duplicated.
vtk_mesh = []
for i in range(out.GetNumberOfBlocks()):
blk = out.GetBlock(i)
for j in range(blk.GetNumberOfBlocks()):
sub_block = blk.GetBlock(j)
if sub_block is not None and sub_block.IsA("vtkUnstructuredGrid"):
vtk_mesh.append(sub_block)
assert vtk_mesh, "No 'vtkUnstructuredGrid' found!"
assert len(vtk_mesh) == 1, "More than one 'vtkUnstructuredGrid' found!"
# Cut off trailing '_' from array names.
for k in range(vtk_mesh[0].GetPointData().GetNumberOfArrays()):
array = vtk_mesh[0].GetPointData().GetArray(k)
array_name = array.GetName()
if array_name[-1] == "_":
array.SetName(array_name[0:-1])
# time_values = reader.GetOutputInformation(0).Get(
# vtkStreamingDemandDrivenPipeline.TIME_STEPS()
# )
return vtk_mesh[0] # , time_values
| nschloe/meshio | tests/legacy_reader.py | Python | mit | 6,813 | ["VTK"] | 47d0ae5f360636c44bad8a73aa62ca0fc6371f62c3449ca3ccd33bcc28a4ecf0 |
from __future__ import print_function
import time
from numpy import pi, sin, cos, linspace
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.models.glyphs import Line
from bokeh.models import (
Plot, DataRange1d, LinearAxis, Range1d,
ColumnDataSource, PanTool, WheelZoomTool
)
from bokeh.session import Session
document = Document()
session = Session()
session.use_doc('line_animate')
session.load_document(document)
x = linspace(-6*pi, 6*pi, 1000)
y = sin(x)
z = cos(x)
source = ColumnDataSource(data=dict(x=x, y=y, z=z))
plot = Plot(x_range=Range1d(-2*pi, 2*pi), y_range=DataRange1d(), min_border=50)
line_glyph = Line(x="x", y="y", line_color="blue")
plot.add_glyph(source, line_glyph)
line_glyph2 = Line(x="x", y="z", line_color="red")
plot.add_glyph(source, line_glyph2)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())
document.add(plot)
session.store_document(document)
link = session.object_link(document.context)
print("please visit %s to see plots" % link)
view(link)
print("\nanimating... press ctrl-C to stop")
while True:
for i in linspace(-2*pi, 2*pi, 50):
source.data['x'] = x + i
session.store_objects(source)
time.sleep(0.05)
| akloster/bokeh | examples/glyphs/line_animate.py | Python | bsd-3-clause | 1,296 | ["VisIt"] | 4bd090e72513a17ab07614580414c815837906ccff070b9ad0543f4695bfd375 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module of helper functions for ccresponse distributed property calculations.
Defines functions for interacting with the database created by the run_XXX
driver function.
Properties that are able to use this module should be added to
the registered_props dictionary.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import shelve
import copy
import os
from psi4 import core
from psi4.driver import p4util
from psi4.driver.constants import *
def generate_inputs(db,name):
"""
Generates the input files in each sub-directory of the
distributed finite differences property calculation.
name: ( string ) method name passed to calling driver,
db: (database) The database object associated with this property
calculation. On exit this db['inputs_generated'] has been set True
Returns: nothing
Throws: Exception if the number of atomic displacements is not correct.
"""
molecule = core.get_active_molecule()
natom = molecule.natom()
# get list of displacements
displacement_geoms = core.atomic_displacements(molecule)
# Sanity Check
    # there should be 3 coords * natom * 2 directions (+/-)
if not (6 * natom) == len(displacement_geoms):
raise Exception('The number of atomic displacements should be 6 times'
' the number of atoms!')
displacement_names = db['job_status'].keys()
for n, entry in enumerate(displacement_names):
if not os.path.exists(entry):
os.makedirs(entry)
        # Set up the input file string
inp_template = 'molecule {molname}_{disp}'
inp_template += ' {{\n{molecule_info}\n}}\n{options}\n{jobspec}\n'
molecule.set_geometry(displacement_geoms[n])
molecule.fix_orientation(True)
molecule.fix_com(True)
inputfile = open('{0}/input.dat'.format(entry), 'w')
inputfile.write("# This is a psi4 input file auto-generated for"
"computing properties by finite differences.\n\n")
inputfile.write(
inp_template.format(
molname=molecule.name(),
disp=entry,
molecule_info=molecule.create_psi4_string_from_molecule(),
options=p4util.format_options_for_input(),
jobspec=db['prop_cmd']))
inputfile.close()
db['inputs_generated'] = True
# END generate_inputs
def initialize_database(database, name, prop, properties_array, additional_kwargs=None):
"""
Initialize the database for computation of some property
using distributed finite differences driver
database: (database) the database object passed from the caller
name: (string) name as passed to calling driver
prop: (string) the property being computed, used to add xxx_computed flag
to database
    properties_array: (list of strings) properties to go in
properties kwarg of the property() cmd in each sub-dir
additional_kwargs: (list of strings) *optional*
any additional kwargs that should go in the call to the
property() driver method in each subdir
Returns: nothing
Throws: nothing
"""
database['inputs_generated'] = False
database['jobs_complete'] = False
prop_cmd ="property('{0}',".format(name)
prop_cmd += "properties=[ '{}' ".format(properties_array[0])
if len(properties_array) > 1:
for element in properties_array[1:]:
prop_cmd += ",'{}'".format(element)
prop_cmd += "]"
if additional_kwargs is not None:
for arg in additional_kwargs:
prop_cmd += ", {}".format(arg)
prop_cmd += ")"
database['prop_cmd'] = prop_cmd
database['job_status'] = collections.OrderedDict()
# Populate the job_status dict
molecule = core.get_active_molecule()
natom = molecule.natom()
coordinates = ['x', 'y', 'z']
#step_direction = ['p', 'm'] changing due to change in findif atomic_displacements
step_direction = ['m', 'p']
for atom in range(1, natom + 1):
for coord in coordinates:
for step in step_direction:
job_name = '{}_{}_{}'.format(atom, coord, step)
database['job_status'].update({job_name: 'not_started'})
database['{}_computed'.format(prop)] = False
# END initialize_database()
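# Illustrative example (not part of the module): for a call such as
#   initialize_database(db, 'ccsd', 'roa', ['roa_tensor'], additional_kwargs=["gauge='velocity'"])
# the stored command string becomes
#   property('ccsd',properties=[ 'roa_tensor' ], gauge='velocity')
# and job_status is keyed by '<atom>_<coord>_<step>' names such as '1_x_m'.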
def stat(db):
"""
Checks displacement sub_directories for the status of each
displacement computation
db: (database) the database storing information for this distributed
property calculation
Returns: nothing
Throws: nothing
"""
n_finished = 0
for job, status in db['job_status'].items():
if status == 'finished':
n_finished += 1
elif status in ('not_started', 'running'):
try:
with open("{}/output.dat".format(job)) as outfile:
outfile.seek(-150, 2)
for line in outfile:
if 'Psi4 exiting successfully' in line:
db['job_status'][job] = 'finished'
n_finished += 1
break
else:
db['job_status'][job] = 'running'
except:
pass
# check all jobs done?
if n_finished == len(db['job_status'].keys()):
db['jobs_complete'] = True
# END stat()
| jH0ward/psi4 | psi4/driver/procrouting/findif_response_utils/db_helper.py | Python | lgpl-3.0 | 6,374 | ["Psi4"] | ea7628867d398b90e92e8f52e7b66c49e95a32ad4779171811365982059bbde2 |
import numpy
import pylab
import moose
import time
def main():
"""
This example implements a reaction-diffusion like system which is
bistable and propagates losslessly. It is based on the NEURON example
rxdrun.py, but incorporates more compartments and runs for a longer time.
The system is implemented as a hybrid of a reaction and a function which
sets its rates. Please see rxdFuncDiffusion.py for a variant that uses
just a function object to set up the system.
"""
dt = 0.1
# define the geometry
compt = moose.CylMesh( '/cylinder' )
compt.r0 = compt.r1 = 1
compt.diffLength = 0.2
compt.x1 = 100
assert( compt.numDiffCompts == compt.x1/compt.diffLength )
#define the molecule. Its geometry is defined by its parent volume, cylinder
c = moose.Pool( '/cylinder/pool' )
c.diffConst = 1 # define diffusion constant
# There is an implicit reaction substrate/product. MOOSE makes it explicit.
buf = moose.BufPool( '/cylinder/buf' )
buf.nInit = 1
# The reaction is something entirely peculiar, not a chemical thing.
reaction = moose.Reac( '/cylinder/reac' )
reaction.Kb = 0
# so here we set up a function calculation to do the same thing.
func = moose.Function( '/cylinder/reac/func' )
func.expr = "(1 - x0) * (0.3 - x0)"
func.x.num = 1 #specify number of input variables.
#Connect the reaction to the pools
moose.connect( reaction, 'sub', c, 'reac' )
moose.connect( reaction, 'prd', buf, 'reac' )
#Connect the function to the reaction
moose.connect( func, 'valueOut', reaction, 'setNumKf' )
#Connect the molecules to the func
moose.connect( c, 'nOut', func.x[0], 'input' )
#Set up solvers
ksolve = moose.Ksolve( '/cylinder/ksolve' )
dsolve = moose.Dsolve( '/cylinder/dsolve' )
stoich = moose.Stoich( '/cylinder/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.dsolve = dsolve
stoich.reacSystemPath = '/cylinder/##'
for i in range( 10, 18 ):
moose.setClock( i, dt )
#initialize
x = numpy.arange( 0, compt.x1, compt.diffLength )
c.vec.nInit = [ (q < 0.2 * compt.x1) for q in x ]
# Run and plot it.
moose.reinit()
updateDt = 50
runtime = updateDt * 4
plt = pylab.plot( x, c.vec.n, label='t = 0 ')
t1 = time.time()
for t in range( 0, runtime-1, updateDt ):
moose.start( updateDt )
plt = pylab.plot( x, c.vec.n, label='t = '+str(t + updateDt) )
print(("Time = ", time.time() - t1))
pylab.ylim( 0, 1.05 )
pylab.legend()
pylab.show()
if __name__ == '__main__':
main()
| BhallaLab/moose-examples | snippets/rxdReacDiffusion.py | Python | gpl-2.0 | 2,651 | ["MOOSE", "NEURON"] | 69ddbcfdd9480544c0172a7fd56df24b934a4861e2babc53e34ad91a97bfa75f |
from common2 import *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost -- neurons with empty mem slots learn faster
# + noise --
# + dropout -- temporal disabling of neurons
# + decay -- remove from mem
# + negatives -- learning to avoid detecting some patterns
# + fatigue -- winner has lower score for some time
# ~ sklearn -- compatible api
# - prune -- if input < mem shrink mem ? (problem with m > input len)
# - weights -- sample weights for imbalanced classes
# - popularity -- most popular neuron is cloned / killed
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# - https://medium.com/breathe-publication/neural-networks-building-blocks-a5c47bcd7c8d
# - https://distill.pub/2016/augmented-rnns/
# - http://akosiorek.github.io/ml/2017/10/14/visual-attention.html
# + IDEA:
# append activated neurons indexes to queue available as input
# queue ages at constant rate and drops oldest values
# - IDEA:
# each neuron has small memory of activation prior to winning
# this memory is compared to ctx and intersection added to score
# winner updated this memory
# OPTION: several memories with diferent time frames
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSIONS:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
class rsm:
def __init__(self,n,m,c=0,**kw):
"""Random Sample Memory
n -- number of neurons
m -- max connections per neuron (memory)
"""
self.mem = {j:set() for j in range(n)}
self.win = {j:0 for j in range(n)}
self.tow = {j:-42000 for j in range(n)} # time of win
self.t = 0
self.ctx = deque(maxlen=c) # context queue
# cfg
cfg = {}
cfg['n'] = n
cfg['m'] = m
cfg['c'] = c
cfg['k'] = kw.get('k',1)
cfg['method'] = kw.get('method',1)
cfg['cutoff'] = kw.get('cutoff',0.5)
cfg['decay'] = kw.get('decay',0.0)
cfg['dropout'] = kw.get('dropout',0.0)
cfg['fatigue'] = kw.get('fatigue',0)
cfg['boost'] = kw.get('boost',True)
cfg['noise'] = kw.get('noise',True)
cfg['sequence'] = kw.get('sequence',False)
cfg.update(kw)
self.cfg = cfg
# ---[ core ]---------------------------------------------------------------
def new_ctx(self):
self.ctx.clear()
# TODO -- input length vs mem length
# TODO -- args from cfg
def scores(self, input, raw=False, boost=False, noise=False, fatigue=0, dropout=0.0, **ignore): # -> dict[i] -> scores
"""
input -- sparse binary features
raw -- disable all postprocessing
boost -- improve scores based on number of unconnected synapses (TODO)
noise -- randomize scores to prevent snowballing
dropout -- temporal disabling of neurons
"""
mem = self.mem
tow = self.tow
N = self.cfg['n']
M = self.cfg['m']
t = self.t
scores = {}
for j in mem:
scores[j] = len(set(input) & mem[j])
if raw:
return scores
if noise:
for j in mem:
scores[j] += 0.9*random()
if boost:
for j in mem:
scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
# TODO boost also based on low win ratio / low tow
if fatigue:
for j in mem:
dt = 1.0*min(fatigue,t - tow[j])
factor = dt / fatigue
scores[j] *= factor
if dropout:
k = int(round(float(dropout)*N))
for j in combinations(N,k):
scores[j] = -1
return scores
def learn(self, input, negative=False, **ignore):
for i in range(0,len(input),10):
self.learn_(set(input[i:i+10]),negative=negative)
def learn_(self, input, negative=False, **ignore):
"""
input -- sparse binary features
k -- number of winning neurons
"""
mem = self.mem
win = self.win
tow = self.tow
ctx = self.ctx
t = self.t
cfg = self.cfg
M = self.cfg['m']
N = self.cfg['n']
k = self.cfg['k']
decay = self.cfg['decay']
sequence = self.cfg['sequence']
known_inputs = set()
for j in mem:
known_inputs.update(mem[j])
# context
input = input | set(ctx)
# scoring
scores = self.scores(input, **cfg)
winners = top(k,scores)
for j in winners:
# negative learning
if negative:
mem[j].difference_update(input)
continue
# positive learning
unknown_inputs = input - known_inputs
mem[j].update(pick(unknown_inputs, M-len(mem[j])))
known_inputs.update(mem[j])
# handle decay
if decay:
decay_candidates = mem[j] - input
if decay_candidates:
for d in decay_candidates:
if random() < decay:
mem[j].remove(d)
# handle popularity
win[j] += 1
# handle fatigue
tow[j] = t
# handle context
if sequence:
for i in range(len(ctx)):
ctx[i] -= N
for j in winners:
ctx.append(-j-1)
self.t += 1
# ---[ auxiliary ]----------------------------------------------------------
def fit(self, X, Y):
cfg = self.cfg
for x,y in zip(X,Y):
negative = not y
self.learn(x,negative=negative,**cfg)
def fit2(self, X1, X0):
cfg = self.cfg
# TODO - unbalanced
for x1,x0 in zip(X1,X0):
self.learn(x1,negative=False,**cfg)
self.learn(x0,negative=True,**cfg)
def transform(self, X):
cutoff = self.cfg['cutoff']
out = []
for s in self.score_many(X):
y = 1 if s>=cutoff else 0
out += [y]
return out
def fit_transform(self, X, Y):
self.fit(X,Y)
return self.transform(X)
def score(self, X, Y, kind='acc'):
c = self.confusion(X,Y)
p = float(c['p'])
n = float(c['n'])
tp = float(c['tp'])
tn = float(c['tn'])
fp = float(c['fp'])
fn = float(c['fn'])
try:
if kind=='acc':
return (tp + tn) / (p + n)
elif kind=='f1':
return (2*tp) / (2*tp + fp + fn)
elif kind=='prec':
return tp / (tp + fp)
elif kind=='sens':
return tp / (tp + fn)
elif kind=='spec':
return tn / (tn + fp)
except ZeroDivisionError:
return float('nan')
def confusion(self, X, Y):
PY = self.transform(X)
p = 0
n = 0
tp = 0
tn = 0
fp = 0
fn = 0
for y,py in zip(Y,PY):
if y: p+=1
else: n+=1
if y:
if py: tp+=1
else: fn+=1
else:
if py: fp+=1
else: tn+=1
return dict(p=p,n=n,tp=tp,tn=tn,fp=fp,fn=fn)
def score_many(self, X):
out = []
for x in X:
s = self.score_one(x)
out += [s]
return out
# TODO
def calibrate(self, X, Y, kind='f1'):
for i in range(1,20):
c = 0.05*i
self.set_params(cutoff=c)
s = self.score(X,Y,kind)
            print('{} {:.3} -> {:.3}'.format(kind, c, s))
def score_one(self, input):
"aggregate scores to scalar"
k = self.cfg['k']
method = self.cfg['method']
scores = self.scores(input)
M = self.cfg['m']
if method==0:
return top(k, scores, values=True)
elif method==1:
score = 1.0*sum(top(k, scores, values=True))/(k*(M+1))
return score
elif method==2:
score = 1.0*sum(top(k, scores, values=True))/(k*M)
return min(1.0,score)
if method==3:
score = 1.0*min(top(k, scores, values=True))/(M+1)
return score
elif method==4:
score = 1.0*min(top(k, scores, values=True))/M
return min(1.0,score)
if method==5:
score = 1.0*max(top(k, scores, values=True))/(M+1)
return score
elif method==6:
score = 1.0*max(top(k, scores, values=True))/M
return min(1.0,score)
def stats(self,prefix=''):
N = self.cfg['n']
M = self.cfg['m']
mem_v = self.mem.values()
out = {}
# mem
out['mem_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/N
out['mem_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/N
out['mem_full'] = sum([1.0 if len(x)==M else 0.0 for x in mem_v])/N
out['mem_avg'] = sum([1.0*len(x) for x in mem_v])/(N*M)
# win
win = list(sorted(self.win.values()))
out['win_min'] = win[0]
out['win_max'] = win[-1]
gini = 0
for a in win:
for b in win:
gini += abs(a-b)
gini = float(gini)/(2.0*len(win)*sum(win))
out['win_gini'] = round(gini,3)
# ctx
out['ctx_mem_sum'] = sum([1 if x<0 else 0 for m in mem_v for x in m])
out['ctx_mem_cnt'] = sum([max([1 if x<0 else 0 for x in m]) for m in mem_v if m])
out['ctx_mem_max'] = max([sum([1 if x<0 else 0 for x in m]) for m in mem_v if m])
#
return {k:v for k,v in out.items() if k.startswith(prefix)}
def set_params(self,**kw):
self.cfg.update(kw)
# TODO: deep parameter
def get_params(self,deep=True):
return self.cfg # TODO copy ???
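# Minimal usage sketch (illustrative, not from the original file); the feature
# indices and labels below are made up. Like the class itself, it relies on
# helpers from common2 (top, pick, ...).
def _demo_rsm():
    clf = rsm(n=100, m=10, k=3, method=1, cutoff=0.5)
    X = [[1, 5, 9, 23], [2, 6, 10, 24]]   # sparse binary features (active indices)
    Y = [1, 0]                            # positive / negative examples
    clf.fit(X, Y)
    return clf.transform(X)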
| mobarski/sandbox | rsm/v9le/v5.py | Python | mit | 8,419 | ["NEURON"] | 7656ff0172adf122ea32e874fb283769242a7d4811d5bc2fe0e01ef715937d81 |
"""Substitution matrices and associated functions.
Several of these are originally imported from BioPython.
Substitution matrices are similarity and not distance matrices.
Matrices are represented in two ways in this module and there are
converters that go from one to the other:
(1) Parameters called "subst" are dicts with tuple keys
representing the substitution and keys with the similarity
('X', 'Y') : s
(2) Parameters called "substMat" are square 2d np.ndarrays (dtype = np.float64)
The indices align with those in the FULL_AALPHABET.
TODO: Use pd.DataFrames instead to avoid losing the index. But
this would not be a feature used by numba or numpy optimized routines,
just the end user.
TODO:
(1) Add support for a DNA alphabet and associated matrices.
"""
from Bio.SubsMat.MatrixInfo import blosum90, ident, blosum62
import numpy as np
from copy import deepcopy
import itertools
from . import FULL_AALPHABET
from . import AALPHABET
__all__ = ['nanGapScores',
'nanZeroGapScores',
'binGapScores',
'blosum90GapScores',
'binarySubst',
'binaryMat',
'identMat',
'blosum62Mat',
'blosum90Mat',
'addGapScores',
'blosum90',
'ident',
'blosum62']
def subst2mat(subst, alphabet = FULL_AALPHABET):
"""Converts a substitution dictionary
(like those from Bio) into a numpy 2d substitution matrix.
    Assumes the matrix is symmetrical,
    but if it's not this will still produce a good copy.
Missing substitutions are nan.
Return type is float64"""
mat = np.nan * np.zeros((len(alphabet), len(alphabet)), dtype = np.float64)
ij = np.zeros((len(subst), 2), dtype=np.int)
for ki, ((aa1, aa2), v) in enumerate(subst.items()):
i, j = alphabet.index(aa1), alphabet.index(aa2)
ij[ki,:] = [i, j]
mat[i, j] = v
for ki in range(ij.shape[0]):
"""Go back through all the assignments and make the symetrical assignments
if the value is still nan (i.e. otherwise unspecified)"""
i, j = ij[ki, 0], ij[ki, 1]
if np.isnan(mat[j, i]):
mat[j, i] = mat[i, j]
return mat
def addGapScores(subst, gapScores = None, minScorePenalty = False, returnMat = False):
"""Add gap similarity scores for each AA (Could be done once for a set of sequences to improve speed)
if gapScores is None then it will use defaults:
gapScores = {('-','-'):1,
('-','X'):0,
('X','-'):0}
OR for blosum90 default is:
blosum90GapScores = {('-','-'):5,
('-','X'):-11,
('X','-'):-11}
"""
if minScorePenalty:
gapScores = {('-', '-') : 1,
('-', 'X') : np.min(list(subst.values())),
('X', '-') : np.min(list(subst.values()))}
elif gapScores is None:
if subst is binarySubst:
print('Using default binGapScores for binarySubst')
gapScores = binGapScores
elif subst is blosum90:
print('Using default blosum90 gap scores')
gapScores = blosum90GapScores
else:
raise Exception('Cannot determine which gap scores to use!')
su = deepcopy(subst)
uAA = np.unique([k[0] for k in list(subst.keys())])
su.update({('-', aa) : gapScores[('-', 'X')] for aa in uAA})
su.update({(aa, '-') : gapScores[('X', '-')] for aa in uAA})
su.update({('-', '-') : gapScores[('-', '-')]})
if returnMat:
return subst2mat(su)
return su
"""Many different ways of handling gaps. Remember that these are SIMILARITY scores"""
nanGapScores = {('-', '-'):np.nan,
('-', 'X'):np.nan,
('X', '-'):np.nan}
nanZeroGapScores = {('-', '-'):np.nan,
('-', 'X'):0,
('X', '-'):0}
"""Default for addGapScores()"""
binGapScores = {('-', '-'):1,
('-', 'X'):0,
('X', '-'):0}
"""Arbitrary/reasonable values (extremes for blosum90 I think)"""
blosum90GapScores = {('-', '-'):5,
('-', 'X'):-11,
('X', '-'):-11}
binarySubst = {(aa1, aa2):np.float64(aa1==aa2) for aa1, aa2 in itertools.product(AALPHABET, AALPHABET)}
identMat = subst2mat(ident)
blosum90Mat = subst2mat(blosum90)
blosum62Mat = subst2mat(blosum62)
binaryMat = subst2mat(binarySubst)
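# Usage sketch (illustrative, not part of the original module): build a
# blosum62 similarity matrix that also scores gap characters, using the
# defaults defined above.
def _example_blosum62_with_gaps():
    subst_with_gaps = addGapScores(blosum62, gapScores=binGapScores)
    return subst2mat(subst_with_gaps)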
| agartland/seqdistance | matrices.py | Python | mit | 4,539 | ["Biopython"] | 5bdb403e4a30c2a58ceb7b1eeeb4af3b0e5d9f244392365b76ebcd61cff90a15 |
#!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Reiko Michisaki'
__copyright__ = '2015'
__license__ = 'GPL v3'
__contact__ = 'duane at mbari.org'
__doc__ = '''
Master loader for all CANON activities in May 2015
Mike McCann and Duane Edgington
MBARI 11 May 2015
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime # needed for glider data
import time # for startdate, enddate args
import csv
import urllib.request, urllib.error, urllib.parse
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
from thredds_crawler.crawl import Crawl
import timing
cl = CANONLoader('stoqs_canon_may2015', 'CANON-ECOHAB - May 2015',
description = 'Spring 2015 Experiment in Monterey Bay',
x3dTerrains = {
'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '10',
'speed': '0.1',
}
},
grdTerrain = os.path.join(parentDir, 'Monterey25.grd')
)
# Set start and end dates for all loads from sources that contain data
# beyond the temporal bounds of the campaign
startdate = datetime.datetime(2015, 5, 6) # Fixed start
enddate = datetime.datetime(2015, 6, 11) # Fixed end
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
#####################################################################
# DORADO
#####################################################################
# special location for dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2015/netcdf/'
cl.dorado_files = [
'Dorado389_2015_132_04_132_04_decim.nc',
'Dorado389_2015_148_01_148_01_decim.nc',
'Dorado389_2015_156_00_156_00_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume', 'rhodamine',
'sepCountList', 'mepCountList',
'roll', 'pitch', 'yaw',
]
######################################################################
# GLIDERS
######################################################################
# Glider data files from CeNCOOS thredds server
# L_662
# cl.l_662_base = 'http://www.cencoos.org/thredds/dodsC/gliders/Line66/'
cl.l_662_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line66/'
cl.l_662_files = [ 'OS_Glider_L_662_20150427_TS.nc' ]
cl.l_662_parms = ['TEMP', 'PSAL', 'FLU2']
cl.l_662_startDatetime = startdate
cl.l_662_endDatetime = enddate
# NPS_29
#cl.nps29_base = 'http://www.cencoos.org/thredds/dodsC/gliders/Line66/'
#cl.nps29_files = [ 'OS_Glider_NPS_G29_20140930_TS.nc' ]
#cl.nps29_parms = ['TEMP', 'PSAL']
#cl.nps29_startDatetime = startdate
#cl.nps29_endDatetime = enddate
# slocum_294 also known as UCSC294
cl.slocum_294_base = 'http://data.ioos.us/gliders/thredds/dodsC/deployments/mbari/UCSC294-20150430T2218/'
cl.slocum_294_files = [ 'UCSC294-20150430T2218.nc3.nc' ]
cl.slocum_294_parms = ['temperature', 'salinity']
cl.slocum_294_startDatetime = startdate
cl.slocum_294_endDatetime = enddate
# slocum_260 also known as UCSC160
cl.slocum_260_base = 'http://data.ioos.us/gliders//thredds/dodsC/deployments/mbari/UCSC260-20150520T0000/'
cl.slocum_260_files = [ 'UCSC260-20150520T0000.nc3.nc' ]
cl.slocum_260_parms = ['temperature', 'salinity']
cl.slocum_260_startDatetime = startdate
cl.slocum_260_endDatetime = enddate
# slocum_nemesis
######################################################################
# Wavegliders
######################################################################
# WG Tex - All instruments combined into one file - one time coordinate
#cl.wg_tex_base = cl.dodsBase + 'CANON/2015_May/Platforms/Waveglider/SV3_Tiny/'
#cl.wg_tex_files = [ 'SV3_20150501_QC.nc' ]
#cl.wg_tex_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'bb_470', 'bb_650', 'chl',
# 'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
#cl.wg_tex_startDatetime = startdate
#cl.wg_tex_endDatetime = enddate
# WG Tiny - All instruments combined into one file - one time coordinate
cl.wg_Tiny_base = cl.dodsBase + 'CANON/2015_May/Platforms/Waveglider/SV3_Tiny/'
cl.wg_Tiny_files = [ 'SV3_20150501_QC.nc' ]
cl.wg_Tiny_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'bb_470', 'bb_650', 'chl',
'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
cl.wg_Tiny_startDatetime = startdate
cl.wg_Tiny_endDatetime = enddate
# WG OA - All instruments combined into one file - one time coordinate
##cl.wg_oa_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_OA/final/'
##cl.wg_oa_files = [ 'Sept_2013_OAWaveglider_final.nc' ]
##cl.wg_oa_parms = [ 'distance', 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'O2_conc',
## 'O2_sat', 'beta_470', 'bb_470', 'beta_700', 'bb_700', 'chl', 'pCO2_water', 'pCO2_air', 'pH' ]
##cl.wg_oa_startDatetime = startdate
##cl.wg_oa_endDatetime = enddate
######################################################################
# WESTERN FLYER: not in this cruise
######################################################################
# UCTD
cl.wfuctd_base = cl.dodsBase + 'CANON/2015_May/Platforms/Ships/Western_Flyer/uctd/'
cl.wfuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.wfuctd_files = [
]
# PCTD
cl.wfpctd_base = cl.dodsBase + 'CANON/2014_Sep/Platforms/Ships/Western_Flyer/pctd/'
cl.wfpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl' , 'oxygen']
cl.wfpctd_files = [
]
######################################################################
# RACHEL CARSON: May 2015 --
######################################################################
# UCTD
cl.rcuctd_base = cl.dodsBase + 'CANON/2015_May/Platforms/Ships/Rachel_Carson/uctd/'
cl.rcuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.rcuctd_files = [
'13115plm01.nc',
'13215plm01.nc',
'14115plm01.nc',
'14815plm01.nc',
'15515plm01.nc',
'15615plm01.nc',
]
# PCTD
cl.rcpctd_base = cl.dodsBase + 'CANON/2015_May/Platforms/Ships/Rachel_Carson/pctd/'
cl.rcpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
cl.rcpctd_files = [
'13115c01.nc', '13115c02.nc', '13115c03.nc',
'13215c01.nc', '13215c02.nc', '13215c03.nc', '13215c04.nc', '13215c05.nc',
'14115c01.nc', '14115c02.nc', '14115c03.nc', '14115c04.nc',
'14815c01.nc', '14815c02.nc', '14815c03.nc', '14815c04.nc', '14815c05.nc', '14815c06.nc',
'15515c01.nc', '15515c02.nc', '15515c03.nc',
'15615c01.nc', '15615c02.nc', '15615c03.nc', '15615c04.nc',
]
#####################################################################
# JOHN MARTIN
#####################################################################
cl.JMpctd_base = cl.dodsBase + 'CANON/2015_May/Platforms/Ships/Martin/pctd/'
cl.JMpctd_parms = ['TEMP', 'PSAL', 'xmiss', 'wetstar', 'oxygen' ]
cl.JMpctd_files = [
'EH15_18.nc', 'EH15_19.nc', 'EH15_20.nc', 'EH15_21.nc', 'EH15_22.nc', 'EH15_24.nc',
'EH15_25.nc', 'EH15_26.nc', 'EH15_27.nc', 'EH15_28a.nc', 'EH15_29a.nc', 'EH15_29b.nc',
'EH15_29.nc', 'EH15_30.nc', 'EH15_31.nc', 'EH15_Sta10a.nc', 'EH15_Sta11.nc', 'EH15_Sta12a.nc',
'EH15_Sta12.nc', 'EH15_Sta13.nc', 'EH15_Sta14.nc', 'EH15_Sta15.nc', 'EH15_Sta16.nc', 'EH15_Sta17.nc',
'EH15_Sta8b.nc', 'EH15_Sta9.nc',
]
######################################################################
# MOORINGS May 2015
######################################################################
# Mooring M1 Combined file produced by DPforSSDS processing - for just the duration of the campaign
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/201407/'
cl.m1_files = [
'OS_M1_20140716hourly_CMSTV.nc',
]
cl.m1_parms = [ 'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR',
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate
# Mooring 0A1
cl.oa1_base = cl.dodsBase + 'CANON/2015_May/Platforms/Moorings/OA1/'
cl.oa1_files = [
'OA1_Canon2015_May.nc'
]
cl.oa1_parms = [
'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
]
cl.oa1_startDatetime = startdate
cl.oa1_endDatetime = enddate
# Mooring 0A2
cl.oa2_base = cl.dodsBase + 'CANON/2015_May/Platforms/Moorings/OA2/'
cl.oa2_files = [
'OA2_Canon2015_May.nc'
]
cl.oa2_parms = [
'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
]
cl.oa2_startDatetime = startdate
cl.oa2_endDatetime = enddate
#######################################################################################
# ESP MOORINGS
#######################################################################################
##cl.bruce_moor_base = cl.dodsBase + 'CANON_september2013/Platforms/Moorings/ESP_Bruce/NetCDF/'
##cl.bruce_moor_files = ['Bruce_ctd.nc']
##cl.bruce_moor_parms = [ 'TEMP','PSAL','chl','xmiss','oxygen','beamc',
## ]
##cl.bruce_moor_startDatetime = startdate
##cl.bruce_moor_endDatetime = enddate
##cl.mack_moor_base = cl.dodsBase + 'CANON_september2013/Platforms/Moorings/ESP_Mack/NetCDF/'
##cl.mack_moor_files = ['Mack_ctd.nc']
##cl.mack_moor_parms = [ 'TEMP','PSAL','chl','xmiss','oxygen','beamc',
## ]
##cl.mack_moor_startDatetime = startdate
##cl.mack_moor_endDatetime = enddate
###################################################################################################
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/
# 13115 13215 14115 14815 15515 15615
# copied to local BOG_Data/CANON_May2015 dir
###################################################################################################
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BOG_Data/CANON_May2015/')
cl.subsample_csv_files = [
## these are loaded OK:
'STOQS_13115_CARBON_GFF.csv', 'STOQS_13115_CHL_1U.csv', 'STOQS_13115_CHL_5U.csv', 'STOQS_13115_CHL_GFF.csv',
'STOQS_13115_CHLA.csv', 'STOQS_13115_NO2.csv', 'STOQS_13115_NO3.csv', 'STOQS_13115_PHAEO_1U.csv',
'STOQS_13115_PHAEO_5U.csv', 'STOQS_13115_PHAEO_GFF.csv', 'STOQS_13115_PO4.csv', 'STOQS_13115_SIO4.csv',
'STOQS_13215_CARBON_GFF.csv',
## error no data
## 'STOQS_13215_CHL_1U.csv', ## error no data
## 'STOQS_13215_CHL_5U.csv', ## error no data
'STOQS_13215_CHL_GFF.csv','STOQS_13215_CHLA.csv', 'STOQS_13215_NO2.csv', 'STOQS_13215_NO3.csv',
## error no data
## 'STOQS_13215_PHAEO_1U.csv', ## error no data
## 'STOQS_13215_PHAEO_5U.csv', ## error no data
'STOQS_13215_PHAEO_GFF.csv', 'STOQS_13215_PO4.csv', 'STOQS_13215_SIO4.csv',
'STOQS_14115_CARBON_GFF.csv', 'STOQS_14115_CHL_1U.csv', 'STOQS_14115_CHL_5U.csv', 'STOQS_14115_CHL_GFF.csv',
'STOQS_14115_CHLA.csv', 'STOQS_14115_NO2.csv', 'STOQS_14115_NO3.csv', 'STOQS_14115_PHAEO_1U.csv',
'STOQS_14115_PHAEO_5U.csv', 'STOQS_14115_PHAEO_GFF.csv', 'STOQS_14115_PO4.csv', 'STOQS_14115_SIO4.csv',
'STOQS_14815_CARBON_GFF.csv', 'STOQS_14815_CHL_1U.csv', 'STOQS_14815_CHL_5U.csv', 'STOQS_14815_CHL_GFF.csv',
'STOQS_14815_CHLA.csv', 'STOQS_14815_NO2.csv', 'STOQS_14815_NO3.csv', 'STOQS_14815_PHAEO_1U.csv',
'STOQS_14815_PHAEO_5U.csv', 'STOQS_14815_PHAEO_GFF.csv', 'STOQS_14815_PO4.csv', 'STOQS_14815_SIO4.csv',
'STOQS_15515_CARBON_GFF.csv', 'STOQS_15515_CHL_1U.csv', 'STOQS_15515_CHL_5U.csv', 'STOQS_15515_CHL_GFF.csv',
'STOQS_15515_CHLA.csv', 'STOQS_15515_NO2.csv', 'STOQS_15515_NO3.csv', 'STOQS_15515_PHAEO_1U.csv',
'STOQS_15515_PHAEO_5U.csv', 'STOQS_15515_PHAEO_GFF.csv', 'STOQS_15515_PO4.csv', 'STOQS_15515_SIO4.csv',
'STOQS_15615_CARBON_GFF.csv', 'STOQS_15615_CHL_1U.csv', 'STOQS_15615_CHL_5U.csv', 'STOQS_15615_CHL_GFF.csv',
'STOQS_15615_CHLA.csv', 'STOQS_15615_NO2.csv', 'STOQS_15615_NO3.csv', 'STOQS_15615_PHAEO_1U.csv',
'STOQS_15615_PHAEO_5U.csv', 'STOQS_15615_PHAEO_GFF.csv', 'STOQS_15615_PO4.csv', 'STOQS_15615_SIO4.csv',
]
###################################################################################################################
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.loadL_662(stride=100)
##cl.load_NPS29(stride=10) ## not in this campaign
##cl.load_slocum_294(stride=10) ## waiting for STOQS enhancement to load slocum_294
##cl.load_slocum_260(stride=10) ## waiting for STOQS enhancement to load slocum_294
##cl.load_wg_tex(stride=10) ## not in this campaign
cl.load_wg_Tiny(stride=10)
    ##cl.load_wg_oa(stride=10) ## waiting for data to be formatted for loading
cl.loadDorado(stride=100)
cl.loadRCuctd(stride=10)
cl.loadRCpctd(stride=10)
    ##cl.loadJMuctd(stride=10) ## waiting for data to be formatted for loading
cl.loadJMpctd(stride=10)
##cl.loadWFuctd(stride=10) ## not in this campaign
##cl.loadWFpctd(stride=10) ## not in this campaign
cl.loadM1(stride=10)
    ##cl.loadBruceMoor(stride=10) ## waiting for data to be formatted for loading
    ##cl.loadMackMoor(stride=10) ## waiting for data to be formatted for loading
cl.loadSubSamples() ## need to populate local directory /loaders/CANON/BOG_Data/CANON_May2015/ with sample files
elif cl.args.optimal_stride:
cl.loadL_662(stride=2)
##cl.load_NPS29(stride=2) ## not in this campaign
##cl.load_slocum_294(stride=2) ## waiting for STOQS enhancement to load slocum_294
##cl.load_slocum_260(stride=2) ## waiting for STOQS enhancement to load slocum_294
##cl.load_wg_tex(stride=2) ## not in this campaign
cl.load_wg_Tiny(stride=2)
    ##cl.load_wg_oa(stride=2) ## waiting for data to be formatted for loading
cl.loadDorado(stride=2)
cl.loadRCuctd(stride=2)
cl.loadRCpctd(stride=2)
    ##cl.loadJMuctd(stride=2) ## waiting for data to be formatted for loading
cl.loadJMpctd(stride=2)
##cl.loadWFuctd(stride=2) ## not in this campaign
##cl.loadWFpctd(stride=2) ## not in this campaign
cl.loadM1(stride=1)
    ##cl.loadBruceMoor(stride=2) ## waiting for data to be formatted for loading
    ##cl.loadMackMoor(stride=2) ## waiting for data to be formatted for loading
cl.loadSubSamples() ## need to populate local directory /loaders/CANON/BOG_Data/CANON_May2015/ with sample files
else:
cl.stride = cl.args.stride
cl.loadL_662()
##cl.load_NPS29() ## not in this campaign
##cl.load_slocum_294() ## waiting for STOQS enhancement to load slocum_294
##cl.load_slocum_260() ## waiting for STOQS enhancement to load slocum_294
##cl.load_wg_tex() ## not in this campaign
cl.load_wg_Tiny()
    ##cl.load_wg_oa() ## waiting for data to be formatted for loading
cl.loadDorado()
cl.loadRCuctd()
cl.loadRCpctd()
    ##cl.loadJMuctd() ## waiting for data to be formatted for loading
cl.loadJMpctd()
##cl.loadWFuctd() ## not in this campaign
##cl.loadWFpctd() ## not in this campaign
cl.loadM1()
cl.load_oa1()
cl.load_oa2()
    ##cl.loadBruceMoor() ## waiting for data to be formatted for loading
    ##cl.loadMackMoor() ## waiting for data to be formatted for loading
cl.loadSubSamples() ## need to populate local directory /loaders/CANON/BOG_Data/CANON_May2015/ with sample files
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print("All Done.")
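# Typical invocations (illustrative; the option names follow the --test /
# --optimal_stride / --stride flags handled by cl.process_command_line() above,
# and the target database is selected by that same shared loader machinery):
#
#   loadCANON_may2015.py --test              # quick sanity load with large strides
#   loadCANON_may2015.py --optimal_stride    # reduced-resolution production load
#   loadCANON_may2015.py --stride 1          # full-resolution load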
|
stoqs/stoqs
|
stoqs/loaders/CANON/loadCANON_may2015.py
|
Python
|
gpl-3.0
| 17,046
|
[
"NetCDF"
] |
6ae054fea7bb2d367b7cc9a16135316a171aa0145bb050341f32c519d6ca0a1c
|
import dbus as _dbus
from razer.client.devices import RazerDevice as __RazerDevice, BaseDeviceFactory as __BaseDeviceFactory
from razer.client.devices.firefly import RazerFirefly as __RazerFirefly
from razer.client.devices.keyboard import RazerKeyboardFactory as __RazerKeyboardFactory
from razer.client.devices.mice import RazerMouse as __RazerMouse
DEVICE_MAP = {
'firefly': __RazerFirefly,
'keyboard': __RazerKeyboardFactory,
'mouse': __RazerMouse,
'tartarus': __RazerKeyboardFactory,
'default': __RazerDevice
}
class RazerDeviceFactory(__BaseDeviceFactory):
"""
Simple factory to return an object for a given device
"""
@staticmethod
def get_device(serial, vid_pid=None, daemon_dbus=None):
"""
Factory for turning a serial into a class
Device factory, will return a class fit for the device in question. The DEVICE_MAP mapping above
can contain a device_type => DeviceClass or DeviceFactory, this allows us to specify raw device classes
if there is only one model (like Firefly) or a factory for the keyboards (so we can differentiate between
old blackwidows and chromas). If the device is not in the device mapping then the factory will default
to a raw RazerDevice.
:param serial: Device serial
:type serial: str
:param vid_pid: Device VID, PID
:type vid_pid: list of int
:param daemon_dbus: Daemon DBus object
:type daemon_dbus: object or None
:return: RazerDevice object (or subclass)
:rtype: RazerDevice
"""
if daemon_dbus is None:
session_bus = _dbus.SessionBus()
daemon_dbus = session_bus.get_object("org.razer", "/org/razer/device/{0}".format(serial))
device_dbus = _dbus.Interface(daemon_dbus, "razer.device.misc")
device_type = device_dbus.getDeviceType()
device_vid_pid = device_dbus.getVidPid()
if device_type in DEVICE_MAP:
# Have device mapping
device_class = DEVICE_MAP[device_type]
if hasattr(device_class, 'get_device'):
# DeviceFactory
device = device_class.get_device(serial, vid_pid=device_vid_pid, daemon_dbus=daemon_dbus)
else:
# DeviceClass
device = device_class(serial, vid_pid=device_vid_pid, daemon_dbus=daemon_dbus)
else:
# No mapping, default to RazerDevice
device = DEVICE_MAP['default'](serial, vid_pid=device_vid_pid, daemon_dbus=daemon_dbus)
return device
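# Illustrative usage sketch (not part of the original module; the serial below is
# hypothetical and a running razer daemon exposing the device on the session bus
# is assumed):
#
#   from razer.client.device import RazerDeviceFactory
#   device = RazerDeviceFactory.get_device("PM1234H12345678")
#   print(type(device))  # a keyboard/mouse/firefly class per DEVICE_MAP, else RazerDevice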
|
z3ntu/razer-drivers
|
pylib/razer/client/device.py
|
Python
|
gpl-2.0
| 2,599
|
[
"Firefly"
] |
c614b52b22f1741b37ecad15270bf12bf867df3737e56a2f1285963da97d371c
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
def _pprint_strs(strs, max_chars=80, delimiter=', ', suffix='...',):
"""Pretty-print an iterable of strings, truncating if necessary."""
# Adapted from http://stackoverflow.com/a/250373
joined_str = delimiter.join(repr(s) for s in strs)
if len(joined_str) > max_chars:
truncated = joined_str[:max_chars + 1].split(delimiter)[0:-1]
joined_str = delimiter.join(truncated)
if joined_str:
joined_str += delimiter
joined_str += suffix
return joined_str
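# Illustrative examples with the defaults above:
#   _pprint_strs(['a', 'b', 'c'])                 -> "'a', 'b', 'c'"
#   _pprint_strs(['abcdef'] * 20, max_chars=20)   -> "'abcdef', 'abcdef', ..."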
|
demis001/scikit-bio
|
skbio/stats/_misc.py
|
Python
|
bsd-3-clause
| 935
|
[
"scikit-bio"
] |
923af26a0ccb685eeaf9b684057f8e57194da1e129237a835020ff4d4af68bc2
|
# -*- coding: iso-8859-15 -*-
# ISO-3166 alpha country codes - ver 1
ISO3166CountryCodesAlpha= { "ABW":"Aruba",\
"AFG":"Afghanistan",\
"AGO":"Angola",\
"AIA":"Anguilla",\
"ALA":"Åland Islands",\
"ALB":"Albania",\
"AND":"Andorra",\
"ANT":"Netherlands Antilles",\
"ARE":"United Arab Emirates",\
"ARG":"Argentina",\
"ARM":"Armenia",\
"ASM":"American Samoa",\
"ATA":"Antarctica",\
"ATF":"French Southern Territories",\
"ATG":"Antigua and Barbuda",\
"AUS":"Australia",\
"AUT":"Austria",\
"AZE":"Azerbaijan",\
"BDI":"Burundi",\
"BDR":"Bundesdruckerei",\
"BEL":"Belgium",\
"BEN":"Benin",\
"BFA":"Burkina Faso",\
"BGD":"Bangladesh",\
"BGR":"Bulgaria",\
"BHR":"Bahrain",\
"BHS":"Bahamas",\
"BIH":"Bosnia and Herzegovina",\
"BLR":"Belarus",\
"BLZ":"Belize",\
"BMU":"Bermuda",\
"BOL":"Bolivia",\
"BRA":"Brazil",\
"BRB":"Barbados",\
"BRN":"Brunei Darussalam",\
"BTN":"Bhutan",\
"BVT":"Bouvet Island",\
"BWA":"Botswana",\
"CAF":"Central African Republic",\
"CAN":"Canada",\
"CCK":"Cocos (Keeling) Islands",\
"CHE":"Switzerland",\
"CHL":"Chile",\
"CHN":"China",\
"CIV":"Côte d'Ivoire",\
"CMR":"Cameroon",\
"COD":"Congo, the Democratic Republic of the",\
"COG":"Congo",\
"COK":"Cook Islands",\
"COL":"Colombia",\
"COM":"Comoros",\
"CPV":"Cape Verde",\
"CRI":"Costa Rica",\
"CUB":"Cuba",\
"CXR":"Christmas Island",\
"CYM":"Cayman Islands",\
"CYP":"Cyprus",\
"CZE":"Czech Republic",\
"DEU":"Germany",\
# for brain-damaged german passports
"D":"Germany",\
"DJI":"Djibouti",\
"DMA":"Dominica",\
"DNK":"Denmark",\
"DOM":"Dominican Republic",\
"DZA":"Algeria",\
"ECU":"Ecuador",\
"EGY":"Egypt",\
"ERI":"Eritrea",\
"ESH":"Western Sahara",\
"ESP":"Spain",\
"EST":"Estonia",\
"ETH":"Ethiopia",\
"FIN":"Finland",\
"FJI":"Fiji",\
"FLK":"Falkland Islands (Malvinas)",\
"FRA":"France",\
"FRO":"Faroe Islands",\
"FSM":"Micronesia, Federated States of",\
"GAB":"Gabon",\
"GBR":"United Kingdom",\
"GEO":"Georgia",\
"GGY":"Guernsey",\
"GHA":"Ghana",\
"GIB":"Gibraltar",\
"GIN":"Guinea",\
"GLP":"Guadeloupe",\
"GMB":"Gambia",\
"GNB":"Guinea-Bissau",\
"GNQ":"Equatorial Guinea",\
"GRC":"Greece",\
"GRD":"Grenada",\
"GRL":"Greenland",\
"GTM":"Guatemala",\
"GUF":"French Guiana",\
"GUM":"Guam",\
"GUY":"Guyana",\
"HKG":"Hong Kong",\
"HMD":"Heard Island and McDonald Islands",\
"HND":"Honduras",\
"HRV":"Croatia",\
"HTI":"Haiti",\
"HUN":"Hungary",\
"IDN":"Indonesia",\
"IMN":"Isle of Man",\
"IND":"India",\
"IOT":"British Indian Ocean Territory",\
"IRL":"Ireland",\
"IRN":"Iran, Islamic Republic of",\
"IRQ":"Iraq",\
"ISL":"Iceland",\
"ISR":"Israel",\
"ITA":"Italy",\
"JAM":"Jamaica",\
"JEY":"Jersey",\
"JOR":"Jordan",\
"JPN":"Japan",\
"KAZ":"Kazakhstan",\
"KEN":"Kenya",\
"KGZ":"Kyrgyzstan",\
"KHM":"Cambodia",\
"KIR":"Kiribati",\
"KNA":"Saint Kitts and Nevis",\
"KOR":"Korea, Republic of",\
"KWT":"Kuwait",\
"LAO":"Lao People's Democratic Republic",\
"LBN":"Lebanon",\
"LBR":"Liberia",\
"LBY":"Libyan Arab Jamahiriya",\
"LCA":"Saint Lucia",\
"LIE":"Liechtenstein",\
"LKA":"Sri Lanka",\
"LSO":"Lesotho",\
"LTU":"Lithuania",\
"LUX":"Luxembourg",\
"LVA":"Latvia",\
"MAC":"Macao",\
"MAR":"Morocco",\
"MCO":"Monaco",\
"MDA":"Moldova, Republic of",\
"MDG":"Madagascar",\
"MDV":"Maldives",\
"MEX":"Mexico",\
"MHL":"Marshall Islands",\
"MKD":"Macedonia, the former Yugoslav Republic of",\
"MLI":"Mali",\
"MLT":"Malta",\
"MMR":"Myanmar",\
"MNE":"Montenegro",\
"MNG":"Mongolia",\
"MNP":"Northern Mariana Islands",\
"MOZ":"Mozambique",\
"MRT":"Mauritania",\
"MSR":"Montserrat",\
"MTQ":"Martinique",\
"MUS":"Mauritius",\
"MWI":"Malawi",\
"MYS":"Malaysia",\
"MYT":"Mayotte",\
"NAM":"Namibia",\
"NCL":"New Caledonia",\
"NER":"Niger",\
"NFK":"Norfolk Island",\
"NGA":"Nigeria",\
"NIC":"Nicaragua",\
"NIU":"Niue",\
"NLD":"Netherlands",\
"NOR":"Norway",\
"NPL":"Nepal",\
"NRU":"Nauru",\
"NZL":"New Zealand",\
"OMN":"Oman",\
"PAK":"Pakistan",\
"PAN":"Panama",\
"PCN":"Pitcairn",\
"PER":"Peru",\
"PHL":"Philippines",\
"PLW":"Palau",\
"PNG":"Papua New Guinea",\
"POL":"Poland",\
"PRI":"Puerto Rico",\
"PRK":"Korea, Democratic People's Republic of",\
"PRT":"Portugal",\
"PRY":"Paraguay",\
"PSE":"Palestinian Territory, Occupied",\
"PYF":"French Polynesia",\
"QAT":"Qatar",\
"REU":"Réunion",\
"ROU":"Romania",\
"RUS":"Russian Federation",\
"RWA":"Rwanda",\
"SAU":"Saudi Arabia",\
"SDN":"Sudan",\
"SEN":"Senegal",\
"SGP":"Singapore",\
"SGS":"South Georgia and the South Sandwich Islands",\
"SHN":"Saint Helena",\
"SJM":"Svalbard and Jan Mayen",\
"SLB":"Solomon Islands",\
"SLE":"Sierra Leone",\
"SLV":"El Salvador",\
"SMR":"San Marino",\
"SOM":"Somalia",\
"SPM":"Saint Pierre and Miquelon",\
"SRB":"Serbia",\
"STP":"Sao Tome and Principe",\
"SUR":"Suriname",\
"SVK":"Slovakia",\
"SVN":"Slovenia",\
"SWE":"Sweden",\
"SWZ":"Swaziland",\
"SYC":"Seychelles",\
"SYR":"Syrian Arab Republic",\
"TCA":"Turks and Caicos Islands",\
"TCD":"Chad",\
"TGO":"Togo",\
"THA":"Thailand",\
"TJK":"Tajikistan",\
"TKL":"Tokelau",\
"TKM":"Turkmenistan",\
"TLS":"Timor-Leste",\
"TON":"Tonga",\
"TTO":"Trinidad and Tobago",\
"TUN":"Tunisia",\
"TUR":"Turkey",\
"TUV":"Tuvalu",\
"TWN":"Taiwan, Province of China",\
"TZA":"Tanzania, United Republic of",\
"UGA":"Uganda",\
"UKR":"Ukraine",\
"UMI":"United States Minor Outlying Islands",\
"URY":"Uruguay",\
"USA":"United States",\
"UTO":"Utopia",\
"UZB":"Uzbekistan",\
"VAT":"Holy See (Vatican City State)",\
"VCT":"Saint Vincent and the Grenadines",\
"VEN":"Venezuela",\
"VGB":"Virgin Islands, British",\
"VIR":"Virgin Islands, U.S.",\
"VNM":"Viet Nam",\
"VUT":"Vanuatu",\
"WLF":"Wallis and Futuna",\
"WSM":"Samoa",\
"YEM":"Yemen",\
"ZAF":"South Africa",\
"ZMB":"Zambia",\
"ZWE":"Zimbabwe",\
"UNO":"United Nations Organization",\
"UNA":"United Nations specialized agency official",\
"XXA":"Stateless",\
"XXB":"Refugee",\
"XXC":"Refugee (non-convention)",\
"XXX":"Unspecified / Unknown",\
}
# combined ISO-3166 country and icar.org manufacturer codes
ISO3166CountryCodes= {'004':'Afghanistan',\
'248':'Åland Islands',\
'008':'Albania',\
'012':'Algeria',\
'016':'American Samoa',\
'020':'Andorra',\
'024':'Angola',\
'660':'Anguilla',\
'010':'Antarctica',\
'028':'Antigua and Barbuda',\
'032':'Argentina',\
'051':'Armenia',\
'533':'Aruba',\
'036':'Australia',\
'040':'Austria',\
'031':'Azerbaijan',\
'044':'Bahamas',\
'048':'Bahrain',\
'050':'Bangladesh',\
'052':'Barbados',\
'112':'Belarus',\
'056':'Belgium',\
'084':'Belize',\
'204':'Benin',\
'060':'Bermuda',\
'064':'Bhutan',\
'068':'Bolivia',\
'070':'Bosnia and Herzegovina',\
'072':'Botswana',\
'074':'Bouvet Island',\
'076':'Brazil',\
'086':'British Indian Ocean Territory',\
'096':'Brunei Darussalam',\
'100':'Bulgaria',\
'854':'Burkina Faso',\
'108':'Burundi',\
'116':'Cambodia',\
'120':'Cameroon',\
'124':'Canada',\
'132':'Cape Verde',\
'136':'Cayman Islands',\
'140':'Central African Republic',\
'148':'Chad',\
'152':'Chile',\
'156':'China',\
'162':'Christmas Island',\
'166':'Cocos (Keeling) Islands',\
'170':'Colombia',\
'174':'Comoros',\
'178':'Congo',\
'180':'Congo, the Democratic Republic of the',\
'184':'Cook Islands',\
'188':'Costa Rica',\
'384':'Côte d\'Ivoire',\
'191':'Croatia',\
'192':'Cuba',\
'196':'Cyprus',\
'203':'Czech Republic',\
'208':'Denmark',\
'262':'Djibouti',\
'212':'Dominica',\
'214':'Dominican Republic',\
'218':'Ecuador',\
'818':'Egypt',\
'222':'El Salvador',\
'226':'Equatorial Guinea',\
'232':'Eritrea',\
'233':'Estonia',\
'231':'Ethiopia',\
'238':'Falkland Islands (Malvinas)',\
'234':'Faroe Islands',\
'242':'Fiji',\
'246':'Finland',\
'250':'France',\
'254':'French Guiana',\
'258':'French Polynesia',\
'260':'French Southern Territories',\
'266':'Gabon',\
'270':'Gambia',\
'268':'Georgia',\
'276':'Germany',\
'288':'Ghana',\
'292':'Gibraltar',\
'300':'Greece',\
'304':'Greenland',\
'308':'Grenada',\
'312':'Guadeloupe',\
'316':'Guam',\
'320':'Guatemala',\
'831':'Guernsey',\
'324':'Guinea',\
'624':'Guinea-Bissau',\
'328':'Guyana',\
'332':'Haiti',\
'334':'Heard Island and McDonald Islands',\
'336':'Holy See (Vatican City State)',\
'340':'Honduras',\
'344':'Hong Kong',\
'348':'Hungary',\
'352':'Iceland',\
'356':'India',\
'360':'Indonesia',\
'364':'Iran, Islamic Republic of',\
'368':'Iraq',\
'372':'Ireland',\
'833':'Isle of Man',\
'376':'Israel',\
'380':'Italy',\
'388':'Jamaica',\
'392':'Japan',\
'832':'Jersey',\
'400':'Jordan',\
'398':'Kazakhstan',\
'404':'Kenya',\
'296':'Kiribati',\
'408':'Korea, Democratic People\'s Republic of',\
'410':'Korea, Republic of',\
'414':'Kuwait',\
'417':'Kyrgyzstan',\
'418':'Lao People\'s Democratic Republic',\
'428':'Latvia',\
'422':'Lebanon',\
'426':'Lesotho',\
'430':'Liberia',\
'434':'Libyan Arab Jamahiriya',\
'438':'Liechtenstein',\
'440':'Lithuania',\
'442':'Luxembourg',\
'446':'Macao',\
'807':'Macedonia, the former Yugoslav Republic of',\
'450':'Madagascar',\
'454':'Malawi',\
'458':'Malaysia',\
'462':'Maldives',\
'466':'Mali',\
'470':'Malta',\
'584':'Marshall Islands',\
'474':'Martinique',\
'478':'Mauritania',\
'480':'Mauritius',\
'175':'Mayotte',\
'484':'Mexico',\
'583':'Micronesia, Federated States of',\
'498':'Moldova, Republic of',\
'492':'Monaco',\
'496':'Mongolia',\
'499':'Montenegro',\
'500':'Montserrat',\
'504':'Morocco',\
'508':'Mozambique',\
'104':'Myanmar',\
'516':'Namibia',\
'520':'Nauru',\
'524':'Nepal',\
'528':'Netherlands',\
'530':'Netherlands Antilles',\
'540':'New Caledonia',\
'554':'New Zealand',\
'558':'Nicaragua',\
'562':'Niger',\
'566':'Nigeria',\
'570':'Niue',\
'574':'Norfolk Island',\
'580':'Northern Mariana Islands',\
'578':'Norway',\
'512':'Oman',\
'586':'Pakistan',\
'585':'Palau',\
'275':'Palestinian Territory, Occupied',\
'591':'Panama',\
'598':'Papua New Guinea',\
'600':'Paraguay',\
'604':'Peru',\
'608':'Philippines',\
'612':'Pitcairn',\
'616':'Poland',\
'620':'Portugal',\
'630':'Puerto Rico',\
'634':'Qatar',\
'638':'Réunion',\
'642':'Romania',\
'643':'Russian Federation',\
'646':'Rwanda',\
'654':'Saint Helena',\
'659':'Saint Kitts and Nevis',\
'662':'Saint Lucia',\
'666':'Saint Pierre and Miquelon',\
'670':'Saint Vincent and the Grenadines',\
'882':'Samoa',\
'674':'San Marino',\
'678':'Sao Tome and Principe',\
'682':'Saudi Arabia',\
'686':'Senegal',\
'688':'Serbia',\
'690':'Seychelles',\
'694':'Sierra Leone',\
'702':'Singapore',\
'703':'Slovakia',\
'705':'Slovenia',\
'090':'Solomon Islands',\
                      '706':'Somalia',\
'710':'South Africa',\
'239':'South Georgia and the South Sandwich Islands',\
'724':'Spain',\
'144':'Sri Lanka',\
'736':'Sudan',\
'740':'Suriname',\
'744':'Svalbard and Jan Mayen',\
'748':'Swaziland',\
'752':'Sweden',\
'756':'Switzerland',\
'760':'Syrian Arab Republic',\
'158':'Taiwan, Province of China',\
'762':'Tajikistan',\
'834':'Tanzania, United Republic of',\
'764':'Thailand',\
'626':'Timor-Leste',\
'768':'Togo',\
'772':'Tokelau',\
'776':'Tonga',\
'780':'Trinidad and Tobago',\
'788':'Tunisia',\
'792':'Turkey',\
'795':'Turkmenistan',\
'796':'Turks and Caicos Islands',\
'798':'Tuvalu',\
'800':'Uganda',\
'804':'Ukraine',\
'784':'United Arab Emirates',\
'826':'United Kingdom',\
'840':'United States',\
'581':'United States Minor Outlying Islands',\
'858':'Uruguay',\
'860':'Uzbekistan',\
'548':'Vanuatu',\
'862':'Venezuela',\
'704':'Viet Nam',\
'092':'Virgin Islands, British',\
'850':'Virgin Islands, U.S.',\
'876':'Wallis and Futuna',\
'732':'Western Sahara',\
'887':'Yemen',\
'894':'Zambia',\
'716':'Zimbabwe',\
'985':'MANUF: Destron Fearing / Digital Angel Corporation',\
'984':'MANUF: Nedap',\
'983':'MANUF: Texas Instruments',\
'982':'MANUF: Allflex',\
'981':'MANUF: Datamars',\
'980':'MANUF: AGRIDENT BV',\
'979':'MANUF: Earlsmere I.D.',\
'978':'MANUF: IER SA',\
'977':'MANUF: Avid',\
'976':'MANUF: Gemplus',\
'975':'MANUF: Sokymat',\
'974':'MANUF: Impro',\
'973':'MANUF: Fujihira',\
'972':'MANUF: Planet ID',\
'971':'MANUF: Alfa Laval Agri',\
'970':'MANUF: Amphenol',\
'969':'MANUF: Caisley',\
'968':'MANUF: AEG',\
'967':'MANUF: Rfdynamics',\
'966':'MANUF: PetCode',\
'965':'MANUF: 4D Technology Co. Ltd.',\
'964':'MANUF: Rumitag S.L.',\
'963':'MANUF: Korth Eletro Mecanica LTDA',\
'962':'MANUF: DigiTag A/S',\
'961':'MANUF: Mannings I.A.I.D.',\
'960':'MANUF: Chevillot',\
'959':'MANUF: Global ID Technologies',\
'958':'MANUF: Pet ID',\
'957':'MANUF: Innoceramics',\
'956':'MANUF: Trovan Ltd.',\
'955':'MANUF: Reseaumatique',\
'954':'MANUF: Ryeflex',\
'953':'MANUF: Cromasa',\
'952':'MANUF: JECTA',\
'951':'MANUF: Leader Products Pty Ltd',\
'950':'MANUF: SPLICE do Brasil Telecomunicacoes e Eletronica S.A.',\
'949':'MANUF: Y-Tex Corporation',\
'948':'MANUF: H. Hauptner und Richard Herberholz GmbH & Co. KG',\
'947':'MANUF: BELCAM. ID',\
'946':'MANUF: Advanced Ceramics Limited',\
'945':'MANUF: Business Inception Identification B.V.',\
'944':'MANUF: Net & Telligent SA',\
'943':'MANUF: E-Mark Technologie & Development',\
'942':'MANUF: Zee Tags',\
'941':'MANUF: Felixcan S.L.',\
'940':'MANUF: Shearwell Data Ltd.',\
'939':'MANUF: RealTrace',\
'938':'MANUF: INSVET',\
'937':'MANUF: ID & Traceback Systems AS',\
'936':'MANUF: CROVET, S.L.',\
'935':'MANUF: VeriLogik, Inc.',\
'900':'MANUF: Shared (see http://www.icar.org/manufacturer_codes.htm)',\
'1022':'UNREGISTERED MANUF: VeriChip Corporation'}
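# Illustrative lookups (not part of the original module):
#   ISO3166CountryCodesAlpha['DEU']  -> 'Germany'
#   ISO3166CountryCodes['276']       -> 'Germany'
#   ISO3166CountryCodes['985']       -> 'MANUF: Destron Fearing / Digital Angel Corporation'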
|
kaosbeat/datakamp
|
RFIDIOt-master/rfidiot/iso3166.py
|
Python
|
mit
| 16,392
|
[
"BWA"
] |
8364efd8fea1dfb6ce361618eec50718d2809cb42e52b2d61f8e8810abc09ddc
|
#!/usr/bin/env python
#David Hoover, based on gatk by Dan Blankenberg
"""
A wrapper script for running the GenomeAnalysisTK.jar commands.
"""
import sys, optparse, os, tempfile, subprocess, shutil
from binascii import unhexlify
GALAXY_EXT_TO_GATK_EXT = { 'gatk_interval':'intervals', 'bam_index':'bam.bai', 'gatk_dbsnp':'dbSNP', 'picard_interval_list':'interval_list' } #items not listed here will use the galaxy extension as-is
GALAXY_EXT_TO_GATK_FILE_TYPE = GALAXY_EXT_TO_GATK_EXT #for now, these are the same, but could be different if needed
DEFAULT_GATK_PREFIX = "gatk_file"
CHUNK_SIZE = 2**20 #1mb
def cleanup_before_exit( tmp_dir ):
if tmp_dir and os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
def gatk_filename_from_galaxy( galaxy_filename, galaxy_ext, target_dir = None, prefix = None ):
suffix = GALAXY_EXT_TO_GATK_EXT.get( galaxy_ext, galaxy_ext )
if prefix is None:
prefix = DEFAULT_GATK_PREFIX
if target_dir is None:
target_dir = os.getcwd()
gatk_filename = os.path.join( target_dir, "%s.%s" % ( prefix, suffix ) )
os.symlink( galaxy_filename, gatk_filename )
return gatk_filename
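# Illustrative example (hypothetical paths): for a Galaxy BAM dataset,
#   gatk_filename_from_galaxy('/galaxy/files/dataset_1.dat', 'bam',
#                             target_dir='/tmp/tmp-gatk-xyz', prefix='gatk_input')
# symlinks the dataset to /tmp/tmp-gatk-xyz/gatk_input.bam and returns that path,
# so GATK sees a filename with the extension it expects.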
def gatk_filetype_argument_substitution( argument, galaxy_ext ):
return argument % dict( file_type = GALAXY_EXT_TO_GATK_FILE_TYPE.get( galaxy_ext, galaxy_ext ) )
def open_file_from_option( filename, mode = 'rb' ):
if filename:
return open( filename, mode = mode )
return None
def html_report_from_directory( html_out, dir ):
html_out.write( '<html>\n<head>\n<title>Galaxy - GATK Output</title>\n</head>\n<body>\n<p/>\n<ul>\n' )
for fname in sorted( os.listdir( dir ) ):
html_out.write( '<li><a href="%s">%s</a></li>\n' % ( fname, fname ) )
html_out.write( '</ul>\n</body>\n</html>\n' )
def index_bam_files( bam_filenames ):
for bam_filename in bam_filenames:
bam_index_filename = "%s.bai" % bam_filename
if not os.path.exists( bam_index_filename ):
#need to index this bam file
stderr_name = tempfile.NamedTemporaryFile( prefix = "bam_index_stderr" ).name
command = 'samtools index %s %s' % ( bam_filename, bam_index_filename )
try:
subprocess.check_call( args=command, shell=True, stderr=open( stderr_name, 'wb' ) )
except:
for line in open( stderr_name ):
print >> sys.stderr, line
raise Exception( "Error indexing BAM file" )
finally:
os.unlink( stderr_name )
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-p', '--pass_through', dest='pass_through_options', action='append', type="string", help='These options are passed through directly to GATK, without any modification.' )
parser.add_option( '-o', '--pass_through_options', dest='pass_through_options_encoded', action='append', type="string", help='These options are passed through directly to GATK, with decoding from binascii.unhexlify.' )
parser.add_option( '-d', '--dataset', dest='datasets', action='append', type="string", nargs=4, help='"-argument" "original_filename" "galaxy_filetype" "name_prefix"' )
    parser.add_option( '', '--max_jvm_heap', dest='max_jvm_heap', action='store', type="string", default=None, help='If specified, the maximum java virtual machine heap size will be set to the provided value.' )
    parser.add_option( '', '--max_jvm_heap_fraction', dest='max_jvm_heap_fraction', action='store', type="int", default=None, help='If specified, the maximum java virtual machine heap size will be set to the provided value as a fraction of total physical memory.' )
parser.add_option( '', '--stdout', dest='stdout', action='store', type="string", default=None, help='If specified, the output of stdout will be written to this file.' )
parser.add_option( '', '--stderr', dest='stderr', action='store', type="string", default=None, help='If specified, the output of stderr will be written to this file.' )
parser.add_option( '', '--html_report_from_directory', dest='html_report_from_directory', action='append', type="string", nargs=2, help='"Target HTML File" "Directory"')
    parser.add_option( '-e', '--phone_home', dest='phone_home', action='store', type="string", default='STANDARD', help='What kind of GATK run report should we generate (NO_ET|STANDARD|STDOUT)' )
    parser.add_option( '-K', '--gatk_key', dest='gatk_key', action='store', type="string", default=None, help='GATK key file to use, e.g. when disabling the run report with NO_ET' )
(options, args) = parser.parse_args()
if options.pass_through_options:
cmd = ' '.join( options.pass_through_options )
else:
cmd = ''
if options.pass_through_options_encoded:
cmd = '%s %s' % ( cmd, ' '.join( map( unhexlify, options.pass_through_options_encoded ) ) )
if options.max_jvm_heap is not None:
cmd = cmd.replace( 'java ', 'java -Xmx%s ' % ( options.max_jvm_heap ), 1 )
elif options.max_jvm_heap_fraction is not None:
cmd = cmd.replace( 'java ', 'java -XX:DefaultMaxRAMFraction=%s -XX:+UseParallelGC ' % ( options.max_jvm_heap_fraction ), 1 )
bam_filenames = []
tmp_dir = tempfile.mkdtemp( prefix='tmp-gatk-' )
try:
if options.datasets:
for ( dataset_arg, filename, galaxy_ext, prefix ) in options.datasets:
gatk_filename = gatk_filename_from_galaxy( filename, galaxy_ext, target_dir = tmp_dir, prefix = prefix )
if dataset_arg:
cmd = '%s %s "%s"' % ( cmd, gatk_filetype_argument_substitution( dataset_arg, galaxy_ext ), gatk_filename )
if galaxy_ext == "bam":
bam_filenames.append( gatk_filename )
if galaxy_ext == 'fasta':
subprocess.check_call( 'samtools faidx "%s"' % gatk_filename, shell=True )
subprocess.check_call( 'java -jar %s R=%s O=%s QUIET=true' % ( os.path.join(os.environ['JAVA_JAR_PATH'], 'CreateSequenceDictionary.jar'), gatk_filename, os.path.splitext(gatk_filename)[0] + '.dict' ), shell=True )
index_bam_files( bam_filenames )
#set up stdout and stderr output options
stdout = open_file_from_option( options.stdout, mode = 'wb' )
stderr = open_file_from_option( options.stderr, mode = 'wb' )
#if no stderr file is specified, we'll use our own
if stderr is None:
stderr = tempfile.NamedTemporaryFile( prefix="gatk-stderr-", dir=tmp_dir )
proc = subprocess.Popen( args=cmd, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
finally:
cleanup_before_exit( tmp_dir )
#generate html reports
if options.html_report_from_directory:
for ( html_filename, html_dir ) in options.html_report_from_directory:
html_report_from_directory( open( html_filename, 'wb' ), html_dir )
if __name__ == "__main__":
__main__()
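# Illustrative invocation (hypothetical paths and GATK command line; each -d/--dataset
# takes the four values "-argument" "original_filename" "galaxy_filetype" "name_prefix"):
#
#   python gatk2_wrapper.py \
#     --max_jvm_heap 4g \
#     -p 'java -jar GenomeAnalysisTK.jar -T CountReads' \
#     -d "-I" "/galaxy/files/dataset_1.dat" "bam" "gatk_input" \
#     --stdout run.log --stderr run.err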
|
mr-c/tools-iuc
|
tools/gatk2/gatk2_wrapper.py
|
Python
|
mit
| 7,377
|
[
"Galaxy"
] |
80d509564f52bb0bd464c36c615e525a9cdbcb92062ea47a32c6134aa7e9fa10
|
"""
Base class for factories for adding objects to the pipeline.
"""
# Author: Gael Varoquaux <[email protected]>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
import warnings
from auto_doc import make_doc
from traits.api import HasPrivateTraits, Str, TraitError,\
Instance, Any, Bool
from mayavi.core.filter import Filter
from mayavi.core.engine import Engine
from mayavi.core.source import Source
from mayavi.core.scene import Scene
from mayavi.core.module_manager import ModuleManager
from tvtk.api import tvtk
import tools
from engine_manager import get_engine
def get_obj(obj, components):
""" Get the target object for the specified components. """
for component in components:
obj = getattr(obj, component)
return obj
def make_function(factory_class):
def the_function(*args, **kwargs):
factory = factory_class(*args, **kwargs)
return factory._target
the_function.__doc__ = make_doc(factory_class)
the_function.func_name = factory_class.__name__.lower()
return the_function
def get_module_manager(obj):
    """ Returns the module manager that would be used when a module
    is added to the given object, if any, and None otherwise.
"""
if hasattr(obj, 'module_manager'):
return obj.module_manager
elif isinstance(obj, ModuleManager):
return obj
for child in reversed(obj.children):
if isinstance(child, ModuleManager):
return child
else:
return None
##############################################################################
class PipeFactory(HasPrivateTraits):
""" Base class for all factories adding pipes on the pipeline """
name = Str(adapts='name', help='the name of the vtk object created.')
figure = Instance(Scene)
_engine = Instance(Engine, help=('the figure on which the object '
'should be added'))
_target = Any
_do_redraw = Bool
def add_module(self, parent, kwargs=dict()):
""" Add the target module to the given object.
"""
# We check to see if the module-manager-related option require to
# add a new module manager:
if parent is not None:
module_manager = get_module_manager(parent)
if (module_manager is not None and
len(module_manager.children) > 0):
scalar_lut = module_manager.scalar_lut_manager
vector_lut = module_manager.vector_lut_manager
if 'vmin' in kwargs:
if not scalar_lut.use_default_range and \
kwargs['vmin'] != scalar_lut.data_range[0]:
parent = self._engine.add_module(ModuleManager(),
module_manager.parent)
elif not scalar_lut.use_default_range and \
kwargs['vmin'] != scalar_lut.data_range[0]:
parent = self._engine.add_module(ModuleManager(),
module_manager.parent)
elif 'vmax' in kwargs:
if not scalar_lut.use_default_range and \
kwargs['vmax'] != scalar_lut.data_range[1]:
parent = self._engine.add_module(ModuleManager(),
module_manager.parent)
elif not scalar_lut.use_default_range and \
kwargs['vmax'] != scalar_lut.data_range[1]:
parent = self._engine.add_module(ModuleManager(),
module_manager.parent)
elif 'colormap' in kwargs:
cmap = kwargs['colormap']
if (scalar_lut.lut_mode != cmap
or vector_lut.lut_mode != cmap):
parent = self._engine.add_module(ModuleManager(),
module_manager.parent)
self._engine.add_module(self._target, obj=parent)
def __init__(self, parent, **kwargs):
# We are not passing the traits to the parent class
super(PipeFactory, self).__init__()
# Try to find the right engine and scene to work with
ancester = parent
while hasattr(ancester, 'parent'):
ancester = getattr(ancester, 'parent')
if isinstance(ancester, Scene):
self._scene = ancester
self._engine = ancester.parent
break
else:
if self.figure is not None:
self._scene = self.figure
else:
self._scene = tools.gcf()
self._engine = get_engine()
scene = self._scene.scene
if self.figure is not None and self.figure is not self._scene:
warnings.warn('Trying to add a module on the wrong scene')
if isinstance(parent, (Source, tvtk.DataSet)) \
and not isinstance(parent, Filter) and scene is not None:
# Search the current scene to see if the source is already
# in it, if not add it.
if not parent in self._scene.children:
parent = tools.add_dataset(parent, figure=self._scene)
if scene is not None:
self._do_redraw = not scene.disable_render
scene.disable_render = True
if issubclass(self._target.__class__, Filter):
self._engine.add_filter(self._target, obj=parent)
else:
self.add_module(parent, kwargs)
# Inject the magical mlab source trait.
if hasattr(parent, 'mlab_source'):
ms = parent.mlab_source
self._target.add_trait('mlab_source', Instance(ms.__class__))
self._target.mlab_source = ms
traits = self.get(self.class_trait_names())
[traits.pop(key) for key in traits.keys()
if key[0] == '_' or key is None]
traits.update(kwargs)
# Now calling the traits setter, so that traits handlers are
# called
self.set(**traits)
if scene is not None:
scene.disable_render = not self._do_redraw
def set(self, trait_change_notify=True, **traits):
""" Same as HasTraits.set except that notification is forced,
unless trait_change_notify==False"""
HasPrivateTraits.set(self, trait_change_notify=trait_change_notify,
**traits)
if trait_change_notify == False:
return
for trait in traits.iterkeys():
callback = getattr(self, '_%s_changed' % trait)
value = getattr(self, trait)
try:
if callback is not None:
callback()
self._anytrait_changed(trait, value)
except TraitError:
if value is None:
# This means "default"
pass
else:
raise
def _anytrait_changed(self, name, value):
""" This is where we implement the adaptation code. """
trait = self.trait(name)
if name[0] == '_':
# Private attribute
return
# hasattr(traits, "adapts") always returns True :-<.
if not trait.adapts is None:
components = trait.adapts.split('.')
obj = get_obj(self._target, components[:-1])
setattr(obj, components[-1], value)
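# Usage sketch (illustrative, with hypothetical names): concrete factories subclass
# PipeFactory, point `_target` at the module or filter they create, and are exposed
# as mlab-style functions through make_function, e.g.
#
#   class MyModuleFactory(PipeFactory):
#       _target = Instance(SomeMayaviModule, ())
#
#   my_module = make_function(MyModuleFactory)
#
# Calling my_module(parent, **traits) builds the factory, attaches the target to the
# pipeline under `parent`, applies the keyword traits, and returns the target object.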
|
liulion/mayavi
|
mayavi/tools/pipe_base.py
|
Python
|
bsd-3-clause
| 7,617
|
[
"Mayavi",
"VTK"
] |
b9d2dfc4e878e937aed669a43b89e69e623790572e470e74086253423a434e1d
|
# coding:utf-8
"""
Video crawling script
"""
# coding:utf-8
"""
Database configuration
"""
from urllib import quote
from pymongo import MongoClient
DEBUG = False
# mongo
NEW_USER = "username"
NEW_PASSWORD = quote("password")
if DEBUG:
NEW_HOST_PORT = "ip:port"
else:
NEW_HOST_PORT = "ip:port"
NEW_DATABASE = "db_name"
NEW_MONGO_URL = "mongodb://{0}:{1}@{2}/{3}".format(NEW_USER, NEW_PASSWORD, NEW_HOST_PORT, NEW_DATABASE)
MONGO_URL = NEW_MONGO_URL
client = MongoClient(host=MONGO_URL, maxPoolSize=1, minPoolSize=1)
import random
from urlparse import urljoin
from urllib import unquote_plus
from datetime import datetime, timedelta
import json
import re
import logging
from collections import namedtuple
import requests
from w3lib.encoding import html_to_unicode
from bs4 import Tag, BeautifulSoup
from pymongo.errors import DuplicateKeyError
from bson.objectid import ObjectId
def find_tag(root, param):
if not isinstance(root, (Tag, BeautifulSoup)):
return None
method = param.get("method", "find")
params = param["params"]
nth = param.get("nth", 0)
if method == "find":
tag = root.find(**params)
return tag
elif method == "find_all":
tags = root.find_all(**params)
elif method == "select":
tags = root.select(**params)
else:
raise ValueError("param['method'] only support find, find_all and select")
return tags[nth] if len(tags) > nth else None
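# Illustrative example: pick the second <a> tag under a BeautifulSoup node:
#   find_tag(soup, {"method": "find_all", "params": {"name": "a"}, "nth": 1})
# returns None when the root is not a Tag/BeautifulSoup or the index is out of range.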
def find_tags(root, param):
if not isinstance(root, (Tag, BeautifulSoup)):
return []
method = param.get("method", "find_all")
params = param["params"]
if method == "find":
tag = root.find(**params)
if tag is None:
return []
else:
return [tag]
elif method == "find_all":
tags = root.find_all(**params)
elif method == "select":
tags = root.select(**params)
else:
raise ValueError("param['method'] only support find, find_all and select")
return tags
def extract_tag_attribute(root, name="text"):
if root is None:
return ""
assert isinstance(root, (Tag, BeautifulSoup))
if name == "text":
return root.get_text().strip()
else:
value = root.get(name, "")
if isinstance(value, (list, tuple)):
return ",".join(value)
else:
return value.strip()
def format_time(t=None):
f = "%Y-%m-%d %H:%M:%S"
if t is None:
return datetime.utcnow()
try:
result = datetime.strptime(t, f)
except Exception:
result = datetime.utcnow()
return result
class VideoBase(object):
"""
    Base class for video crawling
"""
headers = {
"user-agent": ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36")}
timeout = 30
r_json = False
PB_SITE = None
@classmethod
def download(cls, url, c_json=False, skip=None, headers=None):
if headers is None:
headers = cls.headers
response = requests.get(url, headers=headers,
timeout=(10, cls.timeout))
content = response.content
if skip:
content = content[skip[0]:skip[1]]
if c_json:
return json.loads(content)
else:
_, content = html_to_unicode(
content_type_header=response.headers.get("content-type"),
html_body_str=content
)
return content.encode("utf-8")
@classmethod
def build_video(cls, task):
video = Video()
video.pb_site = cls.PB_SITE
video.site = task.site_id
video.channel = task.channel_id
video.cname = task.cname
video.source_id = task.source_id
video.chid = task.chid
video.second_chid = task.second_chid
return video
@classmethod
def parse(cls, document, task):
raise NotImplementedError
@classmethod
def run(cls, task):
url = task.url
document = cls.download(url, c_json=cls.r_json)
videos = cls.parse(document, task)
logging.info("%s: %s" % (cls.__name__, len(videos)))
return videos
class Video(object):
"""
    Video object
"""
def __init__(self):
self.title = None
self.pb_time = None
self.pb_site = None
self.pb_url = None
self.author = None
self.avatar = None
self.insert = datetime.utcnow()
self.site = None
self.channel = None
self.cname = None
self.c_type = "mp4"
self.c_src = None
self.c_thumbnail = None
self.c_duration = 0
self.source_id = None
self.chid = None
self.second_chid = None
self.style = 6
self.rtype = 6
def show(self):
print "title: %s" % self.title
print "pb_time: %s" % self.pb_time
print "pb_site: %s" % self.pb_site
print "site: %s" % self.site
print "channel: %s" % self.channel
print "pb_url: %s" % self.pb_url
print "author: %s" % self.author
print "avatar: %s" % self.avatar
print "insert: %s" % self.insert
print "c_type: %s" % self.c_type
print "c_src: %s" % self.c_src
print "c_thumbnail: %s " % self.c_thumbnail
print "c_duration: % s " % self.c_duration
print "*" * 120
class StoreVideo(object):
"""
    Video storage class; stores crawled and parsed data in multiple backends
"""
db = client.get_default_database()
collection = db.videos
UPLOAD_URL = "http://10.25.60.218:8081/api/store/video"
RELATE_URL = "http://10.25.60.218:8081/search/relate/video"
def upload_to_mongodb(self, video):
"""
        Upload to the MongoDB database, operating directly through the database driver
:param video:
:return:
"""
document = dict()
document["title"] = video.title
document["pb_time"] = video.pb_time
document["insert"] = video.insert
document["pb_site"] = video.pb_site
document["cname"] = video.cname
document["pb_url"] = video.pb_url
document["author"] = video.author
document["avatar"] = video.avatar
document["site"] = str(video.site)
document["channel"] = str(video.channel)
document["content"] = dict()
document["content"]["type"] = video.c_type
document["content"]["src"] = video.c_src
document["content"]["thumbnail"] = video.c_thumbnail
if video.c_duration:
document["content"]["duration"] = int(video.c_duration)
else:
document["content"]["duration"] = 0
try:
result = self.collection.insert_one(document)
except DuplicateKeyError:
pass
except Exception as e:
logging.error(e.message, exc_info=True)
else:
logging.info("store video data id: %s" % result.inserted_id)
@classmethod
    def clean_pg_data(cls, data):
        """Clean the data to be uploaded to PG, in place."""
if data["publish_site"] == VideoWeibo.PB_SITE:
data["title"] = cls.clean_weibo_title(data["title"])
@staticmethod
    def clean_weibo_title(title):
        """Clean the Weibo title."""
title = title.strip("")
words = title.split("#")
return "".join([word for i, word in enumerate(words) if i % 2 == 0])
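    # Illustrative example: the hashtag topics (odd-numbered '#'-delimited segments)
    # are dropped, e.g. clean_weibo_title(u"funny #topic# video") -> u"funny  video"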
@classmethod
    def upload_to_pg(cls, video):
        """Upload to PG via the unified web storage API."""
assert isinstance(video, Video)
assert isinstance(video.pb_time, datetime)
assert isinstance(video.insert, datetime)
insert = video.insert + timedelta(hours=8)
data = {
"title": video.title,
"unique_id": video.pb_url,
"publish_url": video.pb_url,
"publish_site": video.author,
"publish_time": video.pb_time.isoformat()[:-7] + "Z",
"insert_time": insert.isoformat()[:-7] + "Z",
"author": video.author,
"author_icon": video.avatar,
"site_icon": video.avatar,
"channel_id": video.chid,
"second_channel_id": video.second_chid,
"source_id": video.source_id,
"online": True,
"video_url": video.c_src,
"video_thumbnail": video.c_thumbnail,
"video_duration": video.c_duration,
"play_times": 0,
}
        # cls.clean_pg_data(data)  # clean the data before uploading to PG
try:
r = requests.post(cls.UPLOAD_URL, json=data, timeout=(5, 10))
except Exception as e:
logging.warning(e.message)
else:
if r.status_code == 200:
logging.info(json.dumps(r.json()))
# cls.store_relate_videos(r.json()["id"])
else:
logging.info(r.status_code)
@classmethod
def store_relate_videos(cls, nid):
try:
r = requests.post(cls.RELATE_URL, data={"id": nid}, timeout=(5, 10))
except Exception as e:
logging.warning(e.message)
else:
logging.info(len(r.json()))
    def store(self, video):
        """Store a video; dispatches to the MongoDB and PG storage backends."""
if not (video.title and video.c_src and video.author
and video.avatar and video.c_thumbnail):
logging.warn("video data miss fields title: %s,c_src: %s"
% (video.title, video.c_src))
return
try:
self.upload_to_mongodb(video)
self.upload_to_pg(video)
except Exception as e:
logging.error(e.message)
class VideoMeiPai(VideoBase):
    """Meipai video crawling and parsing class"""
PB_SITE = u"美拍"
r_json = True
@classmethod
def parse(cls, document, task):
data = [item["media"] for item in document if item["type"] == "media"]
videos = list()
for item in data:
video = cls.build_video(task)
video.title = item['caption']
video.insert = format_time()
video.pb_time = format_time(item['created_at'])
video.pb_url = item['url']
video.author = item["user"]["screen_name"]
video.c_src = item["video"]
video.c_thumbnail = item['cover_pic']
video.c_duration = item['time']
video.avatar = item["user"]["avatar"]
videos.append(video)
return videos
class VideoKuaiShou(VideoBase):
    """Kuaishou video crawling and parsing class"""
PB_SITE = u"快手"
r_json = True
@classmethod
def parse(cls, document, task):
data = document.get("feeds", [])
videos = list()
for item in data:
video_urls = item.get("main_mv_urls")
thumbs = item.get("cover_thumbnail_urls")
avatars = item.get("headurls")
if not all([video_urls, thumbs, avatars]):
continue
video = cls.build_video(task)
video.title = item['caption']
video.insert = format_time()
video.pb_time = format_time(item['timestamp'])
video.author = item["user_name"]
video.c_src = video_urls[0]["url"]
video.pb_url = video.c_src
video.c_thumbnail = thumbs[0]["url"]
duration = int(item["ext_params"].get("video", 0) / 1000.0)
video.c_duration = duration
video.avatar = avatars[0]["url"]
videos.append(video)
return videos
class VideoZAKER(VideoBase):
    """ZAKER video crawling and parsing class"""
PB_SITE = "ZAKER"
r_json = True
@classmethod
def parse(cls, document, task):
data = document["data"].get("articles", [])
videos = list()
for item in data:
detail_url = item["full_url"]
detail = cls.download(detail_url, c_json=True).get("data")
if not detail:
continue
video_url = detail["video_info"]["url"]
if video_url.endswith("m3u8"):
video_url = video_url.replace("m3u8", "mp4")
label = detail['video_info']["video_label"].split(":")[::-1]
duration = 0
for num, i in enumerate(label):
duration += pow(60, num) * int(i)
video = cls.build_video(task)
video.title = item['title']
video.pb_time = format_time()
video.insert = format_time()
video.author = item["auther_name"]
video.c_src = video_url
video.pb_url = item["weburl"]
video.c_thumbnail = detail["video_info"]["pic_url"]
video.c_duration = duration
video.avatar = detail["article_group"]["logo"]["url"]
videos.append(video)
return videos
class VideoWeibo(VideoBase):
    """Weibo video crawling and parsing class (only the embedded Miaopai videos can be crawled)"""
PB_SITE = u"微博"
r_json = False
cookie_dict = {
"SUB": "_2AkMvnF44dcPhrAJWm_EXzGzqaIhH-jycSTfOAn7uJhMyAxh77nc-qSWPCC49JGeSHgISGwk67XxQvGhEsQ.."}
video_url_re = re.compile(r'video_src=(.*?)&playerType')
config = {
"detail_url": {"attribute": "href", "method": "select"},
"title": {"params": {"selector": "div.txt_cut"}, "method": "select"},
"user_name": {"params": {"selector": "div.item_a"}, "method": "select"},
"user_avatar": {"attribute": "src", "params": {"selector": "img.face_pho"}, "method": "select"},
"thumbnail": {"attribute": "src", "params": {"selector": "img.piccut"}, "method": "select"},
"list": {"params": {"selector": "div.weibo_tv_frame > ul.li_list_1 > a"}, "method": "select"}
}
@classmethod
def download(cls, url, c_json=False, skip=None, headers=None):
session = requests.Session()
session.cookies = requests.utils.cookiejar_from_dict(cls.cookie_dict,
cookiejar=None,
overwrite=True)
response = session.get(url, headers=headers, timeout=(10, cls.timeout))
content = response.content
_, content = html_to_unicode(
content_type_header=response.headers.get("content-type"),
html_body_str=content
)
return content.encode("utf-8")
@staticmethod
def find_extract_tag_attribute(tag, params):
if params.get("params"):
tag = find_tag(tag, params)
attribute = params.get("attribute", "text")
return extract_tag_attribute(tag, attribute)
@classmethod
def parse(cls, document, task):
soup = BeautifulSoup(document, "lxml", from_encoding="utf-8")
tags = soup.select(selector="div.weibo_tv_frame > ul.li_list_1 > a")
videos = list()
for tag in tags:
video = cls.build_video(task)
video.title = cls.find_extract_tag_attribute(tag, cls.config["title"])
video.pb_time = format_time()
detail_url = urljoin("http://weibo.com", cls.find_extract_tag_attribute(tag, cls.config["detail_url"]))
content = cls.download(url=detail_url)
video_url = unquote_plus(cls.video_url_re.findall(content)[0])
if "miaopai" not in video_url:
continue
video_url = video_url[:video_url.index('?')]
video.c_src = video_url
video.pb_url = detail_url
video.author = cls.find_extract_tag_attribute(tag, cls.config["user_name"])
video.c_thumbnail = cls.find_extract_tag_attribute(tag, cls.config["thumbnail"])
video.c_duration = 0
video.avatar = cls.find_extract_tag_attribute(tag, cls.config["user_avatar"])
videos.append(video)
return videos
class VideoThePaper(VideoBase):
    """The Paper (Pengpai News) video crawling and parsing class"""
PB_SITE = "ThePaper"
r_json = False
video_url_re = re.compile(r'source src="(.*?)" type="video/mp4"')
config = {
"detail_url": {"attribute": "href", "params": {"selector": "a"}, "method": "select"},
"title": {"params": {"selector": "div.video_title"}, "method": "select"},
"user_name": {"params": {"selector": "div.t_source > a"}, "method": "select"},
"user_avatar": {"attribute": "src", "params": {"selector": "div.video_txt_r_icon img"}, "method": "select"},
"thumbnail": {"attribute": "src", "params": {"selector": "div.video_list_pic > img"}, "method": "select"},
"list": {"params": {"selector": "div.video_list > li.video_news"}, "method": "select"}
}
@staticmethod
def find_extract_tag_attribute(tag, params):
if params.get("params"):
tag = find_tag(tag, params)
attribute = params.get("attribute", "text")
return extract_tag_attribute(tag, attribute)
@classmethod
def parse(cls, document, task):
soup = BeautifulSoup(document, "lxml", from_encoding="utf-8")
tags = soup.select(selector=".video_news")
videos = list()
for tag in tags:
video = cls.build_video(task)
video.title = cls.find_extract_tag_attribute(tag, cls.config["title"])
video.pb_time = format_time()
detail_url = urljoin("http://www.thepaper.cn/",
cls.find_extract_tag_attribute(tag, cls.config["detail_url"]))
content = cls.download(url=detail_url)
try:
video_url = unquote_plus(cls.video_url_re.findall(content)[0])
except IndexError as e:
logging.warning("Can not get the url of the video")
continue
except Exception as e:
logging.warning(e)
continue
video.c_src = video_url
video.pb_url = detail_url
video.author = cls.find_extract_tag_attribute(tag, cls.config["user_name"])
video.author = video.author.replace(u"@所有人", u"澎湃视频")
video.c_thumbnail = cls.find_extract_tag_attribute(tag, cls.config["thumbnail"])
video.c_duration = 0
detail_soup = BeautifulSoup(document, "lxml", from_encoding="utf-8")
video.avatar = cls.find_extract_tag_attribute(detail_soup, cls.config["user_avatar"])
videos.append(video)
return videos
class VideoAutoHome(VideoBase):
    """Autohome video crawling and parsing class"""
PB_SITE = u"汽车之家"
site_icon = "https://oss-cn-hangzhou.aliyuncs.com/bdp-images/cf68e2b0b6d611e6ad6a00163e001e55.jpg"
r_json = False
vid_re = re.compile(r'vid=(.*?)&|vid: \"(.*?)\"') #
video_info_url = "http://p-vp.autohome.com.cn/api/gmi?mid={mid}&useragent=Android"
config = {
"detail_url": {"attribute": "href", "params": {"selector": "div.video-item-tit > a"}, "method": "select"},
"title": {"params": {"selector": "div.video-item-tit > a"}, "method": "select"},
"pb_time": {"params": {"selector": "div:nth-of-type(3) > span:nth-of-type(3)"}, "method": "select"},
}
@staticmethod
def find_extract_tag_attribute(tag, params):
if params.get("params"):
tag = find_tag(tag, params)
attribute = params.get("attribute", "text")
return extract_tag_attribute(tag, attribute)
@classmethod
def parse(cls, document, task):
soup = BeautifulSoup(document, "lxml", from_encoding="utf-8")
tags = soup.select(selector="div.video-item")
videos = list()
for tag in tags:
video = cls.build_video(task)
video.title = cls.find_extract_tag_attribute(tag, cls.config["title"])
pb_time = cls.find_extract_tag_attribute(tag, cls.config["pb_time"])
video.pb_time = format_time(pb_time)
if "youchuang" in task.url:
detail_url = urljoin("http://youchuang.autohome.com.cn/",
cls.find_extract_tag_attribute(tag, cls.config["detail_url"]))
else:
detail_url = urljoin("http://v.autohome.com.cn/",
cls.find_extract_tag_attribute(tag, cls.config["detail_url"]))
content = cls.download(url=detail_url)
try:
vid = cls.vid_re.findall(content)[0]
vid = filter(lambda x: x, vid)[0]
except IndexError as e:
logging.warning("Can not get the vid of the video")
continue
video_info_url = cls.video_info_url.format(mid=vid)
video_info = cls.download(video_info_url, c_json=True, skip=(5, -1))
video.c_src = video_info["copies"][0]["playurl"]
video.pb_url = detail_url
video.author = cls.PB_SITE
video.c_thumbnail = video_info["img"]
video.c_duration = int(video_info["duration"])
video.avatar = cls.site_icon
videos.append(video)
return videos
# ("meipai", "热门", "https://newapi.meipai.com/hot/feed_timeline.json?page=1&language=zh-Hans&client_id=1089857302&device_id=862535037295724&version=5920"),
# ("zaker", "视频", "http://iphone.myzaker.com/zaker/video_tab.php"),
# ("kuaishou","视频","http://api.gifshow.com/rest/n/feed/list?mod=Xiaomi%28MI%20MAX%29&lon=116.376867&country_code=CN&did=ANDROID_27dafccd6e32bfb2&app=0&net=WIFI&oc=UNKNOWN&ud=0&c=XIAOMI&sys=ANDROID_6.0.1&appver=4.53.6.3294&language=zh-cn&lat=39.905152&ver=4.53&id=4&token=&pv=false&client_key=3c2cd3f3&count=20&page=1&type=7&os=android&sig=1c4e1dd2e802c2c8bcc41269af64c91a&"),
Channel = namedtuple("Channel",
[
"desc",
"site_id",
"channel_id",
"url",
"handler",
"chid",
"second_chid",
"source_id",
"cname"
])
TASKS = [
Channel("美拍-搞笑", ObjectId("58be81943deaeb61dd2e28a6"), ObjectId("58be831eccb13641f8bbc7fc"),
"https://newapi.meipai.com//channels/feed_timeline.json?id=13&type=1&feature=new&page=1&language=zh-Hans&client_id=1089857302&device_id=862535037295724&version=5920",
VideoMeiPai, 44, 4402, 5256, u"搞笑"),
Channel("美拍-宝宝", ObjectId("58be81943deaeb61dd2e28a6"), ObjectId("58be831eccb13641f8bbc7fd"),
"https://newapi.meipai.com//channels/feed_timeline.json?id=18&type=1&feature=new&page=1&language=zh-Hans&client_id=1089857302&device_id=862535037295724&version=5920",
VideoMeiPai, 44, 4403, 5257, u"萌宠萌娃"),
Channel("美拍-宠物", ObjectId("58be81943deaeb61dd2e28a6"), ObjectId("58be831fccb13641f8bbc7fe"),
"https://newapi.meipai.com//channels/feed_timeline.json?id=6&type=1&feature=new&page=1&language=zh-Hans&client_id=1089857302&device_id=862535037295724&version=5920",
VideoMeiPai, 44, 4403, 5258, u"萌宠萌娃"),
Channel("微博-搞笑", ObjectId("583bc5155d272cd5c47a7668"), ObjectId("58be8879ccb1364284fda8f1"),
"http://weibo.com/tv/vfun",
VideoWeibo, 44, 4402, 5259, u"搞笑"),
Channel("微博-萌宠萌娃", ObjectId("583bc5155d272cd5c47a7668"), ObjectId("58be8879ccb1364284fda8f2"),
"http://weibo.com/tv/moe",
VideoWeibo, 44, 4403, 5260, u"萌宠萌娃"),
Channel("微博-影视", ObjectId("583bc5155d272cd5c47a7668"), ObjectId("58c8ed29ccb1367615a9efdd"),
"http://weibo.com/tv/movie",
VideoWeibo, 44, 4404, 5261, u"娱乐"),
Channel("微博-音乐", ObjectId("583bc5155d272cd5c47a7668"), ObjectId("58c8ed2accb1367615a9efde"),
"http://weibo.com/tv/music",
VideoWeibo, 44, 4404, 5262, u"娱乐"),
Channel("微博-爱生活", ObjectId("583bc5155d272cd5c47a7668"), ObjectId("58c8ed2cccb1367615a9efe0"),
"http://weibo.com/tv/lifestyle",
VideoWeibo, 44, 4405, 5264, u"生活"),
Channel("微博-体育健康", ObjectId("583bc5155d272cd5c47a7668"), ObjectId("58c8ed2dccb1367615a9efe1"),
"http://weibo.com/tv/sports",
VideoWeibo, 44, 4406, 5265, u"体育"),
Channel("微博-明星综艺", ObjectId("583bc5155d272cd5c47a7668"), ObjectId("58c8ed2bccb1367615a9efdf"),
"http://weibo.com/tv/show",
VideoWeibo, 44, 4404, 5263, u"娱乐"),
Channel("澎湃-视频", ObjectId("57a43ec2da083a1c19957a64"), ObjectId("59141c483deaeb61dd2e54b6"),
"http://www.thepaper.cn/channel_26916",
VideoThePaper, 44, 4, 5268, u"新闻"),
Channel("汽车之家-原创视频", ObjectId("59141d373deaeb61dd2e54b7"), ObjectId("59141eab3deaeb61dd2e54b8"),
"http://v.autohome.com.cn/Original#pvareaid=2029180",
VideoAutoHome, 44, 4412, 5269, u"汽车"),
Channel("汽车之家-视频广场", ObjectId("59141d373deaeb61dd2e54b7"), ObjectId("59141ffe3deaeb61dd2e54ba"),
"http://v.autohome.com.cn/general/0-1-1#pvareaid=106447",
VideoAutoHome, 44, 4412, 5270, u"汽车"),
Channel("汽车之家-优创+精选", ObjectId("59141d373deaeb61dd2e54b7"), ObjectId("591420a23deaeb61dd2e54bb"),
"http://youchuang.autohome.com.cn/Subject/VRecommend/Index",
VideoAutoHome, 44, 4412, 5271, u"汽车")
]
def main():
random.shuffle(TASKS)
vs = StoreVideo()
for task in TASKS:
logging.info("start crawl: %s" % task.desc)
try:
videos = task.handler.run(task)
except Exception as e:
logging.error(e.message, exc_info=True)
else:
for video in videos:
vs.store(video)
logging.info("end crawl: %s" % task.desc)
client.close()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename="video.log",
filemode="a+")
main()
client.close()
|
China-00-com/china-00-com-movie
|
spiders/video_spider.py
|
Python
|
gpl-3.0
| 26,182
|
[
"MOE"
] |
f32fe0d215d00f4f23c77bbf2ed42e9dce568aa0e84f4527e3d9fda09f5edd9d
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import six
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_ops
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.distribute_lib import InputReplicationMode
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import distribute as distribute_types
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
def get_distributed_dataset(dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None,
options=None,
build=True):
"""Returns a distributed dataset from the given tf.data.Dataset instance.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset: a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that
the total batch size for each step (across all workers and replicas)
adds up to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
options: Default is None. `tf.distribute.InputOptions` used to control
options on how this dataset is distributed.
build: whether to build underlying datasets when a DistributedDataset is
created. This is only useful for `ParameterServerStrategy` now.
Returns:
A distributed dataset instance.
"""
if tf2.enabled():
return DistributedDataset(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
build=build,
options=options)
else:
return DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
def get_distributed_datasets_from_function(dataset_fn,
input_workers,
input_contexts,
strategy,
options=None,
build=True):
"""Returns a distributed dataset from the given input function.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset_fn: a function that returns a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
options: Default is None. `tf.distribute.InputOptions` used to control
options on how this dataset is distributed.
build: whether to build underlying datasets when a
      `DistributedDatasetsFromFunction` is created. This is only useful for
`ParameterServerStrategy` now.
Returns:
A distributed dataset instance.
Raises:
ValueError: if `options.experimental_replication_mode` and
`options.experimental_place_dataset_on_device` are not consistent
"""
if (options is not None and
options.experimental_replication_mode != InputReplicationMode.PER_REPLICA
and options.experimental_place_dataset_on_device):
raise ValueError(
"When `experimental_place_dataset_on_device` is set for dataset "
"placement, you must also specify `PER_REPLICA` for the "
"replication mode")
if (options is not None and
options.experimental_replication_mode == InputReplicationMode.PER_REPLICA
and options.experimental_fetch_to_device and
options.experimental_place_dataset_on_device):
raise ValueError(
"`experimental_place_dataset_on_device` can not be set to True "
"when experimental_fetch_to_device is True and "
"replication mode is set to `PER_REPLICA`")
if tf2.enabled():
return DistributedDatasetsFromFunction(
input_workers,
strategy,
input_contexts=input_contexts,
dataset_fn=dataset_fn,
options=options,
build=build,
)
else:
return DistributedDatasetsFromFunctionV1(input_workers, strategy,
input_contexts, dataset_fn,
options)
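# A hedged sketch of the from-function path above: the public
# `strategy.distribute_datasets_from_function` call ends up here with one
# `InputContext` per input worker, and each `dataset_fn` invocation handles
# its own sharding and per-replica batching. The global batch size of 4 is an
# illustrative assumption; this function is not part of the module's API.
def _example_distribute_datasets_from_function():  # illustrative sketch only
  import tensorflow as tf
  strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
  def dataset_fn(input_context):
    # Shard by input pipeline and batch with the per-replica batch size.
    per_replica_batch = input_context.get_per_replica_batch_size(
        global_batch_size=4)
    ds = tf.data.Dataset.range(16).shard(input_context.num_input_pipelines,
                                         input_context.input_pipeline_id)
    return ds.batch(per_replica_batch)
  return strategy.distribute_datasets_from_function(dataset_fn)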
def get_iterator_spec_from_dataset(strategy, dataset):
"""Returns an iterator spec from dataset function.
This function constructs type spec for iterator obtained from
iter(dataset).
Args:
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
dataset: A tf.data.Dataset instance. If using a function that returns a
tf.data.Dataset instance, pass dataset_fn.structured_outputs.
Returns:
A type_spec for iterator for dataset instance.
"""
output_element_spec = dataset.element_spec
if isinstance(dataset._type_spec, # pylint: disable=protected-access
(DistributedDatasetSpec,
DistributedDatasetsFromFunctionSpec)):
iterator_type_spec = DistributedIteratorSpec(
strategy.extended._input_workers_with_options( # pylint: disable=protected-access
), output_element_spec,
strategy.extended._container_strategy(), True, # pylint: disable=protected-access
None)
else:
if strategy.extended._num_gpus_per_worker: # pylint: disable=protected-access
logging.warning(
f"{strategy.extended._num_gpus_per_worker} GPUs " # pylint: disable=protected-access
"are allocated per worker. Please use DistributedDataset by "
"calling strategy.experimental_distribute_dataset or strategy."
"distribute_datasets_from_function to make best use of GPU "
"resources"
)
iterator_type_spec = iterator_ops.IteratorSpec(output_element_spec)
return iterator_type_spec
@tf_export("distribute.DistributedIterator", v1=[])
class DistributedIteratorInterface(collections_abc.Iterator,
distribute_types.Iterator):
"""An iterator over `tf.distribute.DistributedDataset`.
`tf.distribute.DistributedIterator` is the primary mechanism for enumerating
elements of a `tf.distribute.DistributedDataset`. It supports the Python
Iterator protocol, which means it can be iterated over using a for-loop or by
fetching individual elements explicitly via `get_next()`.
You can create a `tf.distribute.DistributedIterator` by calling `iter` on
a `tf.distribute.DistributedDataset` or creating a python loop over a
`tf.distribute.DistributedDataset`.
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def get_next(self):
"""Returns the next input from the iterator for all replicas.
Example use:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.range(100).batch(2)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset_iterator = iter(dist_dataset)
>>> @tf.function
... def one_step(input):
... return input
>>> step_num = 5
>>> for _ in range(step_num):
... strategy.run(one_step, args=(dist_dataset_iterator.get_next(),))
>>> strategy.experimental_local_results(dist_dataset_iterator.get_next())
(<tf.Tensor: shape=(1,), dtype=int64, numpy=array([10])>,
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([11])>)
Returns:
A single `tf.Tensor` or a `tf.distribute.DistributedValues` which contains
the next input for all replicas.
Raises:
`tf.errors.OutOfRangeError`: If the end of the iterator has been reached.
"""
raise NotImplementedError(
"DistributedIterator.get_next() must be implemented in descendants.")
@property
def element_spec(self):
# pylint: disable=line-too-long
"""The type specification of an element of `tf.distribute.DistributedIterator`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_iterator.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedIterator`. This returned value
is typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedIterator.element_spec() must be implemented in descendants")
def get_next_as_optional(self):
# pylint: disable=line-too-long
"""Returns a `tf.experimental.Optional` that contains the next value for all replicas.
If the `tf.distribute.DistributedIterator` has reached the end of the
sequence, the returned `tf.experimental.Optional` will have no value.
Example usage:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> global_batch_size = 2
>>> steps_per_loop = 2
>>> dataset = tf.data.Dataset.range(10).batch(global_batch_size)
>>> distributed_iterator = iter(
... strategy.experimental_distribute_dataset(dataset))
>>> def step_fn(x):
... # train the model with inputs
... return x
>>> @tf.function
... def train_fn(distributed_iterator):
... for _ in tf.range(steps_per_loop):
... optional_data = distributed_iterator.get_next_as_optional()
... if not optional_data.has_value():
... break
... per_replica_results = strategy.run(step_fn, args=(optional_data.get_value(),))
... tf.print(strategy.experimental_local_results(per_replica_results))
>>> train_fn(distributed_iterator)
... # ([0 1], [2 3])
... # ([4], [])
Returns:
An `tf.experimental.Optional` object representing the next value from the
`tf.distribute.DistributedIterator` (if it has one) or no value.
"""
# pylint: enable=line-too-long
raise NotImplementedError(
"get_next_as_optional() not implemented in descendants")
@tf_export("distribute.DistributedDataset", v1=[])
class DistributedDatasetInterface(collections_abc.Iterable,
distribute_types.Iterable):
# pylint: disable=line-too-long
"""Represents a dataset distributed among devices and machines.
A `tf.distribute.DistributedDataset` could be thought of as a "distributed"
dataset. When you use `tf.distribute` API to scale training to multiple
devices or machines, you also need to distribute the input data, which leads
to a `tf.distribute.DistributedDataset` instance, instead of a
`tf.data.Dataset` instance in the non-distributed case. In TF 2.x,
`tf.distribute.DistributedDataset` objects are Python iterables.
Note: `tf.distribute.DistributedDataset` instances are *not* of type
  `tf.data.Dataset`. It only supports the two usages described below:
iteration and `element_spec`. We don't support any other APIs to transform or
inspect the dataset.
There are two APIs to create a `tf.distribute.DistributedDataset` object:
  `tf.distribute.Strategy.experimental_distribute_dataset(dataset)` and
`tf.distribute.Strategy.distribute_datasets_from_function(dataset_fn)`.
*When to use which?* When you have a `tf.data.Dataset` instance, and the
regular batch splitting (i.e. re-batch the input `tf.data.Dataset` instance
with a new batch size that is equal to the global batch size divided by the
number of replicas in sync) and autosharding (i.e. the
`tf.data.experimental.AutoShardPolicy` options) work for you, use the former
API. Otherwise, if you are *not* using a canonical `tf.data.Dataset` instance,
or you would like to customize the batch splitting or sharding, you can wrap
  this logic in a `dataset_fn` and use the latter API. Both APIs handle
prefetch to device for the user. For more details and examples, follow the
links to the APIs.
There are two main usages of a `DistributedDataset` object:
1. Iterate over it to generate the input for a single device or multiple
devices, which is a `tf.distribute.DistributedValues` instance. To do this,
you can:
* use a pythonic for-loop construct:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(4).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> @tf.function
... def train_step(input):
... features, labels = input
... return labels - 0.3 * features
>>> for x in dist_dataset:
... # train_step trains the model using the dataset elements
... loss = strategy.run(train_step, args=(x,))
... print("Loss is", loss)
Loss is PerReplica:{
0: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32)
}
Placing the loop inside a `tf.function` will give a performance boost.
However `break` and `return` are currently not supported if the loop is
placed inside a `tf.function`. We also don't support placing the loop
inside a `tf.function` when using
`tf.distribute.experimental.MultiWorkerMirroredStrategy` or
`tf.distribute.experimental.TPUStrategy` with multiple workers.
* use `__iter__` to create an explicit iterator, which is of type
`tf.distribute.DistributedIterator`
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> train_dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(50).batch(global_batch_size)
>>> train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
>>> @tf.function
... def distributed_train_step(dataset_inputs):
... def train_step(input):
... loss = tf.constant(0.1)
... return loss
... per_replica_losses = strategy.run(train_step, args=(dataset_inputs,))
... return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,axis=None)
>>> EPOCHS = 2
>>> STEPS = 3
>>> for epoch in range(EPOCHS):
... total_loss = 0.0
... num_batches = 0
... dist_dataset_iterator = iter(train_dist_dataset)
... for _ in range(STEPS):
... total_loss += distributed_train_step(next(dist_dataset_iterator))
... num_batches += 1
... average_train_loss = total_loss / num_batches
... template = ("Epoch {}, Loss: {:.4f}")
... print (template.format(epoch+1, average_train_loss))
Epoch 1, Loss: 0.2000
Epoch 2, Loss: 0.2000
To achieve a performance improvement, you can also wrap the `strategy.run`
call with a `tf.range` inside a `tf.function`. This runs multiple steps in a
`tf.function`. Autograph will convert it to a `tf.while_loop` on the worker.
  However, it is less flexible compared with running a single step inside
`tf.function`. For example, you cannot run things eagerly or arbitrary
python code within the steps.
2. Inspect the `tf.TypeSpec` of the data generated by `DistributedDataset`.
`tf.distribute.DistributedDataset` generates
`tf.distribute.DistributedValues` as input to the devices. If you pass the
input to a `tf.function` and would like to specify the shape and type of
each Tensor argument to the function, you can pass a `tf.TypeSpec` object to
the `input_signature` argument of the `tf.function`. To get the
`tf.TypeSpec` of the input, you can use the `element_spec` property of the
`tf.distribute.DistributedDataset` or `tf.distribute.DistributedIterator`
object.
For example:
>>> global_batch_size = 4
>>> epochs = 1
>>> steps_per_epoch = 1
>>> mirrored_strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([2.])).repeat(100).batch(global_batch_size)
>>> dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
>>> @tf.function(input_signature=[dist_dataset.element_spec])
... def train_step(per_replica_inputs):
... def step_fn(inputs):
... return tf.square(inputs)
... return mirrored_strategy.run(step_fn, args=(per_replica_inputs,))
>>> for _ in range(epochs):
... iterator = iter(dist_dataset)
... for _ in range(steps_per_epoch):
... output = train_step(next(iterator))
... print(output)
PerReplica:{
0: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32)
}
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def __iter__(self):
"""Creates an iterator for the `tf.distribute.DistributedDataset`.
The returned iterator implements the Python Iterator protocol.
Example usage:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> print(next(distributed_iterator))
PerReplica:{
0: tf.Tensor([1 2], shape=(2,), dtype=int32),
1: tf.Tensor([3 4], shape=(2,), dtype=int32)
}
Returns:
An `tf.distribute.DistributedIterator` instance for the given
`tf.distribute.DistributedDataset` object to enumerate over the
distributed data.
"""
raise NotImplementedError("Must be implemented in descendants")
@property
def element_spec(self):
"""The type specification of an element of this `tf.distribute.DistributedDataset`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedDataset`. This returned value is
typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedDataset.element_spec must be implemented in descendants.")
@doc_controls.do_not_generate_docs
def reduce(self, initial_state, reduce_func):
raise NotImplementedError(
"DistributedDataset.reduce must be implemented in descendants.")
class InputWorkers(object):
"""A 1-to-many mapping from input worker devices to compute devices."""
# TODO(ishark): Remove option canonicalize_devices and make all the callers
# pass canonicalized or raw device strings as relevant from strategy.
def __init__(self, worker_device_pairs, canonicalize_devices=True):
"""Initialize an `InputWorkers` object.
Args:
worker_device_pairs: A sequence of pairs: `(input device, a tuple of
compute devices fed by that input device)`.
canonicalize_devices: Whether to canonicalize devices for workers fully or
partially. If False, it will partially canonicalize devices by removing
job and task.
"""
self._worker_device_pairs = worker_device_pairs
self._input_worker_devices = tuple(d for d, _ in self._worker_device_pairs)
self._canonicalize_devices = canonicalize_devices
if canonicalize_devices:
self._fed_devices = tuple(
tuple(device_util.canonicalize(d)
for d in f)
for _, f in self._worker_device_pairs)
else:
self._fed_devices = tuple(
tuple(device_util.canonicalize_without_job_and_task(d)
for d in f)
for _, f in self._worker_device_pairs)
@property
def num_workers(self):
return len(self._input_worker_devices)
@property
def worker_devices(self):
return self._input_worker_devices
def compute_devices_for_worker(self, worker_index):
return self._fed_devices[worker_index]
def __repr__(self):
devices = self.worker_devices
debug_repr = ",\n".join(" %d %s: %s" %
(i, devices[i], self._fed_devices[i])
for i in range(len(devices)))
return "%s:{\n%s}" % (self.__class__.__name__, debug_repr)
def serialize(self):
return (self._worker_device_pairs, self._canonicalize_devices)
def deserialize(self, serialized):
return InputWorkers(serialized)
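# A small sketch of the 1-to-many mapping `InputWorkers` encodes, using
# hypothetical device strings: one input (host) device feeding two compute
# devices. Only names defined in this module are used; the sketch is not part
# of the module's API.
def _example_input_workers():  # illustrative sketch only
  worker_device_pairs = [
      ("/job:localhost/replica:0/task:0/device:CPU:0",
       ("/job:localhost/replica:0/task:0/device:GPU:0",
        "/job:localhost/replica:0/task:0/device:GPU:1")),
  ]
  workers = InputWorkers(worker_device_pairs)
  assert workers.num_workers == 1
  # The single CPU input device feeds both GPU compute devices.
  return workers.compute_devices_for_worker(0)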
def _get_next_as_optional(iterator, strategy, return_per_replica=False):
"""Returns an empty dataset indicator and the next input from the iterator.
Args:
iterator: a DistributedIterator object.
strategy: the `tf.distribute.Strategy` instance.
return_per_replica: a boolean. If True, the returned data will be wrapped
with `PerReplica` structure. Otherwise it is a 2D
num_input_workers*num_replicas_per_worker list.
Returns:
A tuple (a boolean tensor indicating whether the next batch has value
globally, data from all replicas).
"""
replicas = []
worker_has_values = []
worker_devices = []
with distribution_strategy_context.enter_or_assert_strategy(strategy):
if distribution_strategy_context.get_replica_context() is not None:
raise ValueError("next(iterator) should be called from outside of "
"replica_fn. e.g. strategy.run(replica_fn, "
"args=(next(iterator),))")
for i, worker in enumerate(iterator._input_workers.worker_devices): # pylint: disable=protected-access
with ops.device(worker):
worker_has_value, next_element = (
iterator._iterators[i].get_next_as_list()) # pylint: disable=protected-access
# Collective all-reduce requires explicit devices for inputs.
with ops.device("/cpu:0"):
# Converting to integers for all-reduce.
worker_has_value = math_ops.cast(worker_has_value, dtypes.int64)
worker_devices.append(worker_has_value.device)
worker_has_values.append(worker_has_value)
# Make `replicas` a flat list of values across all replicas.
replicas.append(next_element)
if return_per_replica:
flattened_data = []
for per_worker_data in replicas:
flattened_data.extend(per_worker_data)
replicas = _create_per_replica(flattened_data, strategy)
# Run an all-reduce to see whether any worker has values.
# TODO(b/131423105): we should be able to short-cut the all-reduce in some
# cases.
if getattr(strategy.extended, "_support_per_replica_values", True):
# `reduce` expects a `PerReplica`, so we pass it one, even
# though it doesn't actually have a value per replica
worker_has_values = values.PerReplica(worker_has_values)
global_has_value = strategy.reduce(
reduce_util.ReduceOp.SUM, worker_has_values, axis=None)
else:
assert len(worker_has_values) == 1
global_has_value = worker_has_values[0]
global_has_value = array_ops.reshape(
math_ops.cast(global_has_value, dtypes.bool), [])
return global_has_value, replicas
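# A hedged sketch of the public behavior backed by `_get_next_as_optional`
# above: `get_next_as_optional()` wraps the all-reduced "any worker still has
# data" signal in a `tf.experimental.Optional`. The single-batch dataset and
# two-GPU strategy are illustrative assumptions.
def _example_get_next_as_optional():  # illustrative sketch only
  import tensorflow as tf
  strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
  dist_dataset = strategy.experimental_distribute_dataset(
      tf.data.Dataset.range(4).batch(4))
  iterator = iter(dist_dataset)
  first = iterator.get_next_as_optional()   # has_value() -> True
  second = iterator.get_next_as_optional()  # has_value() -> False (exhausted)
  return first.has_value(), second.has_value()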
def _is_statically_shaped(element_spec):
"""Test if an iterator output is statically shaped.
For sparse and ragged tensors this only tests the batch dimension.
Args:
element_spec: a nest structure of `tf.TypeSpec`. The element spec of the
dataset of the iterator.
Returns:
True if the shape is static, false otherwise.
"""
for spec in nest.flatten(element_spec):
if isinstance(
spec, (sparse_tensor.SparseTensorSpec, ragged_tensor.RaggedTensorSpec)):
# For sparse or ragged tensor, we should only check the first
# dimension in order to get_next_as_optional. This is because
# when these tensors get batched by dataset only the batch dimension
# is set.
if spec.shape.rank > 0 and spec.shape.as_list()[0] is None:
return False
else:
for component in nest.flatten(spec._component_specs): # pylint: disable=protected-access
if not component.shape.is_fully_defined():
return False
return True
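# A minimal sketch of the check above with hypothetical element specs: a fully
# defined shape counts as static, while an unknown (None) batch dimension does
# not, which is what forces the optional-based last-batch handling downstream.
def _example_is_statically_shaped():  # illustrative sketch only
  from tensorflow.python.framework import tensor_spec
  static_spec = tensor_spec.TensorSpec(shape=(8, 1), dtype=dtypes.float32)
  dynamic_spec = tensor_spec.TensorSpec(shape=(None, 1), dtype=dtypes.float32)
  return (_is_statically_shaped(static_spec),    # True
          _is_statically_shaped(dynamic_spec))   # False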
class DistributedIteratorBase(DistributedIteratorInterface):
"""Common implementation for all input iterators."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers, iterators, strategy,
enable_get_next_as_optional):
assert isinstance(input_workers, InputWorkers)
if not input_workers.worker_devices:
raise ValueError("Should have at least one worker for input iterator.")
self._iterators = iterators
self._input_workers = input_workers
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
def next(self):
return self.__next__()
def __next__(self):
try:
return self.get_next()
except errors.OutOfRangeError:
raise StopIteration
def __iter__(self):
return self
def get_next_as_optional(self):
global_has_value, replicas = _get_next_as_optional(
self, self._strategy, return_per_replica=True)
def return_none():
return optional_ops.Optional.empty(self._element_spec)
return control_flow_ops.cond(
global_has_value, lambda: optional_ops.Optional.from_value(replicas),
return_none)
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
if not self._enable_get_next_as_optional:
with distribution_strategy_context.enter_or_assert_strategy(
self._strategy):
if distribution_strategy_context.get_replica_context() is not None:
raise ValueError("next(iterator) should be called from outside of "
"replica_fn. e.g. strategy.run(replica_fn, "
"args=(next(iterator),))")
replicas = []
for i, worker in enumerate(self._input_workers.worker_devices):
if name is not None:
d = tf_device.DeviceSpec.from_string(worker)
new_name = "%s_%s_%d" % (name, d.job, d.task)
else:
new_name = None
with ops.device(worker):
# Make `replicas` a flat list of values across all replicas.
replicas.extend(
self._iterators[i].get_next_as_list_static_shapes(new_name))
return _create_per_replica(replicas, self._strategy)
out_of_range_replicas = []
def out_of_range_fn(worker_index, device):
"""This function will throw an OutOfRange error."""
      # This is only called when there is no data left, so calling get_next()
      # will trigger an OutOfRangeError.
data = self._iterators[worker_index].get_next(device)
out_of_range_replicas.append(data)
return data
global_has_value, replicas = _get_next_as_optional(
self, self._strategy, return_per_replica=False)
results = []
for i, worker in enumerate(self._input_workers.worker_devices):
with ops.device(worker):
devices = self._input_workers.compute_devices_for_worker(i)
for j, device in enumerate(devices):
with ops.device(device):
# pylint: disable=undefined-loop-variable
# pylint: disable=cell-var-from-loop
# It is fine for the lambda to capture variables from the loop as
# the lambda is executed in the loop as well.
result = control_flow_ops.cond(
global_has_value,
lambda: replicas[i][j],
lambda: out_of_range_fn(i, device),
strict=True,
)
# pylint: enable=cell-var-from-loop
# pylint: enable=undefined-loop-variable
results.append(result)
replicas = results
return _create_per_replica(replicas, self._strategy)
class DistributedIteratorV1(DistributedIteratorBase):
"""Input Iterator for a distributed dataset."""
# We need a private initializer method for re-initializing multidevice
# iterators when used with Keras training loops. If we don't reinitialize the
# iterator we run into memory leak issues (b/123315763).
@property
def _initializer(self):
init_ops = []
for it in self._iterators:
init_ops.extend(it.initialize())
return control_flow_ops.group(init_ops)
@deprecated(None, "Use the iterator's `initializer` property instead.")
def initialize(self):
"""Initialize underlying iterators.
Returns:
A list of any initializer ops that should be run.
"""
return self._initializer
@property
def initializer(self):
"""Returns a list of ops that initialize the iterator."""
return self.initialize()
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_classes(self):
return self._iterators[0].output_classes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_shapes(self):
return self._iterators[0].output_shapes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_types(self):
return self._iterators[0].output_types
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
def get_iterator(self, worker):
for i, w in enumerate(self._input_workers.worker_devices):
if worker == w:
return self._iterators[i]
return None
@property
def element_spec(self):
"""The type specification of an element of this iterator."""
return self._element_spec
class DistributedDatasetAndIteratorSpec(type_spec.TypeSpec):
"""Common Type specification for `DistributedDataset and DistributedDatasetsFromFunction."""
__slots__ = [
"_input_workers", "_element_spec", "_strategy",
"_enable_get_next_as_optional", "_options",
"_canonicalize_devices"
]
def __init__(self,
input_workers,
element_spec,
strategy,
options,
enable_get_next_as_optional=None):
# We don't want to allow deserialization of this class because we don't
    # serialize the strategy object. Currently the only place where
    # `_deserialize` is called is when we save/restore using SavedModel.
if isinstance(input_workers, tuple):
raise NotImplementedError("DistributedIteratorSpec does not have support "
"for deserialization.")
else:
self._input_workers = input_workers
self._element_spec = element_spec
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
self._options = options
if self._strategy:
self._canonicalize_devices = getattr(self._strategy,
"_canonicalize_devices", True)
else:
self._canonicalize_devices = True
def _serialize(self):
# We cannot serialize the strategy object so we convert it to an id that we
# can use for comparison.
return (self._input_workers.serialize(), self._element_spec,
id(self._strategy), id(self._options))
def _deserialize(self):
raise ValueError(
f"Deserialization is currently unsupported for {type(self)}.")
def sanity_check_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
if type(self) is not type(other):
raise ValueError("No TypeSpec is compatible with both %s and %s" %
(self, other))
if self._input_workers.serialize() != other._input_workers.serialize():
raise ValueError("_input_workers is not compatible with both %s "
"and %s" % (self, other))
if self._strategy is not other._strategy:
raise ValueError("tf.distribute strategy is not compatible with both %s "
"and %s" % (self, other))
class DistributedIteratorSpec(DistributedDatasetAndIteratorSpec):
"""Type specification for `DistributedIterator`."""
def __init__(self, input_workers, element_spec, strategy,
enable_get_next_as_optional, options):
super(DistributedIteratorSpec,
self).__init__(input_workers, element_spec, strategy, options,
enable_get_next_as_optional)
@property
def value_type(self):
return DistributedIterator
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec)
return DistributedIteratorSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, (input_device, compute_devices) in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(
_SingleWorkerDatasetIteratorSpec(input_device, compute_devices,
element_spec, self._options,
self._canonicalize_devices))
return specs
def _to_components(self, value):
return value._iterators # pylint: disable=protected-access
def _from_components(self, components):
return DistributedIterator(
input_workers=self._input_workers,
iterators=None,
components=components,
element_spec=self._element_spec,
strategy=self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedIteratorSpec(value._input_workers, value._element_spec,
value._strategy,
value._enable_get_next_as_optional,
value._options)
def _with_tensor_ranks_only(self):
element_spec = nest.map_structure(
lambda s: s._with_tensor_ranks_only(), # pylint: disable=protected-access
self._element_spec)
return DistributedIteratorSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
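# A hedged sketch of what the spec above enables: a `DistributedIterator` is a
# `CompositeTensor`, so it can be decomposed into its per-worker iterator
# components and rebuilt from them, which is how it crosses `tf.function`
# boundaries. The strategy and dataset are illustrative assumptions.
def _example_distributed_iterator_spec():  # illustrative sketch only
  import tensorflow as tf
  strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
  iterator = iter(strategy.experimental_distribute_dataset(
      tf.data.Dataset.range(8).batch(4)))
  spec = iterator._type_spec  # pylint: disable=protected-access
  rebuilt = spec._from_components(spec._to_components(iterator))  # pylint: disable=protected-access
  return rebuilt.element_spec == iterator.element_spec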
class DistributedIterator(DistributedIteratorBase,
composite_tensor.CompositeTensor):
"""Input Iterator for a distributed dataset."""
def __init__(self,
input_workers=None,
iterators=None,
strategy=None,
components=None,
element_spec=None,
enable_get_next_as_optional=False,
options=None):
if input_workers is None:
raise ValueError("`input_workers` should be "
"provided.")
error_message = ("Either `input_workers` or "
"both `components` and `element_spec` need to be "
"provided.")
self._options = options
if iterators is None:
if (components is None or element_spec is None):
raise ValueError(error_message)
self._element_spec = element_spec
self._input_workers = input_workers
self._iterators = components
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
else:
if (components is not None and element_spec is not None):
raise ValueError(error_message)
super(DistributedIterator,
self).__init__(input_workers, iterators, strategy,
enable_get_next_as_optional)
@property
def element_spec(self):
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
# batching handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
# Note that we use actual element_spec instead of the rebatched-as-dynamic
# one to create DistributedIteratorSpec, to be consistent with the
# underlying iterators' specs.
return DistributedIteratorSpec(self._input_workers, self._element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
class _IterableInput(DistributedDatasetInterface):
"""Base class for iterable inputs for distribution strategies."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers):
assert isinstance(input_workers, InputWorkers)
self._input_workers = input_workers
def __iter__(self):
raise NotImplementedError("must be implemented in descendants")
def reduce(self, initial_state, reduce_fn):
"""Execute a `reduce_fn` over all the elements of the input."""
iterator = iter(self)
has_data, data = _get_next_as_optional(
iterator, self._strategy, return_per_replica=True)
def cond(has_data, data, state):
del data, state # Unused.
return has_data
def loop_body(has_data, data, state):
"""Executes `reduce_fn` in a loop till the dataset is empty."""
del has_data # Unused.
state = reduce_fn(state, data)
has_data, data = _get_next_as_optional(
iterator, self._strategy, return_per_replica=True)
return has_data, data, state
has_data, data, final_state = control_flow_ops.while_loop(
cond, loop_body, [has_data, data, initial_state], parallel_iterations=1)
return final_state
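# A hedged sketch of the `reduce` loop above: folding every element of a
# distributed dataset into one running value. The dataset, batch size and
# reduction are illustrative assumptions; this is not part of the module's API.
def _example_distributed_reduce():  # illustrative sketch only
  import tensorflow as tf
  strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
  dist_dataset = strategy.experimental_distribute_dataset(
      tf.data.Dataset.range(8).batch(4))
  def reduce_fn(state, per_replica_values):
    # Sum the per-replica batches across replicas and along the batch axis.
    return state + strategy.reduce(
        tf.distribute.ReduceOp.SUM, per_replica_values, axis=0)
  # 0 + 1 + ... + 7 == 28
  return dist_dataset.reduce(tf.constant(0, dtype=tf.int64), reduce_fn)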
class DistributedDatasetSpec(DistributedDatasetAndIteratorSpec):
"""Type specification for `DistributedDataset."""
def __init__(self, input_workers, element_spec, strategy,
enable_get_next_as_optional, options):
super(DistributedDatasetSpec,
self).__init__(input_workers, element_spec, strategy, options,
enable_get_next_as_optional)
@property
def value_type(self):
return DistributedDataset
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec)
return DistributedDatasetSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, _ in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(dataset_ops.DatasetSpec(element_spec))
return specs
def _to_components(self, value):
return value._cloned_datasets # pylint: disable=protected-access
def _from_components(self, components):
return DistributedDataset(
input_workers=self._input_workers,
strategy=self._strategy,
components=components,
element_spec=self._element_spec,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedDatasetSpec(value._input_workers, value._element_spec,
value._strategy,
value._enable_get_next_as_optional,
value._options)
class DistributedDataset(_IterableInput, composite_tensor.CompositeTensor):
"""Distributed dataset that supports prefetching to multiple devices."""
def __init__(self,
input_workers,
strategy,
dataset=None,
num_replicas_in_sync=None,
input_context=None,
components=None,
element_spec=None,
enable_get_next_as_optional=None,
build=True,
options=None):
"""Distribute the dataset on all workers.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
dataset: `tf.data.Dataset` that will be used as the input source. Either
dataset or components field should be passed when constructing
        DistributedDataset. Use this when constructing DistributedDataset from a
new `tf.data.Dataset`. Use components when constructing using
DistributedDatasetSpec.
num_replicas_in_sync: Optional integer. If this is not None, the value
is used to decide how to rebatch datasets into smaller batches so that
the total batch size for each step (across all workers and replicas)
adds up to `dataset`'s batch size.
      input_context: `InputContext` for sharding. Only pass this in for
        between-graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
components: datasets when DistributedDataset is constructed from
DistributedDatasetSpec. Either field dataset or components should be
passed.
element_spec: element spec for DistributedDataset when constructing from
DistributedDatasetSpec. This will be used to set the element_spec for
DistributedDataset and verified against element_spec from components.
enable_get_next_as_optional: this is required when components is passed
instead of dataset.
build: whether to build underlying datasets when this object is created.
This is only useful for `ParameterServerStrategy` now.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
"""
super(DistributedDataset, self).__init__(input_workers=input_workers)
if input_workers is None or strategy is None:
raise ValueError("input_workers and strategy are required arguments")
if dataset is not None and components is not None:
raise ValueError("Only one of dataset or components should be present")
if dataset is None and components is None:
raise ValueError("At least one of dataset or components should be passed")
self._input_workers = input_workers
self._strategy = strategy
self._options = options
self._input_context = input_context
self._num_replicas_in_sync = num_replicas_in_sync
if dataset is not None:
self._original_dataset = dataset
self._built = False
if build:
self.build()
else:
if not build:
raise ValueError(
"When constructing DistributedDataset with components, build "
"should not be False. This is an internal error. Please file a "
"bug.")
if enable_get_next_as_optional is None:
raise ValueError(
"When constructing DistributedDataset with components, " +
"enable_get_next_as_optional should also be passed")
self._cloned_datasets = components
self._enable_get_next_as_optional = enable_get_next_as_optional
assert element_spec is not None
if element_spec != _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec):
raise ValueError("Mismatched element_spec from the passed components")
self._element_spec = element_spec
self._built = True
def build(self, dataset_to_replace=None):
assert not self._built
dataset = dataset_to_replace or self._original_dataset
self._create_cloned_datasets_from_dataset(dataset, self._input_context,
self._input_workers,
self._strategy,
self._num_replicas_in_sync)
self._element_spec = _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec)
self._built = True
def _create_cloned_datasets_from_dataset(self, dataset, input_context,
input_workers, strategy,
num_replicas_in_sync):
# We clone and shard the dataset on each worker. The current setup tries to
# shard the dataset by files if possible so that each worker sees a
# different subset of files. If that is not possible, will attempt to shard
# the final input such that each worker will run the entire preprocessing
# pipeline and only receive its own shard of the dataset.
# Additionally, we rebatch the dataset on each worker into
# `num_replicas_in_sync` smaller batches to be distributed among that
# worker's replicas, so that the batch size for a global step (across all
# workers and replicas) adds up to the original dataset's batch size.
if num_replicas_in_sync is not None:
num_workers = input_context.num_input_pipelines if input_context else len(
input_workers.worker_devices)
rebatch_fn = self._make_rebatch_fn(dataset, num_workers,
num_replicas_in_sync)
else:
rebatch_fn = None
self._cloned_datasets = []
if input_context:
# Between-graph where we rely on the input_context for sharding
assert input_workers.num_workers == 1
if rebatch_fn is not None:
dataset = rebatch_fn(dataset, input_context.input_pipeline_id)
dataset = input_ops.auto_shard_dataset(dataset,
input_context.num_input_pipelines,
input_context.input_pipeline_id,
num_replicas_in_sync)
self._cloned_datasets.append(dataset)
else:
replicated_ds = distribute.replicate(dataset,
input_workers.worker_devices)
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
cloned_dataset = replicated_ds[worker]
if rebatch_fn is not None:
cloned_dataset = rebatch_fn(cloned_dataset, i)
cloned_dataset = input_ops.auto_shard_dataset(
cloned_dataset, len(input_workers.worker_devices), i,
num_replicas_in_sync)
self._cloned_datasets.append(cloned_dataset)
self._enable_get_next_as_optional = _enable_get_next_as_optional(
strategy, dataset)
def _make_rebatch_fn(self, dataset, num_workers, num_replicas_in_sync):
"""Returns a callable that rebatches the input dataset.
Args:
dataset: A `tf.data.Dataset` representing the dataset to be distributed.
num_workers: An integer representing the number of workers to distribute
`dataset` among.
num_replicas_in_sync: An integer representing the number of replicas in
sync across all workers.
"""
if num_replicas_in_sync % num_workers:
raise ValueError(
"tf.distribute expects every worker to have the same number of "
"replicas. However, encountered `num_replicas_in_sync` ({}) that "
"cannot be divided by `num_workers` ({})".format(
num_replicas_in_sync, num_workers))
num_replicas_per_worker = num_replicas_in_sync // num_workers
with ops.colocate_with(dataset._variant_tensor): # pylint: disable=protected-access
batch_size = distribute.compute_batch_size(dataset)
def rebatch_fn(dataset, worker_index):
try:
# pylint: disable=protected-access
def apply_rebatch():
batch_sizes = distribute.batch_sizes_for_worker(
batch_size, num_workers, num_replicas_per_worker, worker_index)
return distribute._RebatchDataset(
dataset, batch_sizes).prefetch(num_replicas_per_worker)
def apply_legacy_rebatch():
return distribute._LegacyRebatchDataset(
dataset, num_replicas_in_sync).prefetch(num_replicas_per_worker)
with ops.colocate_with(dataset._variant_tensor):
return control_flow_ops.cond(
math_ops.not_equal(batch_size, -1),
true_fn=apply_rebatch,
false_fn=apply_legacy_rebatch)
except errors.InvalidArgumentError as e:
if "without encountering a batch" in str(e):
six.reraise(
ValueError,
ValueError(
"Call the `batch` method on the input Dataset in order to be "
"able to split your input across {} replicas.\n Please see "
"the tf.distribute.Strategy guide. {}".format(
num_replicas_in_sync, e)),
sys.exc_info()[2])
else:
raise
return rebatch_fn
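  # Worked example of the rebatching contract above (illustrative numbers):
  # with a global batch size of 16, num_workers=2 and num_replicas_in_sync=4,
  # each worker gets num_replicas_per_worker = 4 // 2 = 2 and per-replica
  # batches of size 16 // 4 = 4, so one global step still consumes 16 examples
  # across all workers and replicas.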
def __iter__(self):
if not (context.executing_eagerly() or
ops.get_default_graph().building_function):
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
if not self._built:
raise ValueError("To use this dataset, you need to pass this dataset to "
"ClusterCoordinator.create_per_worker_dataset.")
# This is an optional flag that can be used to turn off using
# OwnedMultiDeviceIterators and instead use the legacy MultiDeviceIterators
# as a stop gap solution that will allow us to roll out this change.
enable_legacy_iterators = getattr(self._strategy,
"_enable_legacy_iterators", False)
canonicalize_devices = getattr(self._strategy, "_canonicalize_devices",
True)
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers,
enable_legacy_iterators,
self._options,
canonicalize_devices)
if enable_legacy_iterators:
iterator = DistributedIteratorV1(
self._input_workers,
worker_iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional)
else:
iterator = DistributedIterator(
self._input_workers,
worker_iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
# batching handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
return DistributedDatasetSpec(self._input_workers, self._element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
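# A hedged sketch of routing `tf.distribute.InputOptions` into the
# `DistributedDataset` above via the public API; the chosen option values and
# devices are illustrative assumptions, not recommendations.
def _example_input_options():  # illustrative sketch only
  import tensorflow as tf
  strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
  options = tf.distribute.InputOptions(
      experimental_fetch_to_device=True,
      experimental_replication_mode=tf.distribute.InputReplicationMode.PER_WORKER)
  return strategy.experimental_distribute_dataset(
      tf.data.Dataset.range(8).batch(4), options=options)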
class DistributedDatasetV1(DistributedDataset):
"""Distributed dataset that supports prefetching to multiple devices."""
def __init__(self,
dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None,
options=None):
self._input_workers = input_workers
super(DistributedDatasetV1, self).__init__(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
def make_one_shot_iterator(self):
"""Get a one time use iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use `for ... in dataset:` to iterate
over the dataset or `iter` to create an iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_one_shot_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for DistributedDatasetV1."""
# Graph mode with one shot iterator is disabled because we have to call
# `initialize` on the iterator which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def make_initializable_iterator(self):
"""Get an initializable iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use
`tf.compat.v1.data.make_initializable_iterator(dataset)` to create an
initializable iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_initializable_iterator()
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=unused-argument
"""Get an initializable iterator for DistributedDatasetV1."""
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _get_iterator(self):
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers, True,
self._options)
iterator = DistributedIteratorV1(self._input_workers, worker_iterators,
self._strategy,
self._enable_get_next_as_optional)
iterator._element_spec = self.element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
return self._get_iterator()
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
class DistributedDatasetsFromFunctionSpec(DistributedDatasetAndIteratorSpec):
"""Type specification for `DistributedDatasetsFromFunction."""
def __init__(self, input_workers, element_spec, strategy, options):
super(DistributedDatasetsFromFunctionSpec,
self).__init__(input_workers, element_spec, strategy, options)
@property
def value_type(self):
return DistributedDatasetsFromFunction
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, _ in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(dataset_ops.DatasetSpec(element_spec))
return specs
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec) # pylint: disable=protected-access
return DistributedDatasetsFromFunctionSpec(self._input_workers,
element_spec, self._strategy,
self._options)
def _to_components(self, value):
return value._datasets # pylint: disable=protected-access
def _from_components(self, components):
return DistributedDatasetsFromFunction(
input_workers=self._input_workers,
strategy=self._strategy,
components=components,
element_spec=self._element_spec,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedDatasetsFromFunctionSpec(
input_workers=value._input_workers,
element_spec=value._element_spec,
strategy=value._strategy,
options=value._options)
# TODO(priyag): Add other replication modes.
class DistributedDatasetsFromFunction(_IterableInput,
composite_tensor.CompositeTensor):
"""Inputs created from dataset function."""
def __init__(self,
input_workers,
strategy,
input_contexts=None,
dataset_fn=None,
options=None,
components=None,
element_spec=None,
build=True):
"""Makes an iterable from datasets created by the given function.
Args:
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
dataset_fn: A function that returns a `Dataset` given an `InputContext`.
Either dataset_fn or components should be passed to construct
DistributedDatasetsFromFunction. Use this when constructing
DistributedDataset using a function. Use components when constructing
using DistributedDatasetsFromFunctionSpec.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
components: datasets when DistributedDatasetsFromFunction is constructed
from DistributedDatasetsFromFunctionSpec. Only one of dataset or
components should be passed.
      element_spec: element spec for DistributedDatasetsFromFunction when
        constructing from DistributedDatasetsFromFunctionSpec. This will be
        used to set the element_spec for DistributedDatasetsFromFunction and
        verified against the element_spec from components.
build: whether to build underlying datasets when this object is created.
This is only useful for `ParameterServerStrategy` now.
"""
super(DistributedDatasetsFromFunction, self).__init__(
input_workers=input_workers)
self._input_workers = input_workers
self._strategy = strategy
self._options = options
if dataset_fn is not None and components is not None:
raise ValueError("Only one of dataset_fn or components should be set")
if dataset_fn is None and components is None:
raise ValueError("At least one of dataset_fn or components should be set")
if dataset_fn is not None:
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
self._input_contexts = input_contexts
self._dataset_fn = dataset_fn
self._built = False
if build:
self.build()
else:
if element_spec is None:
raise ValueError(
"element_spec should also be passed when passing components")
if not build:
raise ValueError(
"When constructing DistributedDatasetFromFunction with components, "
"build should not be False. This is an internal error. Please file "
"a bug.")
self._element_spec = element_spec
self._datasets = components
self._built = True
self._enable_get_next_as_optional = _enable_get_next_as_optional(
self._strategy, self._datasets[0])
def build(self):
assert not self._built
self._datasets, element_spec = (
_create_datasets_from_function_with_input_context(
self._input_contexts, self._input_workers, self._dataset_fn))
self._element_spec = _create_distributed_tensor_spec(
self._strategy, element_spec)
self._enable_get_next_as_optional = _enable_get_next_as_optional(
self._strategy, self._datasets[0])
self._built = True
def __iter__(self):
if not (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
if not self._built:
raise ValueError("You need to use this dataset in "
"ClusterCoordinator.create_per_worker_dataset.")
# This is an optional flag that can be used to turn off using
# OwnedMultiDeviceIterators and instead use the legacy MultiDeviceIterators
# as a stop gap solution that will allow us to roll out this change.
enable_legacy_iterators = getattr(self._strategy,
"_enable_legacy_iterators", False)
canonicalize_devices = getattr(self._strategy, "_canonicalize_devices",
True)
iterators = _create_iterators_per_worker(self._datasets,
self._input_workers,
enable_legacy_iterators,
self._options,
canonicalize_devices)
if enable_legacy_iterators:
iterator = DistributedIteratorV1(
self._input_workers,
iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional)
else:
iterator = DistributedIterator(
input_workers=self._input_workers,
iterators=iterators,
strategy=self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync
# point here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
# batching handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
return DistributedDatasetsFromFunctionSpec(self._input_workers,
self._element_spec,
self._strategy, self._options)
class DistributedDatasetsFromFunctionV1(DistributedDatasetsFromFunction):
"""Inputs created from dataset function."""
def _make_initializable_iterator(self, shared_name=None):
"""Get an initializable iterator for DistributedDatasetsFromFunctionV1."""
del shared_name # Unused
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for iterating over DistributedDatasetsFromFunctionV1."""
# Graph mode with one shot iterator is disabled because we have to call
# `initialize` on the iterator which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def _get_iterator(self):
iterators = _create_iterators_per_worker(self._datasets,
self._input_workers, True,
self._options)
iterator = DistributedIteratorV1(self._input_workers, iterators,
self._strategy,
self._enable_get_next_as_optional)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
return self._get_iterator()
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
# TODO(anjalisridhar): This class will be soon removed in favor of newer
# APIs.
class InputFunctionIterator(DistributedIteratorV1):
"""Iterator created from input function."""
def __init__(self, input_fn, input_workers, input_contexts, strategy):
"""Make an iterator for input provided via an input function.
Currently implements PER_WORKER mode, in which the `input_fn` is called
once on each worker.
TODO(priyag): Add other replication modes.
Args:
input_fn: Input function that returns a `tf.data.Dataset` object.
input_workers: an `InputWorkers` object.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `input_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
"""
assert isinstance(input_workers, InputWorkers)
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
iterators = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
result = input_fn(ctx)
devices = input_workers.compute_devices_for_worker(i)
if isinstance(result, dataset_ops.DatasetV2):
iterator = _SingleWorkerDatasetIterator(result, worker, devices)
elif callable(result):
iterator = _SingleWorkerCallableIterator(result, worker, devices)
else:
raise ValueError(
"input_fn must return a tf.data.Dataset or a callable.")
iterators.append(iterator)
super(InputFunctionIterator, self).__init__(
input_workers, iterators, strategy, enable_get_next_as_optional=False)
self._enable_get_next_as_optional = False
# TODO(anjalisridhar): This class will soon be removed and users should move
# to using DistributedIterator.
class DatasetIterator(DistributedIteratorV1):
"""Iterator created from input dataset."""
def __init__(self,
dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None):
"""Make an iterator for the dataset on given devices.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
dataset: `tf.data.Dataset` that will be used as the input source.
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that the
total batch size for each step (across all workers and replicas) adds up
to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
"""
dist_dataset = DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
worker_iterators = _create_iterators_per_worker(
dist_dataset._cloned_datasets, input_workers, True) # pylint: disable=protected-access
super(DatasetIterator,
self).__init__(input_workers, worker_iterators, strategy,
dist_dataset._enable_get_next_as_optional) # pylint: disable=protected-access
self._element_spec = dist_dataset.element_spec
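# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the batch-splitting behaviour described in the
# DatasetIterator docstring above, expressed through the public
# tf.distribute API rather than this deprecated internal class. The batch
# size and dataset below are assumptions for illustration only.
def _example_batch_splitting_sketch():
  import tensorflow as tf  # local import keeps the sketch self-contained
  strategy = tf.distribute.MirroredStrategy()
  global_batch = 8
  dataset = tf.data.Dataset.range(64).batch(global_batch)
  dist_dataset = strategy.experimental_distribute_dataset(dataset)
  first_element = next(iter(dist_dataset))
  # With N replicas in sync, each per-replica tensor has batch size 8 // N.
  for per_replica_tensor in strategy.experimental_local_results(first_element):
    print(per_replica_tensor.shape)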
def _dummy_tensor_fn(value_structure):
"""A function to create dummy tensors from `value_structure`."""
def create_dummy_tensor(spec):
"""Create a dummy tensor with possible batch dimensions set to 0."""
if hasattr(spec, "_create_empty_value"):
# Type spec may overwrite default dummy values behavior by declaring the
# `_create_empty_value(self)` method. This method must return a value
# compatible with the type spec with batch dimensions set to 0 or fail if
# such a value does not exist. This allows a composite tensor to customize
# dummy values creation as, in general, its dummy value is not composed
# from dummy components (e.g. `row_splits` tensor of a RaggedTensor is
# never allowed to be empty). See b/183969859 for more discussions.
# TODO(b/186079336): reconsider CompositeTensor support.
return spec._create_empty_value() # pylint: disable=protected-access
if isinstance(spec, ragged_tensor.RaggedTensorSpec):
# Splice out the ragged dimensions.
# pylint: disable=protected-access
feature_shape = spec._shape[:1].concatenate(
spec._shape[(1 + spec._ragged_rank):])
feature_type = spec._dtype
# pylint: enable=protected-access
else:
feature_shape = spec.shape
feature_type = spec.dtype
# Ideally we should set the batch dimension to 0, however as in
# DistributionStrategy we don't know the batch dimension, we try to
# guess it as much as possible. If the feature has unknown dimensions, we
# will set them to 0. If the feature shape is already static, we guess the
# first dimension as batch dimension and set it to 0.
dims = ([dim if dim is not None else 0 for dim in feature_shape.as_list()]
if feature_shape else [])
if dims and (isinstance(spec, ragged_tensor.RaggedTensorSpec) or
feature_shape.is_fully_defined()):
dims[0] = tensor_shape.Dimension(0)
if isinstance(spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensor(
values=array_ops.zeros(0, feature_type),
indices=array_ops.zeros((0, len(dims)), dtypes.int64),
dense_shape=dims)
# Create the dummy tensor.
dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type)
if isinstance(spec, ragged_tensor.RaggedTensorSpec):
# Reinsert the ragged dimensions with size 0.
# pylint: disable=protected-access
row_splits = array_ops.zeros(1, spec._row_splits_dtype)
dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits(
dummy_tensor, (row_splits,) * spec._ragged_rank, validate=False)
# pylint: enable=protected-access
return dummy_tensor
return nest.map_structure(create_dummy_tensor, value_structure)
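# --- Illustrative sketch (not part of the original module) ---
# A hedged example of what `_dummy_tensor_fn` produces for two common
# specs: the (possibly unknown) batch dimension collapses to 0 so that an
# "empty" element matching the spec can be returned once a worker runs out
# of data. The concrete specs below are assumptions for illustration only.
def _example_dummy_tensor_sketch():
  import tensorflow as tf  # local import keeps the sketch self-contained
  structure = {
      "dense": tf.TensorSpec(shape=[None, 3], dtype=tf.float32),
      "sparse": tf.SparseTensorSpec(shape=[None, 5], dtype=tf.int64),
  }
  dummies = _dummy_tensor_fn(structure)
  print(dummies["dense"].shape)         # expected: (0, 3)
  print(dummies["sparse"].dense_shape)  # expected: [0, 5]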
def _recover_shape_fn(data, value_structure):
"""Recover the shape of `data` the same as shape of `value_structure`."""
flattened_data = nest.flatten(data)
for i, spec in enumerate(nest.flatten(value_structure)):
for target, source in zip(
nest.flatten(flattened_data[i], expand_composites=True),
nest.flatten(spec, expand_composites=True)):
target.set_shape(source.shape)
# `SparseTensor` shape is not determined by the shape of its component
# tensors. Rather, its shape depends on a tensor's values.
if isinstance(spec, sparse_tensor.SparseTensorSpec) and spec.shape:
dense_shape = spec.shape
with ops.device(flattened_data[i].op.device):
# For partially defined shapes, fill in missing values from tensor.
if not dense_shape.is_fully_defined():
dense_shape = array_ops.stack([
flattened_data[i].dense_shape[j] if dim is None else dim
for j, dim in enumerate(dense_shape.as_list())
])
flattened_data[i] = sparse_tensor.SparseTensor(
indices=flattened_data[i].indices,
values=flattened_data[i].values,
dense_shape=dense_shape)
data = nest.pack_sequence_as(data, flattened_data)
return data
class _SingleWorkerDatasetIteratorBase(object):
"""Iterator for a single `tf.data.Dataset`."""
def __init__(self, dataset, worker, devices, options=None):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
A `MultiDeviceIterator` or `OwnedMultiDeviceIterator` is used to prefetch
input to the devices on the given worker.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
options: options.
"""
self._dataset = dataset
self._worker = worker
self._devices = devices
self._element_spec = dataset.element_spec
self._options = options
self._make_iterator()
def _make_iterator(self):
raise NotImplementedError("must be implemented in descendants")
def _format_data_list_with_options(self, data_list):
"""Change the data in to a list type if required.
The OwnedMultiDeviceIterator returns the list data type,
while the PER_REPLICA iterator (when used with prefetch disabled)
returns without the enclosed list. This is to fix the inconsistency.
Args:
data_list: data_list
Returns:
list
"""
if (self._options and self._options.experimental_replication_mode ==
InputReplicationMode.PER_REPLICA and
not self._options.experimental_fetch_to_device):
return [data_list]
else:
return data_list
def get_next(self, device, name=None):
"""Get next element for the given device."""
del name
with ops.device(self._worker):
if _should_use_multi_device_iterator(self._options):
return self._iterator.get_next(device)
else:
return self._iterator.get_next()
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the underlying iterator.
Runs the iterator get_next() within a device scope. Since this doesn't use
get_next_as_optional(), it is considerably faster than get_next_as_list()
(but can only be used when the shapes are static).
Args:
name: not used.
Returns:
A list consisting of the next data from each device.
"""
del name
with ops.device(self._worker):
return self._format_data_list_with_options(self._iterator.get_next())
def get_next_as_list(self, name=None):
"""Get next element from underlying iterator.
If there is no data left, a list of dummy tensors with possible batch
dimensions set to 0 will be returned. Use of get_next_as_optional() and
extra logic adds overhead compared to get_next_as_list_static_shapes(), but
allows us to handle non-static shapes.
Args:
name: not used.
Returns:
      A boolean tensor indicating whether there is any data in the next
      element, and the real data as the next element (or a list of dummy
      tensors if no data is left).
"""
del name
with ops.device(self._worker):
data_list = self._format_data_list_with_options(
self._iterator.get_next_as_optional())
result = []
for i, data in enumerate(data_list):
# Place the condition op in the same device as the data so the data
# doesn't need to be sent back to the worker.
with ops.device(self._devices[i]):
# Data will be fetched in order, so we only need to check if the first
# replica has value to see whether there is data left for this single
# worker.
if i == 0:
worker_has_value = data.has_value()
# pylint: disable=unnecessary-lambda
# pylint: disable=cell-var-from-loop
real_data = control_flow_ops.cond(
data.has_value(),
lambda: data.get_value(),
lambda: _dummy_tensor_fn(data.element_spec),
strict=True,
)
# Some dimensions in `replicas` will become unknown after we
# conditionally return the real tensors or the dummy tensors. Recover
# the shapes from `data.element_spec`. We only need to do this in
# non eager mode because we always know the runtime shape of the
# tensors in eager mode.
if not context.executing_eagerly():
real_data = _recover_shape_fn(real_data, data.element_spec)
result.append(real_data)
# pylint: enable=cell-var-from-loop
# pylint: enable=unnecessary-lambda
return worker_has_value, result
class _SingleWorkerDatasetIteratorSpec(type_spec.TypeSpec):
"""Type specification for `_SingleWorkerOwnedDatasetIterator`."""
__slots__ = [
"_worker", "_devices", "_element_spec", "_options",
"_canonicalize_devices"
]
def __init__(self, worker, devices, element_spec, options,
canonicalize_devices=True):
self._worker = worker
if canonicalize_devices:
self._devices = tuple(device_util.canonicalize(d) for d in devices)
else:
self._devices = tuple(
device_util.canonicalize_without_job_and_task(d) for d in devices)
self._element_spec = element_spec
# `self._options` intentionally made not `None` for proper serialization.
self._options = (options if options is not None else
distribute_lib.InputOptions())
self._canonicalize_devices = canonicalize_devices
@property
def value_type(self):
return _SingleWorkerOwnedDatasetIterator
def _serialize(self):
return (self._worker, self._devices, self._element_spec, self._options,
self._canonicalize_devices)
def _get_multi_device_iterator_spec(self, specs):
device_scope = device_util.canonicalize(self._worker, device_util.current())
host_device = device_util.get_host_for_device(device_scope)
    # The source_device used while creating the iterator governs the worker
    # device in the iterator spec.
worker = host_device
specs.append(
multi_device_iterator_ops.MultiDeviceIteratorSpec(
self._devices, worker, element_spec=self._element_spec))
@property
def _component_specs(self):
specs = []
if _should_use_multi_device_iterator(self._options):
self._get_multi_device_iterator_spec(specs)
else:
specs.append(iterator_ops.IteratorSpec(element_spec=self._element_spec))
return specs
def _to_components(self, value):
return [value._iterator] # pylint: disable=protected-access
def _from_components(self, components):
return _SingleWorkerOwnedDatasetIterator(
dataset=None,
worker=self._worker,
devices=self._devices,
components=components,
element_spec=self._element_spec,
options=self._options,
canonicalize_devices=self._canonicalize_devices)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return _SingleWorkerDatasetIteratorSpec(value._worker, value._devices,
value._element_spec, value._options,
value._canonicalize_devices)
class _SingleWorkerOwnedDatasetIterator(_SingleWorkerDatasetIteratorBase,
composite_tensor.CompositeTensor):
"""Iterator for a DistributedDataset instance."""
def __init__(self,
dataset=None,
worker=None,
devices=None,
components=None,
element_spec=None,
options=None,
canonicalize_devices=None):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
`OwnedMultiDeviceIterator` is used to prefetch input to the devices on the
given worker. The lifetime of this iterator is tied to the encompassing
python object. Once we go out of scope of the python object or return from
a tf.function the underlying iterator resource is deleted.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
components: Tensor components to construct the
_SingleWorkerOwnedDatasetIterator from.
element_spec: A nested structure of `TypeSpec` objects that represents the
type specification of elements of the iterator.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
canonicalize_devices: Whether to canonicalize devices for workers fully or
partially. If False, it will partially canonicalize devices by removing
job and task.
"""
if worker is None or devices is None:
raise ValueError("Both `worker` and `devices` should be provided")
error_message = ("Either `dataset` or both `components` and `element_spec` "
"need to be provided.")
self._options = options
self._canonicalize_devices = canonicalize_devices
if dataset is None:
if (components is None or element_spec is None):
raise ValueError(error_message)
self._element_spec = element_spec
self._worker = worker
self._devices = devices
self._iterator = components[0]
else:
if (components is not None or element_spec is not None):
raise ValueError(error_message)
super(_SingleWorkerOwnedDatasetIterator,
self).__init__(dataset, worker, devices, self._options)
def _create_owned_multi_device_iterator(self):
# If the worker devices are already canonicalized, canonicalizing again
# would have no impact.
# For strategies running on remote workers such as PS Strategy, the device
# scope will be derived from current worker, if used under init_scope().
device_scope = device_util.canonicalize(self._worker,
device_util.current())
host_device = device_util.get_host_for_device(device_scope)
with ops.device(device_scope):
if self._options is not None:
self._iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
self._dataset,
self._devices,
source_device=host_device,
max_buffer_size=self._options
.experimental_per_replica_buffer_size,
prefetch_buffer_size=self._options
.experimental_per_replica_buffer_size)
else:
self._iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
self._dataset, self._devices, source_device=host_device)
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
if not self._worker:
raise ValueError("Worker device must be specified when creating an "
"owned iterator.")
if _should_use_multi_device_iterator(self._options):
self._create_owned_multi_device_iterator()
else:
with ops.device(self._worker):
self._iterator = iter(self._dataset)
@property
def element_spec(self):
return self._element_spec
@property
def _type_spec(self):
return _SingleWorkerDatasetIteratorSpec(self._worker, self._devices,
self._element_spec, self._options,
self._canonicalize_devices)
@property
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._element_spec)
@property
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._element_spec)
@property
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._element_spec)
class _SingleWorkerDatasetIterator(_SingleWorkerDatasetIteratorBase):
"""Iterator for a single DistributedDatasetV1 instance."""
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
with ops.device(self._worker):
if self._options is not None:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
max_buffer_size=self._options.experimental_per_replica_buffer_size,
prefetch_buffer_size=self._options
.experimental_per_replica_buffer_size)
else:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
)
def initialize(self):
"""Initialize underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run.
"""
if ops.executing_eagerly_outside_functions():
self._iterator._eager_reset() # pylint: disable=protected-access
return []
else:
return [self._iterator.initializer]
@property
def output_classes(self):
return dataset_ops.get_legacy_output_classes(self._iterator)
@property
def output_shapes(self):
return dataset_ops.get_legacy_output_shapes(self._iterator)
@property
def output_types(self):
return dataset_ops.get_legacy_output_types(self._iterator)
class _SingleWorkerCallableIterator(object):
"""Iterator for a single tensor-returning callable."""
def __init__(self, fn, worker, devices):
self._fn = fn
self._worker = worker
self._devices = devices
def get_next(self, device, name=None):
"""Get next element for the given device from the callable."""
del device, name
with ops.device(self._worker):
return self._fn()
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return data_list
def get_next_as_list(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return constant_op.constant(True), data_list
def initialize(self):
# TODO(petebu) Should this throw an exception instead?
return []
def _create_iterators_per_worker(worker_datasets,
input_workers,
enable_legacy_iterators,
options=None,
canonicalize_devices=False):
"""Create a multidevice iterator on each of the workers."""
assert isinstance(input_workers, InputWorkers)
assert len(worker_datasets) == len(input_workers.worker_devices)
iterators = []
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
worker_devices = input_workers.compute_devices_for_worker(i)
if tf2.enabled() and not enable_legacy_iterators:
iterator = _SingleWorkerOwnedDatasetIterator(
dataset=worker_datasets[i],
worker=worker,
devices=worker_devices,
options=options,
canonicalize_devices=canonicalize_devices)
else:
iterator = _SingleWorkerDatasetIterator(worker_datasets[i], worker,
worker_devices, options)
iterators.append(iterator)
return iterators
def _create_datasets_from_function_with_input_context(input_contexts,
input_workers,
dataset_fn):
"""Create device datasets per worker given a dataset function."""
datasets = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
dataset = dataset_fn(ctx)
datasets.append(dataset)
return datasets, dataset.element_spec
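# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the kind of `dataset_fn` that the helper above calls
# once per input context (the same contract as the public
# `tf.distribute.Strategy.distribute_datasets_from_function`). The global
# batch size and element count are assumptions for illustration only.
def _example_dataset_fn_sketch(ctx):
  """`ctx` is the `tf.distribute.InputContext` supplied per input pipeline."""
  import tensorflow as tf  # local import keeps the sketch self-contained
  global_batch_size = 64
  per_replica_batch = ctx.get_per_replica_batch_size(global_batch_size)
  dataset = tf.data.Dataset.range(1024)
  # Shard by input pipeline so each worker sees a disjoint slice of the data.
  dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
  return dataset.batch(per_replica_batch)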
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_batched_dataset(d):
"""Get the batched dataset from `d`."""
# pylint: disable=protected-access
if isinstance(d, dataset_ops.DatasetV1Adapter):
d = d._dataset
if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):
return d
elif isinstance(d, (dataset_ops.PrefetchDataset,
dataset_ops._OptionsDataset)):
return _get_batched_dataset(d._input_dataset)
raise ValueError(
"Unable to get batched dataset from the input dataset. `batch` "
"`map_and_batch` need to be the last operations on the dataset. "
"The batch operations can be followed by a prefetch.")
def _get_batched_dataset_attributes(d):
"""Get `batch_size`, `drop_remainder` of dataset."""
# pylint: disable=protected-access
assert isinstance(d,
(dataset_ops.BatchDataset, batching._MapAndBatchDataset))
if isinstance(d, dataset_ops.BatchDataset):
batch_size = d._batch_size
drop_remainder = d._drop_remainder
elif isinstance(d, batching._MapAndBatchDataset):
batch_size = d._batch_size_t
drop_remainder = d._drop_remainder_t
# pylint: enable=protected-access
if tensor_util.is_tf_type(batch_size):
batch_size = tensor_util.constant_value(batch_size)
if tensor_util.is_tf_type(drop_remainder):
drop_remainder = tensor_util.constant_value(drop_remainder)
return batch_size, drop_remainder
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_dataset_attributes(dataset):
"""Get the underlying attributes from the dataset object."""
# pylint: disable=protected-access
# First, get batch_size and drop_remainder from the dataset. We need
# to walk back the dataset creation process and find the batched version in
# order to get the attributes.
batched_dataset = _get_batched_dataset(dataset)
batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset)
  # Second, the prefetch buffer size should be obtained from the original dataset.
prefetch_buffer = None
if isinstance(dataset, dataset_ops.PrefetchDataset):
prefetch_buffer = dataset._buffer_size
elif (isinstance(dataset, dataset_ops.DatasetV1Adapter)
and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)):
prefetch_buffer = dataset._dataset._buffer_size
return batch_size, drop_remainder, prefetch_buffer
def _should_use_multi_device_iterator(options):
"""Determine whether to use multi_device_iterator_ops."""
if (options is None or
options.experimental_replication_mode == InputReplicationMode.PER_WORKER
or
(options.experimental_replication_mode == InputReplicationMode.PER_REPLICA
and options.experimental_fetch_to_device)):
return True
return False
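# --- Illustrative sketch (not part of the original module) ---
# A hedged walk-through of `_should_use_multi_device_iterator` for the
# option combinations it distinguishes; only fields referenced above are
# used, and the expected results are noted as comments.
def _example_multi_device_iterator_decision_sketch():
  per_worker = distribute_lib.InputOptions(
      experimental_replication_mode=InputReplicationMode.PER_WORKER)
  per_replica_prefetch = distribute_lib.InputOptions(
      experimental_replication_mode=InputReplicationMode.PER_REPLICA,
      experimental_fetch_to_device=True)
  per_replica_no_prefetch = distribute_lib.InputOptions(
      experimental_replication_mode=InputReplicationMode.PER_REPLICA,
      experimental_fetch_to_device=False)
  print(_should_use_multi_device_iterator(None))                     # True
  print(_should_use_multi_device_iterator(per_worker))               # True
  print(_should_use_multi_device_iterator(per_replica_prefetch))     # True
  print(_should_use_multi_device_iterator(per_replica_no_prefetch))  # False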
class MultiStepContext(object):
"""A context object that can be used to capture things when running steps.
This context object is useful when running multiple steps at a time using the
  `experimental_run_steps_on_iterator` API. For example, it allows the user's
  step function to specify which outputs to emit at what frequency. Currently
  it supports capturing output from the last step, as well as capturing
  non-tensor outputs. In the future it will be augmented to support other use
  cases such as emitting output every N steps.
"""
def __init__(self):
"""Initialize an output context.
Returns:
A context object.
"""
self._last_step_outputs = {}
self._last_step_outputs_reduce_ops = {}
self._non_tensor_outputs = {}
@property
def last_step_outputs(self):
"""A dictionary consisting of outputs to be captured on last step.
Keys in the dictionary are names of tensors to be captured, as specified
when `set_last_step_output` is called.
Values in the dictionary are the tensors themselves. If
`set_last_step_output` was called with a `reduce_op` for this output,
then the value is the reduced value.
Returns:
A dictionary with last step outputs.
"""
return self._last_step_outputs
def _set_last_step_outputs(self, outputs):
"""Replace the entire dictionary of last step outputs."""
if not isinstance(outputs, dict):
raise ValueError("Need a dictionary to set last_step_outputs.")
self._last_step_outputs = outputs
def set_last_step_output(self, name, output, reduce_op=None):
"""Set `output` with `name` to be outputted from the last step.
Args:
name: String, name to identify the output. Doesn't need to match tensor
name.
output: The tensors that should be outputted with `name`. See below for
actual types supported.
reduce_op: Reduction method to use to reduce outputs from multiple
replicas. Required if `set_last_step_output` is called in a replica
context. Optional in cross_replica_context.
When present, the outputs from all the replicas are reduced using the
current distribution strategy's `reduce` method. Hence, the type of
`output` must be what's supported by the corresponding `reduce` method.
        For example, if using MirroredStrategy and reduction is set, output
must be a `PerReplica` value.
The reduce method is also recorded in a dictionary
`_last_step_outputs_reduce_ops` for later interpreting of the
outputs as already reduced or not.
"""
if distribution_strategy_context.in_cross_replica_context():
self._last_step_outputs_reduce_ops[name] = reduce_op
if reduce_op is None:
self._last_step_outputs[name] = output
else:
distribution = distribution_strategy_context.get_strategy()
self._last_step_outputs[name] = distribution.reduce(reduce_op, output,
axis=None)
else:
assert reduce_op is not None
def merge_fn(distribution, value):
self._last_step_outputs[name] = distribution.reduce(reduce_op, value,
axis=None)
# Setting this inside the `merge_fn` because all replicas share the same
# context object, so it's more robust to set it only once (even if all
# the replicas are trying to set the same value).
self._last_step_outputs_reduce_ops[name] = reduce_op
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
@property
def non_tensor_outputs(self):
"""A dictionary consisting of any non tensor outputs to be captured."""
return self._non_tensor_outputs
def set_non_tensor_output(self, name, output):
"""Set `output` with `name` to be captured as a non tensor output."""
if distribution_strategy_context.in_cross_replica_context():
self._non_tensor_outputs[name] = output
else:
def merge_fn(distribution, value):
# NOTE(priyag): For non tensor outputs, we simply return all the values
# in a list as reduction doesn't make sense on non tensors.
self._non_tensor_outputs[name] = (
distribution.experimental_local_results(value))
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
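# --- Illustrative sketch (not part of the original module) ---
# A hedged example of recording outputs on a `MultiStepContext` from a
# cross-replica context. In real use the context is created and threaded
# through `experimental_run_steps_on_iterator`; the names and values below
# are assumptions for illustration only. Passing a `reduce_op` would
# additionally reduce the value across replicas before it is stored.
def _example_multi_step_context_sketch():
  ctx = MultiStepContext()
  ctx.set_last_step_output("loss", constant_op.constant(0.25))
  ctx.set_non_tensor_output("step_count", 10)
  return ctx.last_step_outputs["loss"], ctx.non_tensor_outputs["step_count"]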
def _create_distributed_tensor_spec(strategy, tensor_spec):
"""Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`.
Args:
strategy: The given `tf.distribute` strategy.
tensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the
shape should be None if you have partial batches.
Returns:
A `tf.TypeSpec` that matches the values produced by a given strategy. This
    can be a `tf.TensorSpec` or a `PerReplicaSpec`.
"""
num_replicas = len(strategy.extended.worker_devices)
# For one device strategy that is not MultiWorkerMirroredStrategy, return the
# tensor_spec as is, since we don't wrap the output with PerReplica in this
# case.
# TODO(b/166464552): remove after we always wrap for all strategies.
if not _always_wrap(strategy):
return tensor_spec
# For other cases we assume the input to tf.function is a per replica type.
def _get_value_per_replica(tensor_spec_per_input):
value_specs = [tensor_spec_per_input for _ in range(num_replicas)]
return values.PerReplicaSpec(*value_specs)
return nest.map_structure(_get_value_per_replica, tensor_spec)
def _replace_per_replica_spec(spec, i):
"""If `spec` is a `PerReplicaSpec`, then return its `i`th value_spec."""
if isinstance(spec, values.PerReplicaSpec):
return spec._value_specs[i] # pylint: disable=protected-access
else:
return spec
def _enable_get_next_as_optional(strategy, dataset):
"""Returns whether to enable using partial batch handling."""
# TODO(b/133073708): we currently need a flag to control the usage because
# there is a performance difference between get_next() and
# get_next_as_optional(). And we only enable get_next_as_optional when the
# output shapes are not static.
#
# TODO(rxsang): We want to always enable the get_next_as_optional behavior
# when user passed input_fn instead of dataset.
if not getattr(
strategy.extended, "enable_partial_batch_handling",
getattr(strategy.extended, "experimental_enable_get_next_as_optional",
False)):
return False
if context.executing_eagerly():
# If the dataset is infinite, we don't need to enable last partial batch
# support. Currently the logic only applies to the case that distributed
# dataset is created in eager mode, as we need to evaluate the dataset
# cardinality.
with ops.device(dataset._variant_tensor.device): # pylint: disable=protected-access
if dataset.cardinality().numpy() == cardinality.INFINITE:
return False
return not _is_statically_shaped(
dataset.element_spec) or strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _create_per_replica(value_list, strategy):
"""Creates a PerReplica.
For strategies other than OneDeviceStrategy, it creates a PerReplica whose
type spec is set to the element spec of the dataset. This helps avoid
  retracing for partial batches. Retracing is problematic in multi-client
  setups when different clients retrace at different times, since retracing
  changes the collective keys in the tf.function and causes mismatches among
  clients.
For single client strategies, this simply calls distribute_utils.regroup().
Args:
value_list: a list of values, one for each replica.
strategy: the `tf.distribute.Strategy`.
Returns:
a structure of PerReplica.
"""
# TODO(b/166464552): always wrap for all one device strategies as well.
always_wrap = _always_wrap(strategy)
per_replicas = distribute_utils.regroup(value_list, always_wrap=always_wrap)
return per_replicas
def _always_wrap(strategy):
"""Returns whether to always wrap the values in a DistributedValues."""
return strategy.extended._in_multi_worker_mode() or len( # pylint: disable=protected-access
strategy.extended.worker_devices) > 1
def _rebatch_as_dynamic(per_replica_spec):
"""Rebatch the spec to have a dynamic batch dimension."""
assert isinstance(per_replica_spec, values.PerReplicaSpec), per_replica_spec
# pylint: disable=protected-access
def _rebatch(spec):
# Rebatch if possible.
try:
return spec._unbatch()._batch(None)
except ValueError:
pass
return spec
return values.PerReplicaSpec(
*nest.map_structure(_rebatch, per_replica_spec._value_specs))
# pylint: enable=protected-access
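# --- Illustrative sketch (not part of the original module) ---
# A hedged example of what `_rebatch_as_dynamic` does to a spec with a
# static batch dimension: each per-replica `tf.TensorSpec` of shape (8, 3)
# becomes shape (None, 3). The concrete spec below is an assumption for
# illustration only.
def _example_rebatch_as_dynamic_sketch():
  import tensorflow as tf  # local import keeps the sketch self-contained
  static_spec = values.PerReplicaSpec(
      tf.TensorSpec(shape=[8, 3], dtype=tf.float32),
      tf.TensorSpec(shape=[8, 3], dtype=tf.float32))
  dynamic_spec = _rebatch_as_dynamic(static_spec)
  # expected: two TensorSpec entries with shape (None, 3)
  print(dynamic_spec._value_specs)  # pylint: disable=protected-access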
|
frreiss/tensorflow-fred
|
tensorflow/python/distribute/input_lib.py
|
Python
|
apache-2.0
| 110,264
|
[
"VisIt"
] |
8e75d4f3521b6afa769a03ce167c951c1e4d61ec46590ad908666518e1b0bc3f
|
"""
Deployment file to facilitate releases of pymatgen.
Note that this file is meant to be run from the root directory of the pymatgen
repo.
"""
__author__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Sep 1, 2014"
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
from fabric.api import local, lcd
from pymatgen import __version__ as ver
def make_doc():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-{3,}", contents)
n = len(toks[0].split()[-1])
changes = [toks[0]]
changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
changes = ("-" * n).join(changes)
with open("docs/latest_changes.rst", "w") as f:
f.write(changes)
with lcd("examples"):
local("ipython nbconvert --to html *.ipynb")
local("mv *.html ../docs/_static")
with lcd("docs"):
local("cp ../CHANGES.rst change_log.rst")
local("sphinx-apidoc -d 6 -o . -f ../pymatgen")
local("rm pymatgen.*.tests.rst")
for f in glob.glob("docs/*.rst"):
if f.startswith('docs/pymatgen') and f.endswith('rst'):
newoutput = []
suboutput = []
subpackage = False
with open(f, 'r') as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("pymatgen") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, 'w') as fid:
fid.write("".join(newoutput))
local("make html")
local("cp _static/* _build/html/_static")
        #This makes sure pymatgen.org redirects to the GitHub page
local("echo \"pymatgen.org\" > _build/html/CNAME")
        #Avoid the use of jekyll so that _dir works as intended.
local("touch _build/html/.nojekyll")
def publish():
local("python setup.py release")
def setver():
local("sed s/version=.*,/version=\\\"{}\\\",/ setup.py > newsetup"
.format(ver))
local("mv newsetup setup.py")
def update_doc():
make_doc()
with lcd("docs/_build/html/"):
local("git add .")
local("git commit -a -m \"Update dev docs\"")
local("git push origin gh-pages")
def merge_stable():
local("git commit -a -m \"v%s release\"" % ver)
local("git push")
local("git checkout stable")
local("git pull")
local("git merge master")
local("git push")
local("git checkout master")
def release_github():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-+", contents)
desc = toks[1].strip()
toks = desc.split("\n")
desc = "\n".join(toks[:-1]).strip()
payload = {
"tag_name": "v" + ver,
"target_commitish": "master",
"name": "v" + ver,
"body": desc,
"draft": False,
"prerelease": False
}
response = requests.post(
"https://api.github.com/repos/materialsproject/pymatgen/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]})
print response.text
def update_changelog():
output = subprocess.check_output(["git", "log", "--pretty=format:%s",
"v%s..HEAD" % ver])
lines = ["* " + l for l in output.strip().split("\n")]
with open("CHANGES.rst") as f:
contents = f.read()
toks = contents.split("==========")
toks.insert(-1, "\n\n" + "\n".join(lines))
with open("CHANGES.rst", "w") as f:
f.write("==========".join(toks))
def log_ver():
filepath = os.path.join(os.environ["HOME"], "Dropbox", "Public",
"pymatgen", ver)
with open(filepath, "w") as f:
f.write("Release")
def release(skip_test=False):
setver()
if not skip_test:
local("nosetests")
publish()
log_ver()
update_doc()
merge_stable()
release_github()
def open_doc():
pth = os.path.abspath("docs/_build/html/index.html")
webbrowser.open("file://" + pth)
|
Dioptas/pymatgen
|
fabfile.py
|
Python
|
mit
| 4,612
|
[
"pymatgen"
] |
96f4288050e2f6ec0268d7a34cba278cdecc9ebd0daec1554f6d8f2d8d6c99e3
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays a GUI for the user to quit Orca."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import gettext
from gi.repository import Gtk
from orca_i18n import _
class GtkBuilderWrapper:
"""
Superclass for GtkBuilder based applications. Just derive from this
and your subclass should create methods whose names correspond to
the signal handlers defined in the GtkBuilder file. Any other attributes
in your class will be safely ignored.
This class will give you the ability to do:
subclass_instance.GtkWindow.method(...)
subclass_instance.widget_name...
"""
def __init__(self, fileName, windowName):
# Load GtkBuilder file.
self.builder = Gtk.Builder()
self.builder.set_translation_domain(gettext.textdomain())
self.builder.add_from_file(fileName)
self.gtkWindow = self.builder.get_object(windowName)
# Force the localization of widgets to work around a GtkBuilder
# bug. See bgo bug 589362.
#
for obj in self.builder.get_objects():
success = self.localize_widget(obj)
# Set default application icon.
self.set_orca_icon()
instance_attributes = {}
for attribute in dir(self.__class__):
instance_attributes[attribute] = getattr(self, attribute)
self.builder.connect_signals(instance_attributes)
def set_orca_icon(self):
"""Get the icon in all sizes from the current theme and set them as
default for all application windows.
"""
icon_theme = Gtk.IconTheme.get_default()
try:
icon16 = icon_theme.load_icon("orca", 16, 0)
icon22 = icon_theme.load_icon("orca", 22, 0)
icon24 = icon_theme.load_icon("orca", 24, 0)
icon32 = icon_theme.load_icon("orca", 32, 0)
icon48 = icon_theme.load_icon("orca", 48, 0)
except:
return
else:
Gtk.Window.set_default_icon_list((icon16,
icon22,
icon24,
icon32,
icon48))
def get_widget(self, attribute):
"""Return the requested widget. This routine has been introduced
(and calls to it made by the Orca GtkBuilder sub-classes), to prevent
"No class attribute" pychecker errors caused when using __getattr__.
Arguments:
- attribute: name of the widget to return.
"""
widget = self.builder.get_object(attribute)
if widget is None:
raise AttributeError("Widget [" + attribute + "] not found")
return widget
def __getattr__(self, attribute): # Called when no attribute in __dict__
widget = self.builder.get_object(attribute)
if widget is None:
raise AttributeError("Widget [" + attribute + "] not found")
self.__dict__[attribute] = widget # Add reference to cache.
return widget
def localize_widget(self, obj):
"""Force the localization of the label/title of GtkBuilder objects
Arguments:
- obj: the GtkBuilder object whose label or title should be localized
"""
# TODO - JD: This is a workaround for a GtkBuilder bug which prevents
# the strings displayed by widgets from being translated. See bgo bug
# 589362.
#
try:
useMarkup = obj.get_use_markup()
useUnderline = obj.get_use_underline()
except:
useMarkup = False
useUnderline = False
if isinstance(obj, Gtk.Frame):
# For some reason, if we localize the frame, which has a label
# but does not (itself) support use_markup, we get unmarked
# labels which are not bold but which do have <b></b>. If we
# skip the frames, the labels get processed as expected. And
# there was much rejoicing. Yea.
#
return
try:
title = obj.get_title()
if title and len(title):
obj.set_title(_(title))
except:
try:
text = obj.get_label()
except:
return False
if text and len(text):
if useMarkup:
obj.set_markup(_(text))
else:
obj.set_label(_(text))
if useUnderline:
obj.set_use_underline(True)
return True
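# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the subclassing pattern described in the class
# docstring above: method names that match signal handlers declared in the
# GtkBuilder file are connected automatically, and widgets can be reached
# as attributes. The file name, window name, widget name and handler name
# below are hypothetical and for illustration only.
class _ExampleQuitDialog(GtkBuilderWrapper):
    def __init__(self):
        GtkBuilderWrapper.__init__(self, "orca-quit.ui", "quitDialog")
    def onQuitButtonClicked(self, widget):
        # Invoked automatically if the GtkBuilder file declares a handler
        # named onQuitButtonClicked.
        self.get_widget("quitDialog").hide()
# Hypothetical usage:
#   dialog = _ExampleQuitDialog()
#   dialog.quitDialog.show_all()  # attribute access resolves the widget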
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/orca_gtkbuilder.py
|
Python
|
gpl-3.0
| 5,479
|
[
"ORCA"
] |
327b462cac8b3ceb74ab913c18ad4daa1ad002a832444e63a527ae409e07b50d
|
#!/usr/bin/env python
#Retrieves data from UCSC and stores it in a file. UCSC parameters are provided in the input/output file.
import urllib, sys, os, gzip, tempfile, shutil
from galaxy import eggs
#from galaxy.datatypes import data
from galaxy.util import gzip_magic
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( msg )
sys.exit()
def check_gzip( filename ):
temp = open( filename, "U" )
magic_check = temp.read( 2 )
temp.close()
if magic_check != gzip_magic:
return False
return True
def __main__():
filename = sys.argv[1]
params = {}
for line in open( filename, 'r' ):
try:
line = line.strip()
fields = line.split( '\t' )
params[ fields[0] ] = fields[1]
except:
continue
URL = params.get( 'URL', None )
if not URL:
open( filename, 'w' ).write( "" )
stop_err( 'The remote data source application has not sent back a URL parameter in the request.' )
URL_method = params.get( 'URL_method', None )
out = open( filename, 'w' )
CHUNK_SIZE = 2**20 # 1Mb
try:
if not URL_method or URL_method == 'get':
page = urllib.urlopen( URL )
elif URL_method == 'post':
page = urllib.urlopen( URL, urllib.urlencode( params ) )
except:
stop_err( 'It appears that the remote data source application is currently off line. Please try again later.' )
while 1:
chunk = page.read( CHUNK_SIZE )
if not chunk:
break
out.write( chunk )
out.close()
if check_gzip( filename ):
fd, uncompressed = tempfile.mkstemp()
gzipped_file = gzip.GzipFile( filename )
while 1:
try:
chunk = gzipped_file.read( CHUNK_SIZE )
except IOError:
os.close( fd )
os.remove( uncompressed )
gzipped_file.close()
stop_err( 'Problem uncompressing gzipped data, please try retrieving the data uncompressed.' )
if not chunk:
break
os.write( fd, chunk )
os.close( fd )
gzipped_file.close()
# Replace the gzipped file with the uncompressed file
shutil.move( uncompressed, filename )
if __name__ == "__main__": __main__()
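# --- Illustrative sketch (not part of the original script) ---
# A hedged example of the tab-separated parameter file this script expects
# as sys.argv[1]: one "key<TAB>value" pair per line, including at least a
# URL entry and optionally URL_method. The URL below is hypothetical and
# for illustration only.
def _example_params_file_sketch():
    sample = "URL\thttp://example.org/cgi-bin/hgTables?db=hg18\nURL_method\tget\n"
    params = {}
    for line in sample.splitlines():
        fields = line.strip().split( '\t' )
        if len( fields ) == 2:
            params[ fields[0] ] = fields[1]
    return params # e.g. {'URL': 'http://example.org/...', 'URL_method': 'get'}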
|
dbcls/dbcls-galaxy
|
tools/experimental/data_source.py
|
Python
|
mit
| 2,363
|
[
"Galaxy"
] |
498425d6b4d5716cfacd3b704c7484656bc132e7c567d0cce998d3ab154771e8
|
"""
__init__.py
State Estimation and Analysis for PYthon
Module for working with oceanographic data and models
Copyright (c)2010--2021 University of Hawaii under the MIT-License.
Requires the following packages: joblib, rich, cartopy, numpy_groupies
Import classes include:
- :class:`~seapy.environ.opt`
- :class:`~seapy.tidal_energy.energetics`
Imported functions include:
- :func:`~seapy.lib.adddim`
- :func:`~seapy.lib.chunker`
- :func:`~seapy.lib.convolve_mask`
- :func:`~seapy.lib.day2date`
- :func:`~seapy.lib.date2day`
- :func:`~seapy.lib.earth_angle`
- :func:`~seapy.lib.earth_distance`
- :func:`~seapy.lib.flatten`
- :func:`~seapy.lib.list_files`
- :func:`~seapy.lib.netcdf`
- :func:`~seapy.lib.rotate`
- :func:`~seapy.lib.today2day`
- :func:`~seapy.lib.unique_rows`
- :func:`~seapy.lib.vecfind`
- :func:`~seapy.mapping.map`
- :func:`~seapy.mapping.hawaii`
- :func:`~seapy.oa.oasurf`
- :func:`~seapy.oa.oavol`
- :func:`~seapy.tidal_energy.tidal_energy`
"""
from .lib import *
from . import roms
from . import model
from . import qserver
from . import filt
from . import plot
from . import seawater
from . import tide
from .tidal_energy import tidal_energy
from .environ import opt
from .mapping import map, hawaii
from .oa import *
__version__ = "0.7"
|
powellb/seapy
|
seapy/__init__.py
|
Python
|
mit
| 1,330
|
[
"NetCDF"
] |
cc7cca627a6a8aebf162b92967089b2bc3a80090970ccff66a14ad6c8a885a7b
|
# -*- coding: utf-8 -*-
import order
import re
class ParseConfig(object):
"""
    This class parses a template file of a configuration file for a server application.
    The instance variable `self.input` is the template file for the server application.
    The instance variable `self.abstract` is an intermediate representation used for the tuning.
    The instance variable `self.kvv` holds information about the parameters.
    The instance variable `self.kvp` holds information about the parameters for the Information-DB.
:param input: the template file for the server application.
"""
def __init__(self, input):
self.input = input + "\n"
self.abstract = None
self.kvv = None
self.kvp = None
def parse(self):
"""
Parse the configuration file.
"""
self.abstract = self.input
self.kvv = {} # Key! Value! Variable! {"hoge": [Value, Range, Visitor]}
self.kvp = {} # Key! Value! Paramater!
# Escape
self.abstract = self.abstract.replace('%', '%%')
# Create KVV
# Find `#{ hoge[min,max] }`
for match in re.finditer(r'#{\s*(.+?)\s*(?:\[\s*([+-]?(?:inf|\d+))?\s*[:,]\s*([+-]?(?:inf|\d+))?\s*\])?\s*}', self.abstract):
key = match.group(1)
value = None
visit = 1
mini = maxi = None
if match.group(2) is not None and 'inf' not in match.group(2):
mini = int(match.group(2))
if match.group(3) is not None and 'inf' not in match.group(3):
maxi = int(match.group(3))
if key in self.kvv:
old = self.kvv[key][1] # range
if mini is None and old['min'] is not None:
mini = old['min']
if mini is not None and old['min'] is not None and old['min'] > mini:
mini = old['min']
if maxi is None and old['max'] is not None:
maxi = old['max']
if maxi is not None and old['max'] is not None and old['max'] < maxi:
maxi = old['max']
visit += self.kvv[key][2] # visitor
range = order.renge(mini, maxi)
self.kvv[key] = [value, range, visit]
self.abstract = self.abstract.replace(match.group(0), "%%(%s)s" % key)
# Create KVP
# Find `Paramater = Value`
NEWLINE = ["\r\n", "\n", ";"]
SEPARATOR = ["\t", " ", ":", "=", "=>"]
ns = NEWLINE + SEPARATOR
item_reg = re.compile(r'[\s]*(.+?)\s*(?::|=>|=| |\t)\s*(.+?)(?:\r\n|\n|;)+')
var_reg = re.compile(r'%\((.+?)\)s')
for match in var_reg.finditer(self.abstract):
match_start = match.start()
idx = -1
while (True):
break_flg = True
for s in NEWLINE:
idx = self.abstract.rfind(s, 0, match_start)
if idx == -1:
continue
if s != ";":
break
idx -= 1
if idx == -1:
continue
if self.abstract[idx] in ns:
match_start = idx
break_flg = False
idx += 2
if break_flg:
break
if idx == -1:
idx = 0
m = item_reg.search(self.abstract, idx)
key = m.group(1)
value = m.group(2).strip()
if key.startswith(";") or key.startswith("#") or key.startswith("//"):
self.kvv[match.group(1)][2] -= 1
continue
self.kvp[key] = value
# Modify KVP
# for `Timeout 10 # #{old}`
comm_reg = re.compile(r'\s+(?:#|;|//).*$')
for k, v in self.kvp.items():
comment = comm_reg.search(v)
if comment is None:
continue
self.kvp[k] = v.replace(comment.group(0), "").strip()
if len(self.kvp[k]) == 0 or var_reg.search(self.kvp[k]) is None:
del self.kvp[k]
for match in var_reg.finditer(v):
self.kvv[match.group(1)][2] -= 1
# Modify KVV
        # Bocchi is dead: drop variables that are no longer referenced by any parameter.
for k, v in self.kvv.items():
if v[2] > 0:
continue
del self.kvv[k]
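# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how ParseConfig is meant to be used, based on the
# parsing logic above: `#{ name[min,max] }` placeholders become `%(name)s`
# markers in `abstract`, their tunable ranges land in `kvv`, and recognised
# "parameter = value" lines land in `kvp`. The template below is an
# assumption for illustration only.
def _example_parse_config_sketch():
    template = "MaxClients = #{ max_clients[1,512] }\nTimeout = #{ timeout[1,300] }"
    pc = ParseConfig(template)
    pc.parse()
    print(pc.abstract)  # template with placeholders replaced by %(...)s markers
    print(pc.kvv)       # per-variable [value, range, reference count] entries
    print(pc.kvp)       # parameter name -> templated value string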
|
KNCT-KPC/RapidHouse
|
rapidhouse/lib/parseconfig.py
|
Python
|
mit
| 3,514
|
[
"VisIt"
] |
850d947d4898f5ee295e24a6f3f3427e978d7d6761565dc467ee0f241f4d89dc
|
from random import *
# sig : doc
rrand_dists = {
    # (a, b) : a value in [a, b]; whether the endpoint b can be returned depends on floating-point rounding
'uniform': uniform,
    # (low=0.0, high=1.0, mode=None) : low and high are bounds; for mode see Wikipedia /Triangular_distribution
'triangular': triangular,
'normal': [
# (mu, sigma)
normalvariate,
# (mu, sigma)
gauss,
        # note: normalvariate and gauss sample the same normal (Gaussian)
        # distribution; gauss is slightly faster but is not thread-safe
],
# (mu, sigma) : composition with ln (log_e) yields normal distribution
'lognormal': lognormvariate,
'negative exponential': None,
# 'gamma':,
# 'beta',
# 'pareto',
# 'Weibull',
}
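# --- Illustrative sketch (not part of the original snippet) ---
# A hedged example of drawing a few samples from some of the distributions
# referenced in the table above; the parameter values are arbitrary.
def _example_sampling_sketch(n=5):
    return {
        'uniform': [uniform(0.0, 1.0) for _ in range(n)],
        'triangular': [triangular(0.0, 1.0, 0.25) for _ in range(n)],
        'normal': [gauss(0.0, 1.0) for _ in range(n)],
        'lognormal': [lognormvariate(0.0, 0.5) for _ in range(n)],
    }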
|
ransomw/dotfiles
|
pyutils/cookbook/plot_random.py
|
Python
|
apache-2.0
| 657
|
[
"Gaussian"
] |
062138aea0cc0572026227249ee3768f9bf2b3ce1fa5af5cbc238bfac7ab67bb
|
#
# plots.py -- Utility functions for plotting.
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
import matplotlib as mpl
from matplotlib.figure import Figure
# fix issue of negative numbers rendering incorrectly with default font
mpl.rcParams['axes.unicode_minus'] = False
from ginga.util import iqcalc
from ginga.misc import Callback
class Plot(Callback.Callbacks):
def __init__(self, figure=None, logger=None, width=500, height=500):
Callback.Callbacks.__init__(self)
if figure is None:
dpi = 100
wd_in, ht_in = float(width)/dpi, float(height)/dpi
figure = Figure(figsize=(wd_in, ht_in), dpi=dpi)
self.fig = figure
if hasattr(self.fig, 'set_tight_layout'):
self.fig.set_tight_layout(True)
self.logger = logger
self.fontsize = 10
self.ax = None
self.logx = False
self.logy = False
self.xdata = []
self.ydata = []
# For callbacks
for name in ('draw-canvas', ):
self.enable_callback(name)
def get_figure(self):
return self.fig
def get_widget(self):
return self.fig.canvas
def add_axis(self, **kwdargs):
self.ax = self.fig.add_subplot(111, **kwdargs)
return self.ax
def get_axis(self):
return self.ax
def set_axis(self, ax):
self.ax = ax
def set_titles(self, xtitle=None, ytitle=None, title=None,
rtitle=None):
if xtitle is not None:
self.ax.set_xlabel(xtitle)
if ytitle is not None:
self.ax.set_ylabel(ytitle)
if title is not None:
self.ax.set_title(title)
if rtitle is not None:
pass
ax = self.ax
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
def clear(self):
self.logger.debug('clearing canvas...')
self.ax.cla()
self.xdata = []
self.ydata = []
def draw(self):
self.fig.canvas.draw()
self.make_callback('draw-canvas')
def plot(self, xarr, yarr, xtitle=None, ytitle=None, title=None,
rtitle=None, **kwdargs):
if self.ax is None:
self.add_axis()
if self.logx:
self.ax.set_xscale('log')
if self.logy:
self.ax.set_yscale('log')
self.xdata = xarr
self.ydata = yarr
self.set_titles(xtitle=xtitle, ytitle=ytitle, title=title,
rtitle=rtitle)
self.ax.grid(True)
self.ax.plot(xarr, yarr, **kwdargs)
for item in self.ax.get_xticklabels() + self.ax.get_yticklabels():
item.set_fontsize(self.fontsize)
# Make x axis labels a little more readable
lbls = self.ax.xaxis.get_ticklabels()
for lbl in lbls:
lbl.set(rotation=45, horizontalalignment='right')
#self.fig.tight_layout()
self.draw()
def get_data(self):
return self.fig, self.xdata, self.ydata
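# --- Illustrative sketch (not part of the original module) ---
# A hedged example of driving the Plot class above with an Agg canvas and a
# standard logger; the data and titles are arbitrary and for illustration
# only.
def _example_plot_usage_sketch():
    import logging
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    logger = logging.getLogger("plots-example")
    fig = Figure(figsize=(5, 5), dpi=100)
    FigureCanvasAgg(fig)  # attach a canvas so Plot.draw() can render
    p = Plot(figure=fig, logger=logger)
    p.add_axis()
    x = numpy.linspace(0.0, 10.0, 100)
    p.plot(x, numpy.sin(x), xtitle='x', ytitle='sin(x)', title='Demo plot')
    return p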
class HistogramPlot(Plot):
def histogram(self, data, numbins=2048,
xtitle=None, ytitle=None, title=None, rtitle=None):
minval = numpy.nanmin(data)
maxval = numpy.nanmax(data)
substval = (minval + maxval)/2.0
data[numpy.isnan(data)] = substval
dist, bins = numpy.histogram(data, bins=numbins, density=False)
# used with 'steps-post' drawstyle, this gives correct histogram-steps
x = bins
y = numpy.append(dist, dist[-1])
self.clear()
self.plot(x, y, alpha=1.0, linewidth=1.0, linestyle='-',
xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,
drawstyle='steps-post')
class CutsPlot(Plot):
def cuts(self, data,
xtitle=None, ytitle=None, title=None, rtitle=None,
color=None):
"""data: pixel values along a line.
"""
y = data
x = numpy.arange(len(data))
self.plot(x, y, color=color, drawstyle='steps-mid',
xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,
alpha=1.0, linewidth=1.0, linestyle='-')
class ContourPlot(Plot):
def __init__(self, *args, **kwargs):
super(ContourPlot, self).__init__(*args, **kwargs)
self.num_contours = 8
self.plot_panx = 0
self.plot_pany = 0
self.plot_zoomlevel = 1.0
def connect_zoom_callbacks(self):
canvas = self.fig.canvas
connect = canvas.mpl_connect
# These are not ready for prime time...
# connect("motion_notify_event", self.plot_motion_notify)
# connect("button_press_event", self.plot_button_press)
connect("scroll_event", self.plot_scroll)
def plot_contours(self, x, y, data, num_contours=None):
# Make a contour plot
if num_contours is None:
num_contours = self.num_contours
if self.ax is None:
self.add_axis()
ht, wd = data.shape
self.ax.set_aspect('equal', adjustable='box')
self.set_titles(title='Contours')
#self.fig.tight_layout()
#self.ax.grid(True)
# Set pan position in contour plot
self.plot_panx = float(x) / wd
self.plot_pany = float(y) / ht
self.ax.cla()
try:
# Create a contour plot
self.xdata = numpy.arange(wd)
self.ydata = numpy.arange(ht)
self.ax.contourf(self.xdata, self.ydata, data, num_contours)
# Mark the center of the object
self.ax.plot([x], [y], marker='x', ms=20.0,
color='black')
# Set the pan and zoom position & redraw
self.plot_panzoom()
except Exception as e:
self.logger.error("Error making contour plot: %s" % (
str(e)))
def plot_panzoom(self):
ht, wd = len(self.ydata), len(self.xdata)
x = int(self.plot_panx * wd)
y = int(self.plot_pany * ht)
if self.plot_zoomlevel >= 1.0:
scalefactor = 1.0 / self.plot_zoomlevel
elif self.plot_zoomlevel < -1.0:
scalefactor = - self.plot_zoomlevel
else:
            # weird condition?--reset to 1:1
scalefactor = 1.0
self.plot_zoomlevel = 1.0
xdelta = int(scalefactor * (wd/2.0))
ydelta = int(scalefactor * (ht/2.0))
xlo, xhi = x-xdelta, x+xdelta
# distribute remaining x space from plot
if xlo < 0:
xsh = abs(xlo)
xlo, xhi = 0, min(wd-1, xhi+xsh)
elif xhi >= wd:
xsh = xhi - wd
xlo, xhi = max(0, xlo-xsh), wd-1
self.ax.set_xlim(xlo, xhi)
ylo, yhi = y-ydelta, y+ydelta
# distribute remaining y space from plot
if ylo < 0:
ysh = abs(ylo)
ylo, yhi = 0, min(ht-1, yhi+ysh)
elif yhi >= ht:
ysh = yhi - ht
ylo, yhi = max(0, ylo-ysh), ht-1
self.ax.set_ylim(ylo, yhi)
self.draw()
def plot_zoom(self, val):
self.plot_zoomlevel = val
self.plot_panzoom()
def plot_scroll(self, event):
# Matplotlib only gives us the number of steps of the scroll,
# positive for up and negative for down.
direction = None
if event.step > 0:
#delta = 0.9
self.plot_zoomlevel += 1.0
elif event.step < 0:
#delta = 1.1
self.plot_zoomlevel -= 1.0
self.plot_panzoom()
# x1, x2 = self.ax.get_xlim()
# y1, y2 = self.ax.get_ylim()
# self.ax.set_xlim(x1*delta, x2*delta)
# self.ax.set_ylim(y1*delta, y2*delta)
# self.draw()
return True
def plot_button_press(self, event):
if event.button == 1:
self.plot_x, self.plot_y = event.x, event.y
return True
def plot_motion_notify(self, event):
if event.button == 1:
xdelta = event.x - self.plot_x
#ydelta = event.y - self.plot_y
ydelta = self.plot_y - event.y
self.pan_plot(xdelta, ydelta)
def pan_plot(self, xdelta, ydelta):
x1, x2 = self.ax.get_xlim()
y1, y2 = self.ax.get_ylim()
self.ax.set_xlim(x1+xdelta, x2+xdelta)
self.ax.set_ylim(y1+ydelta, y2+ydelta)
self.draw()
class RadialPlot(Plot):
def plot_radial(self, x, y, radius, image):
img_data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)
self.ax.cla()
# Make a radial plot
self.ax.set_xlim(-0.1, radius)
self.set_titles(title="Radial plot", xtitle='Radius [pixels]',
ytitle='Pixel Value (ADU)')
self.ax.grid(True)
try:
ht, wd = img_data.shape
off_x, off_y = x1, y1
maxval = numpy.nanmax(img_data)
# create arrays of radius and value
r = []
v = []
for i in range(0, wd):
for j in range(0, ht):
r.append( numpy.sqrt( (off_x + i - x)**2 + (off_y + j - y)**2 ) )
v.append(img_data[j, i])
r, v = numpy.array(r), numpy.array(v)
# compute and plot radial fitting
            # note: you might want to change `deg` here.
coefficients = numpy.polyfit(x=r, y=v, deg=10)
polynomial = numpy.poly1d(coefficients)
x_curve = numpy.linspace(numpy.min(r), numpy.max(r), len(r))
y_curve = polynomial(x_curve)
yerror = 0 # for now, no error bars
self.ax.errorbar(r, v, yerr=yerror, marker='x', ls='none',
color='blue')
self.ax.plot(x_curve, y_curve, '-', color='green', lw=2)
#self.fig.tight_layout()
self.draw()
except Exception as e:
self.logger.error("Error making radial plot: %s" % (
str(e)))
class FWHMPlot(Plot):
def __init__(self, *args, **kwargs):
super(FWHMPlot, self).__init__(*args, **kwargs)
self.iqcalc = iqcalc.IQCalc(self.logger)
def _plot_fwhm_axis(self, arr, iqcalc, skybg, color1, color2, color3):
N = len(arr)
X = numpy.array(list(range(N)))
Y = arr
# subtract sky background
Y = Y - skybg
maxv = Y.max()
# clamp to 0..max
Y = Y.clip(0, maxv)
self.logger.debug("Y=%s" % (str(Y)))
self.ax.plot(X, Y, color=color1, marker='.')
fwhm, mu, sdev, maxv = iqcalc.calc_fwhm(arr)
# Make a little smoother gaussian curve by plotting intermediate
# points
XN = numpy.linspace(0.0, float(N), N*10)
Z = numpy.array([iqcalc.gaussian(x, (mu, sdev, maxv))
for x in XN])
self.ax.plot(XN, Z, color=color1, linestyle=':')
self.ax.axvspan(mu-fwhm/2.0, mu+fwhm/2.0,
facecolor=color3, alpha=0.25)
return (fwhm, mu, sdev, maxv)
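    # Editorial note: for a Gaussian profile the full width at half maximum is
    # FWHM = 2 * sqrt(2 * ln 2) * sdev (about 2.355 * sdev); iqcalc.calc_fwhm()
    # is assumed here to return values consistent with that relation.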
def plot_fwhm(self, x, y, radius, cutout_data, image, iqcalc=None):
x0, y0, xarr, yarr = image.cutout_cross(x, y, radius)
if iqcalc is None:
iqcalc = self.iqcalc
self.ax.cla()
#self.ax.set_aspect('equal', adjustable='box')
self.set_titles(ytitle='Brightness', xtitle='Pixels',
title='FWHM')
self.ax.grid(True)
# Make a FWHM plot
try:
# get median value from the cutout area
skybg = numpy.median(cutout_data)
self.logger.debug("cutting x=%d y=%d r=%d med=%f" % (
x, y, radius, skybg))
self.logger.debug("xarr=%s" % (str(xarr)))
fwhm_x, mu, sdev, maxv = self._plot_fwhm_axis(xarr, iqcalc, skybg,
'blue', 'blue', 'skyblue')
self.logger.debug("yarr=%s" % (str(yarr)))
fwhm_y, mu, sdev, maxv = self._plot_fwhm_axis(yarr, iqcalc, skybg,
'green', 'green', 'seagreen')
self.ax.legend(('data x', 'gauss x', 'data y', 'gauss y'),
loc='upper right', shadow=False, fancybox=False,
prop={'size': 8}, labelspacing=0.2)
self.set_titles(title="FWHM X: %.2f Y: %.2f" % (fwhm_x, fwhm_y))
#self.fig.tight_layout()
self.draw()
except Exception as e:
self.logger.error("Error making fwhm plot: %s" % (
str(e)))
#END
|
Cadair/ginga
|
ginga/util/plots.py
|
Python
|
bsd-3-clause
| 12,879
|
[
"Gaussian"
] |
d92ca9cf3e02c4fee60c99eaed87252887d9f0f453d846b6109836e96b347a81
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import os
import sys
import nbformat
import traceback
sys.path.insert(0, '@CMAKE_BINARY_DIR@/doc/tutorials')
import convert
def skipIfMissingModules(x): return x
try:
import yaml # pylint: disable=unused-import
import autopep8 # pylint: disable=unused-import
except ImportError:
skipIfMissingModules = ut.skip(
"Python modules 'yaml' or 'autopep8' not available, skipping test!")
else:
def skipIfMissingModules(x): return x
class HtmlRunner(ut.TestCase):
"""
Test the :file:`doc/tutorials/convert.py` script. A new Jupyter
notebook and a new python script are created, and both are supplied to
convert.py, which will include the python script in a new code cell,
substitute global variables, run the code and then save the result in
a new notebook. The input notebook contains IPython magic commands and
imports matplotlib and an ESPResSo visualizer, all of which require
special treatment.
"""
cell_md_src = '''
Cannot plot in the same cell where `matplotlib` is imported for the first
time, so CI/CD needs to split the code cell after the first matplotlib
import statement. The IPython magic command `%matplotlib` must be set to inline.
'''.strip()
cell_py_src = '''
import numpy as np
%matplotlib notebook
import matplotlib as mpl # split here
import matplotlib.pyplot as plt # don't split
try:
from espressomd.visualization_opengl import openglLive
except ImportError:
mpl.use('Agg') # running in CI without graphical output
plt.ion()
global_var = 5
plt.plot([1, 2], [3, global_var])
plt.show()
'''.strip()
nb_metadata = {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"},
"language_info": {
"codemirror_mode": {"name": "ipython", "version": 3},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": ".".join(map(str, sys.version_info[:3]))}
}
def failed_to_run(self, cmd):
traceback.print_exc()
self.fail('Could not run @CMAKE_BINARY_DIR@/pypresso '
'@CMAKE_BINARY_DIR@/doc/tutorials/convert.py ' +
' '.join(cmd))
def test_html_wrapper(self):
f_input = '@CMAKE_CURRENT_BINARY_DIR@/test_convert_notebook.ipynb'
f_output = '@CMAKE_CURRENT_BINARY_DIR@/test_convert_notebook.run.ipynb'
f_script = '@CMAKE_CURRENT_BINARY_DIR@/test_convert_script.py'
# setup
if os.path.isfile(f_output):
os.remove(f_output)
with open(f_script, 'w') as f:
f.write('global_var = 5')
with open(f_input, 'w', encoding='utf-8') as f:
nb = nbformat.v4.new_notebook(metadata=self.nb_metadata)
cell_md = nbformat.v4.new_markdown_cell(source=self.cell_md_src)
nb['cells'].append(cell_md)
cell_code = nbformat.v4.new_code_cell(source=self.cell_py_src)
nb['cells'].append(cell_code)
nbformat.write(nb, f)
# run command and check for errors
cmd = ['ci',
'--input', f_input,
'--output', f_output,
'--scripts', f_script,
'--substitutions', 'global_var=20',
'--execute']
try:
args = convert.parser.parse_args(cmd)
args.callback(args)
except BaseException:
self.failed_to_run(cmd)
self.assertTrue(os.path.isfile(f_output), f_output + ' not created')
# read processed notebook
with open(f_output, encoding='utf-8') as f:
nb_output = nbformat.read(f, as_version=4)
# the first Markdown cell must be identical
self.assertEqual(nb_output['cells'][0]['cell_type'], 'markdown')
self.assertEqual(nb_output['cells'][0]['source'], self.cell_md_src)
# the first Python cell must have been split
self.assertEqual(nb_output['cells'][1]['cell_type'], 'code')
lines = (self.cell_py_src
.replace('%matplotlib notebook', '%matplotlib inline')
.replace('global_var = 5', 'global_var = 20')
).split('\n')
self.assertEqual(nb_output['cells'][1]['source'], '\n'.join(lines[:3]))
self.assertEqual(nb_output['cells'][2]['source'], '\n'.join(lines[3:]))
# the cell should have produced a plot
graphical_plots = True
try:
from espressomd.visualization_opengl import openglLive # pylint: disable=unused-import
except ImportError:
graphical_plots = False # running in CI without graphical output
if graphical_plots:
outputs = nb_output['cells'][2]['outputs']
self.assertTrue(outputs, 'cell has no output')
self.assertIn('image/png', outputs[0]['data'])
self.assertGreater(len(outputs[0]['data']['image/png']), 6000)
# check the external script was correctly inserted
self.assertEqual(nb_output['cells'][3]['cell_type'], 'markdown')
self.assertEqual(nb_output['cells'][3]['source'],
'Solution from test_convert_script.py')
self.assertEqual(nb_output['cells'][4]['cell_type'], 'code')
self.assertEqual(nb_output['cells'][4]['source'], 'global_var = 20')
def test_exercise2_plugin(self):
f_input = '@CMAKE_CURRENT_BINARY_DIR@/test_convert_exercise2.ipynb'
f_output = '@CMAKE_CURRENT_BINARY_DIR@/test_convert_exercise2.run.ipynb'
# setup
if os.path.isfile(f_output):
os.remove(f_output)
with open(f_input, 'w', encoding='utf-8') as f:
nb = nbformat.v4.new_notebook(metadata=self.nb_metadata)
# question with 2 answers and an empty cell
cell_md = nbformat.v4.new_markdown_cell(source='Question 1')
cell_md['metadata']['solution2_first'] = True
cell_md['metadata']['solution2'] = 'shown'
nb['cells'].append(cell_md)
code = '```python\n1\n```'
cell_md = nbformat.v4.new_markdown_cell(source=code)
cell_md['metadata']['solution2'] = 'shown'
cell_md['metadata']['key'] = 'value'
nb['cells'].append(cell_md)
cell_md = nbformat.v4.new_markdown_cell(source='1b')
cell_md['metadata']['solution2'] = 'shown'
nb['cells'].append(cell_md)
cell_code = nbformat.v4.new_code_cell(source='')
nb['cells'].append(cell_code)
# question with 1 answer and a non-empty cell
cell_md = nbformat.v4.new_markdown_cell(source='Question 2')
cell_md['metadata']['solution2_first'] = True
cell_md['metadata']['solution2'] = 'hidden'
nb['cells'].append(cell_md)
code = '```python\n2\nimport matplotlib.pyplot\nglobal_var = 5\n```'
cell_md = nbformat.v4.new_markdown_cell(source=code)
cell_md['metadata']['solution2'] = 'hidden'
nb['cells'].append(cell_md)
cell_code = nbformat.v4.new_code_cell(source='3')
nb['cells'].append(cell_code)
nbformat.write(nb, f)
# run command and check for errors
cmd = ['ci',
'--input', f_input,
'--output', f_output,
'--substitutions', 'global_var=20',
'--exercise2', '--remove-empty-cells']
try:
args = convert.parser.parse_args(cmd)
args.callback(args)
except BaseException:
self.failed_to_run(cmd)
self.assertTrue(os.path.isfile(f_output), f_output + ' not created')
# read processed notebook
with open(f_output, encoding='utf-8') as f:
nb_output = nbformat.read(f, as_version=4)
# check cells
cells = iter(nb_output['cells'])
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(cell['source'], 'Question 1')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'code')
self.assertEqual(cell['source'], '1')
self.assertEqual(cell['metadata']['key'], 'value')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(cell['source'], '1b')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(cell['source'], 'Question 2')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'code')
self.assertEqual(cell['source'], '2\nimport matplotlib.pyplot')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'code')
self.assertEqual(cell['source'], 'global_var = 20')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'code')
self.assertEqual(cell['source'], '3')
self.assertEqual(next(cells, 'EOF'), 'EOF')
def test_exercise2_conversion(self):
f_input = '@CMAKE_CURRENT_BINARY_DIR@/test_convert_exercise2_conversion.ipynb'
# setup
with open(f_input, 'w', encoding='utf-8') as f:
nb = nbformat.v4.new_notebook(metadata=self.nb_metadata)
# question and code answer
cell_md = nbformat.v4.new_markdown_cell(source='Question 1')
cell_md['metadata']['solution2_first'] = True
cell_md['metadata']['solution2'] = 'hidden'
nb['cells'].append(cell_md)
code = '```python\n1\n```'
cell_md = nbformat.v4.new_markdown_cell(source=code)
cell_md['metadata']['solution2'] = 'hidden'
cell_md['metadata']['key'] = 'value'
nb['cells'].append(cell_md)
nbformat.write(nb, f)
# run command and check for errors
cmd = ['exercise2', '--to-py', f_input]
try:
args = convert.parser.parse_args(cmd)
args.callback(args)
except BaseException:
self.failed_to_run(cmd)
# read processed notebook
with open(f_input, encoding='utf-8') as f:
nb_output = nbformat.read(f, as_version=4)
# check cells
cells = iter(nb_output['cells'])
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(cell['source'], 'Question 1')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'code')
self.assertEqual(cell['source'], '1')
self.assertEqual(cell['metadata']['solution2'], 'shown')
self.assertEqual(cell['metadata']['key'], 'value')
self.assertEqual(next(cells, 'EOF'), 'EOF')
# run command and check for errors
cmd = ['exercise2', '--to-md', f_input]
try:
args = convert.parser.parse_args(cmd)
args.callback(args)
except BaseException:
self.failed_to_run(cmd)
# read processed notebook
with open(f_input, encoding='utf-8') as f:
nb_output = nbformat.read(f, as_version=4)
# check cells
cells = iter(nb_output['cells'])
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(cell['source'], 'Question 1')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(cell['source'], '```python\n1\n```')
self.assertEqual(cell['metadata']['solution2'], 'hidden')
self.assertEqual(cell['metadata']['key'], 'value')
self.assertEqual(next(cells, 'EOF'), 'EOF')
@skipIfMissingModules
def test_exercise2_autopep8(self):
f_input = '@CMAKE_CURRENT_BINARY_DIR@/test_convert_exercise2_autopep8.ipynb'
# setup
with open(f_input, 'w', encoding='utf-8') as f:
nb = nbformat.v4.new_notebook(metadata=self.nb_metadata)
# question and code answer
cell_md = nbformat.v4.new_markdown_cell(source='Question 1')
cell_md['metadata']['solution2_first'] = True
cell_md['metadata']['solution2'] = 'hidden'
nb['cells'].append(cell_md)
code = '```python\n\nif 1: #comment\n print( [5+1,4])\n\n```'
cell_md = nbformat.v4.new_markdown_cell(source=code)
cell_md['metadata']['solution2'] = 'hidden'
nb['cells'].append(cell_md)
nbformat.write(nb, f)
# run command and check for errors
cmd = ['exercise2', '--pep8', f_input]
try:
args = convert.parser.parse_args(cmd)
args.callback(args)
except BaseException:
self.failed_to_run(cmd)
# read processed notebook
with open(f_input, encoding='utf-8') as f:
nb_output = nbformat.read(f, as_version=4)
# check cells
cells = iter(nb_output['cells'])
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(cell['source'], 'Question 1')
cell = next(cells)
self.assertEqual(cell['cell_type'], 'markdown')
self.assertEqual(
cell['source'],
'```python\nif 1: # comment\n print([5 + 1, 4])\n```')
self.assertEqual(cell['metadata']['solution2'], 'hidden')
self.assertEqual(next(cells, 'EOF'), 'EOF')
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/scripts/tutorials/test_convert.py
|
Python
|
gpl-3.0
| 14,187
|
[
"ESPResSo"
] |
1cd84ceb5a7e4733d0262ea6a50bd4056a3e4614277d741d5756e8a82143513b
|
# -*- coding: utf-8 -*-
import tweepy
def performOauthDance(auth):
print('Please visit:', auth.get_authorization_url())
verifier = input('Verification token:')
try:
auth.get_access_token(verifier)
print('\n'.join([
'Authentication data is:',
'ACCESS_TOKEN=%s' % auth.access_token,
'ACCESS_TOKEN_SECRET=%s' % auth.access_token_secret]))
except tweepy.TweepError:
print('Failed to authenticate correctly.')
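# Editorial usage sketch (not part of the original script); the consumer key and
# secret below are placeholders for your own Twitter application credentials:
#
#     auth = tweepy.OAuthHandler('<CONSUMER_KEY>', '<CONSUMER_SECRET>')
#     performOauthDance(auth)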
|
runjak/hoodedfigure
|
oauthDance.py
|
Python
|
mit
| 483
|
[
"VisIt"
] |
e4209638aba9070bfe5459d349a0da719704536b23300a6819b6fd3229401089
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSnprelate(RPackage):
"""Genome-wide association studies (GWAS) are widely used to investigate
the genetic basis of diseases and traits, but they pose many
computational challenges. We developed an R package SNPRelate to
provide a binary format for single-nucleotide polymorphism (SNP) data
in GWAS utilizing CoreArray Genomic Data Structure (GDS) data files.
The GDS format offers the efficient operations specifically designed
for integers with two bits, since a SNP could occupy only two bits.
SNPRelate is also designed to accelerate two key computations on SNP
data using parallel computing for multi-core symmetric multiprocessing
computer architectures: Principal Component Analysis (PCA) and
relatedness analysis using Identity-By-Descent measures. The SNP GDS
format is also used by the GWASTools package with the support of S4
classes and generic functions. The extended GDS format is implemented
in the SeqArray package to support the storage of single nucleotide
variations (SNVs), insertion/deletion polymorphism (indel) and
structural variation calls."""
homepage = "https://bioconductor.org/packages/SNPRelate"
url = "https://git.bioconductor.org/packages/SNPRelate"
version('1.12.2', git='https://git.bioconductor.org/packages/SNPRelate', commit='dce2e2b6f36483a9f905bb5df6ae834a9f1136fe')
depends_on('[email protected]:3.4.9', when='@1.12.2')
depends_on('[email protected]:', type=('build', 'run'))
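# Editorial note (illustrative): once this package file is present in a Spack
# repository, the package can be installed with, e.g.,
#   spack install [email protected]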
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-snprelate/package.py
|
Python
|
lgpl-2.1
| 2,803
|
[
"Bioconductor"
] |
73841b59fbbe4ea15fddbf460c3d110b226a435451b2fe66b5722dfa0405764e
|
import sys
import pyranges as pr
import pandas as pd
def get_fasta(gr, path=None, pyfaidx_fasta=None):
"""Get fasta sequence.
Parameters
----------
gr : PyRanges
Coordinates.
path : str
Path to fasta file. It will be indexed using pyfaidx if an index is not found
pyfaidx_fasta : pyfaidx.Fasta
Alternative method to provide fasta target, as a pyfaidx.Fasta object
Returns
-------
Series
Sequences, one per interval.
Note
----
Sorting the PyRanges is likely to improve the speed.
Intervals on the negative strand will be reverse complemented.
Warning
-------
Note that the names in the fasta header and gr must be the same.
Examples
--------
>>> gr = pr.from_dict({"Chromosome": ["chr1", "chr1"],
... "Start": [5, 0], "End": [8, 5]})
>>> gr
+--------------+-----------+-----------+
| Chromosome | Start | End |
| (category) | (int32) | (int32) |
|--------------+-----------+-----------|
| chr1 | 5 | 8 |
| chr1 | 0 | 5 |
+--------------+-----------+-----------+
Unstranded PyRanges object has 2 rows and 3 columns from 1 chromosomes.
For printing, the PyRanges was sorted on Chromosome.
>>> tmp_handle = open("temp.fasta", "w+")
>>> _ = tmp_handle.write("> chr1\\n")
>>> _ = tmp_handle.write("ATTACCAT")
>>> tmp_handle.close()
>>> seq = pr.get_fasta(gr, "temp.fasta")
>>> seq
0 CAT
1 ATTAC
dtype: object
>>> gr.seq = seq
>>> gr
+--------------+-----------+-----------+------------+
| Chromosome | Start | End | seq |
| (category) | (int32) | (int32) | (object) |
|--------------+-----------+-----------+------------|
| chr1 | 5 | 8 | CAT |
| chr1 | 0 | 5 | ATTAC |
+--------------+-----------+-----------+------------+
Unstranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
For printing, the PyRanges was sorted on Chromosome.
"""
try:
import pyfaidx
except ModuleNotFoundError as e:
print("pyfaidx must be installed to get fasta sequences. Use `conda install -c bioconda pyfaidx` or `pip install pyfaidx` to install it.")
sys.exit(1)
if pyfaidx_fasta is None:
pyfaidx_fasta = pyfaidx.Fasta(path, read_ahead=int(1e5))
seqs = []
for k, df in gr:
if type(k) is tuple: #input is Stranded
_fasta = pyfaidx_fasta[k[0]]
if k[1]=='-':
for start, end in zip(df.Start, df.End):
seqs.append( (-_fasta[start:end]).seq ) # reverse complement
else:
for start, end in zip(df.Start, df.End):
seqs.append(_fasta[start:end].seq)
else:
_fasta = pyfaidx_fasta[k]
for start, end in zip(df.Start, df.End):
seqs.append(_fasta[start:end].seq)
return pd.concat([pd.Series(s) for s in seqs]).reset_index(drop=True)
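# Editorial example (not part of the original module): with a stranded PyRanges,
# intervals on the "-" strand are reverse complemented.  Reusing "temp.fasta"
# from the docstring above:
#
# >>> gr = pr.from_dict({"Chromosome": ["chr1"], "Start": [0], "End": [5],
# ...                    "Strand": ["-"]})
# >>> pr.get_fasta(gr, "temp.fasta")
# 0    GTAAT
# dtype: object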
|
biocore-ntnu/pyranges
|
pyranges/get_fasta.py
|
Python
|
mit
| 3,179
|
[
"Bioconda"
] |
9c44533c81533dfdda72ce4018434e00fe8baf834042bcf1be646b79dc3790b9
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tempfile
import os
import torch
from zoo.chronos.forecaster.seq2seq_forecaster import Seq2SeqForecaster
from zoo.orca import init_orca_context, stop_orca_context
from unittest import TestCase
import pytest
def create_data():
num_train_samples = 1000
num_val_samples = 400
num_test_samples = 400
input_time_steps = 24
input_feature_dim = 1
output_time_steps = 5
output_feature_dim = 1
def get_x_y(num_samples):
x = np.random.rand(num_samples, input_time_steps, input_feature_dim).astype(np.float32)
y = x[:, -output_time_steps:, :]*2 + \
np.random.rand(num_samples, output_time_steps, output_feature_dim).astype(np.float32)
return x, y
train_data = get_x_y(num_train_samples)
val_data = get_x_y(num_val_samples)
test_data = get_x_y(num_test_samples)
return train_data, val_data, test_data
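# Editorial note: create_data() yields (x, y) tuples shaped
#   x: (num_samples, 24, 1)   y: (num_samples, 5, 1)
# which match the past_seq_len=24 / future_seq_len=5 forecasters built in the
# tests below.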
class TestChronosModelTCNForecaster(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_tcn_forecaster_fit_eva_pred(self):
train_data, val_data, test_data = create_data()
forecaster = Seq2SeqForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
loss="mae",
lr=0.01)
train_loss = forecaster.fit(train_data, epochs=2)
test_pred = forecaster.predict(test_data[0])
assert test_pred.shape == test_data[1].shape
test_mse = forecaster.evaluate(test_data)
def test_tcn_forecaster_onnx_methods(self):
train_data, val_data, test_data = create_data()
forecaster = Seq2SeqForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
loss="mae",
lr=0.01)
forecaster.fit(train_data, epochs=2)
try:
import onnx
import onnxruntime
pred = forecaster.predict(test_data[0])
pred_onnx = forecaster.predict_with_onnx(test_data[0])
np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
mse = forecaster.evaluate(test_data, multioutput="raw_values")
mse_onnx = forecaster.evaluate_with_onnx(test_data,
multioutput="raw_values")
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
with pytest.raises(RuntimeError):
forecaster.build_onnx(sess_options=1)
forecaster.build_onnx(thread_num=1)
mse = forecaster.evaluate(test_data)
mse_onnx = forecaster.evaluate_with_onnx(test_data)
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
except ImportError:
pass
def test_tcn_forecaster_save_load(self):
train_data, val_data, test_data = create_data()
forecaster = Seq2SeqForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
loss="mae",
lr=0.01)
train_mse = forecaster.fit(train_data, epochs=2)
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
test_pred_save = forecaster.predict(test_data[0])
forecaster.save(ckpt_name)
forecaster.load(ckpt_name)
test_pred_load = forecaster.predict(test_data[0])
np.testing.assert_almost_equal(test_pred_save, test_pred_load)
def test_tcn_forecaster_runtime_error(self):
train_data, val_data, test_data = create_data()
forecaster = Seq2SeqForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
loss="mae",
lr=0.01)
with pytest.raises(RuntimeError):
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
forecaster.save(ckpt_name)
with pytest.raises(RuntimeError):
forecaster.predict(test_data[0])
with pytest.raises(RuntimeError):
forecaster.evaluate(test_data)
def test_tcn_forecaster_shape_error(self):
train_data, val_data, test_data = create_data()
forecaster = Seq2SeqForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=2,
loss="mae",
lr=0.01)
with pytest.raises(AssertionError):
forecaster.fit(train_data, epochs=2)
def test_tcn_forecaster_xshard_input(self):
train_data, val_data, test_data = create_data()
print("original", train_data[0].dtype)
init_orca_context(cores=4, memory="2g")
from zoo.orca.data import XShards
def transform_to_dict(data):
return {'x': data[0], 'y': data[1]}
def transform_to_dict_x(data):
return {'x': data[0]}
train_data = XShards.partition(train_data).transform_shard(transform_to_dict)
val_data = XShards.partition(val_data).transform_shard(transform_to_dict)
test_data = XShards.partition(test_data).transform_shard(transform_to_dict_x)
for distributed in [True, False]:
forecaster = Seq2SeqForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
loss="mae",
lr=0.01,
distributed=distributed)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data)
distributed_eval = forecaster.evaluate(val_data)
stop_orca_context()
def test_tcn_forecaster_distributed(self):
train_data, val_data, test_data = create_data()
init_orca_context(cores=4, memory="2g")
forecaster = Seq2SeqForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
loss="mae",
lr=0.01,
distributed=True)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data[0])
distributed_eval = forecaster.evaluate(val_data)
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
forecaster.to_local()
local_pred = forecaster.predict(test_data[0])
local_eval = forecaster.evaluate(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred, decimal=5)
try:
import onnx
import onnxruntime
local_pred_onnx = forecaster.predict_with_onnx(test_data[0])
local_eval_onnx = forecaster.evaluate_with_onnx(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred_onnx, decimal=5)
except ImportError:
pass
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
stop_orca_context()
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/chronos/forecaster/test_seq2seq_forecaster.py
|
Python
|
apache-2.0
| 8,709
|
[
"ORCA"
] |
e2bebd990c6e2d412023336d00bbdb5d13351b27c7df1ae7f4cfb848d20ff338
|
#!/usr/bin/env python
# Generate a number of FLML files from a base directory
# manipulating various parameters to explore parameter
# space
# Output structure is:
# output_dir/
# template (copied in if not already here)
# runs/
# 1/
# run.flml
# other defined files
# 2
# 3
# directory_listing.csv
import shutil
import sys
import string
import os
import itertools
import glob
import libspud
import argparse
def main():
parser = argparse.ArgumentParser(
description="""This script produces the FLML and input files required for parameter """+
"""sweeps. The user supplies a template file, output location and a text """+
"""file that contains the option paths and parameter sets that are to be """+
"""used."""
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose output: mainly progress reports",
default=False
)
# parser.add_argument(
# "-m",
# "--move-files",
# help="A file to move along with the flml, for example forcing NetCDF, initialisation files, etc." +
# "Files will need to be in the template directory."+
# "\nYou do not need to move the mesh files.\n"+
# "Add as many -m flags as requried",
# action="append",
# dest="extras",
# )
# positional args:
parser.add_argument(
'template_dir',
help="A directory containing the meshes, FLML, and any associated files"
)
parser.add_argument(
'output_dir',
help="A directory where output will be stored. Will be created is it doesn't exist"
)
parser.add_argument(
'param_space_file',
help="A text file containing a human-readable name; option path; comma-seperated list of values"
)
args = parser.parse_args()
output_dir = str(args.output_dir)
template_dir = str(args.template_dir)
param_space_file = str(args.param_space_file)
verbose = args.verbose
# check template dir exists
if (not os.path.exists(template_dir)):
print "Your template directory does not exist or you don't have permissions to read it"
sys.exit(-1)
# check it contains an FLML
if (len(glob.glob(os.path.join(template_dir,'*.flml'))) == 0):
print "Your template directory does not contain an FLML file. Can't do much without it."
sys.exit(-1)
    elif (len(glob.glob(os.path.join(template_dir,'*.flml'))) > 1):
        print "Warning: your template directory contains >1 FLML. We'll be using: "+ glob.glob(os.path.join(template_dir,'*.flml'))[0]
# get the name of the template dir, discarding the path
    direc = os.path.basename(os.path.normpath(template_dir))
# then we have the output directory
# We'll create a dir called "runs" and dump output in there, with a directory listing file
    # strip off any trailing /
if output_dir[-1] == '/':
output_dir = output_dir[:-1]
# check it exists
if (not os.path.exists(output_dir)):
os.mkdir(output_dir)
if (verbose):
print "Warning: Creating output directory: "+output_dir
# if the template dir is not already there, copy it in
if (not os.path.exists(os.path.join(output_dir,direc))):
shutil.copytree(template_dir,os.path.join(output_dir,direc))
if (verbose):
print "Copying template directory, into output folder"
# reset template_dir variable to point to the new one instead
template_dir = os.path.join(output_dir,direc)
# create "runs" directory
if (not os.path.exists(os.path.join(output_dir,"runs"))):
os.mkdir(os.path.join(output_dir,"runs"))
if (verbose):
print "Creating runs folder"
# third arg is the param space file
# Plain text file with the following format:
    # Name; spud_path; value_1:value_2:value_3
    # Name; spud_path; value_1:value_2
# check file exists
# read in the param space file
if (verbose):
print "Reading in parameter space"
param_space, paths, names = read_param_space(param_space_file)
# generate all the combinations
params = gen_param_combinations(param_space)
# make the FLMLs
gen_flmls(params, template_dir, output_dir, paths, names, verbose)
if (verbose):
print "Done generating files"
return 0
# set up parameter space
def read_param_space(param_space_file):
f = open(param_space_file,'r')
param_space = []
paths = []
names = []
for l in f:
line = l.strip()
data = line.split(';')
name = data[0]
path = data[1]
values = data[2].strip().split(':')
param_space.append(values)
paths.append(path)
names.append(name)
return param_space, paths, names
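# Editorial example of a parameter-space file (the option paths are purely
# illustrative).  One parameter per line; fields are ';'-separated and values
# are ':'-separated, so list-valued entries containing commas still parse:
#
#   gravity; /physical_parameters/gravity/magnitude; 9.8:1.6
#   velocity; /material_phase[0]/vector_field::Velocity/prescribed/value/constant; [1.0,0.0]:[0.0,1.0]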
def gen_param_combinations(param_space):
# thought this was going to be difficult, but it's one line...
return list(itertools.product(*param_space))
def gen_flmls(params, template_dir, output_dir, paths, names, verbose):
import types
# get flml file from template_dir - first one we come across
# If you have more in there, tough.
full_flml = glob.glob(os.path.join(template_dir,'*.flml'))[0]
# strip to the filename only
flml = os.path.basename(full_flml)
f = open(os.path.join(output_dir,'runs',"directory_listing.csv"),"w")
# append data to directory listing file
line = "Directory number"
for n in names:
line = line+","+n
line = line + "\n"
f.write(line)
    # loop over params
    # create a new directory, with a unique number, starting from 1
# This makes it easier to use in an array job on CX1
dir_num = 1
for p_set in params:
if (verbose):
print "Processing "+str(dir_num)
# copy contents from template folder to directory number
dirname = os.path.join(output_dir,'runs',str(dir_num))
if (os.path.exists(os.path.join(dirname))):
shutil.rmtree(dirname)
shutil.copytree(template_dir,dirname)
# open FLML file
output_file = os.path.join(dirname,flml)
        # This loads the FLML data into memory via the libspud library
libspud.load_options(output_file)
i = 0
for path in paths:
path = path.strip()
# get type
path_type = libspud.get_option_type(path)
path_rank = libspud.get_option_rank(path)
path_shape = libspud.get_option_shape(path)
if (path_type == libspud.pytype_map[libspud.SPUD_REAL] and path_rank == 0):
libspud.set_option(path,float(p_set[i]))
elif (path_rank == 1 and path_type == libspud.pytype_map[libspud.SPUD_REAL]):
value = eval(p_set[i])
val = list(map(float, value))
libspud.set_option(path,val)
elif (path_rank == 2 and path_type == libspud.pytype_map[libspud.SPUD_REAL]):
value = eval(p_set[i])
val = []
for row in value:
val.append(list(map(float, row)))
libspud.set_option(path,val)
i = i+1
# save file
libspud.write_options(output_file)
# append data to directory listing file
line = str(dir_num)
for p in p_set:
# quoting the params so csv parsers can get the columns right
line = line+","+'"'+str(p)+'"'
line = line +"\n"
f.write(line)
dir_num += 1
f.close()
if __name__ == "__main__":
main()
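# Illustrative invocation (editorial addition; paths are placeholders):
#   ./create_param_sweep.py -v template/ sweep_output/ param_space.txt
# which populates sweep_output/runs/1, sweep_output/runs/2, ... and writes
# sweep_output/runs/directory_listing.csv mapping run numbers to parameters.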
|
FluidityProject/multifluids
|
scripts/create_param_sweep.py
|
Python
|
lgpl-2.1
| 7,778
|
[
"NetCDF"
] |
293b9dbaa7f1cabe1f4e6e7b2c6ac49f80961a8e947fab83927603f59a03648e
|
""" core implementation of testing process: init, session, runtest loop. """
import py
import pytest, _pytest
import inspect
import os, sys, imp
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.mark import MarkInfo
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
def pytest_addoption(parser):
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
type="args", default=('.*', 'CVS', '_darcs', '{arch}'))
#parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
#)
group = parser.getgroup("general", "running and selection options")
group._addoption('-x', '--exitfirst', action="store_true", default=False,
dest="exitfirst",
help="exit instantly on first error or failed test."),
group._addoption('--maxfail', metavar="num",
action="store", type="int", dest="maxfail", default=0,
help="exit after first num failures or errors.")
group._addoption('--strict', action="store_true",
help="run pytest in strict mode, warnings become errors.")
group = parser.getgroup("collect", "collection")
group.addoption('--collectonly',
action="store_true", dest="collectonly",
help="only collect tests, don't execute them."),
group.addoption('--pyargs', action="store_true",
help="try to interpret all arguments as python packages.")
group.addoption("--ignore", action="append", metavar="path",
help="ignore path during collection (multi-allowed).")
group.addoption('--confcutdir', dest="confcutdir", default=None,
metavar="dir",
help="only load conftest.py's relative to specified dir.")
group = parser.getgroup("debugconfig",
"test session debugging and configuration")
group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
help="base temporary directory for this test run.")
def pytest_namespace():
collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
return dict(collect=collect)
def pytest_configure(config):
    py.test.config = config # compatibility
if config.option.exitfirst:
config.option.maxfail = 1
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config.pluginmanager.do_configure(config)
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
doit(config, session)
except pytest.UsageError:
msg = sys.exc_info()[1].args[0]
sys.stderr.write("ERROR: %s\n" %(msg,))
session.exitstatus = EXIT_USAGEERROR
except KeyboardInterrupt:
excinfo = py.code.ExceptionInfo()
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = EXIT_INTERRUPTED
except:
excinfo = py.code.ExceptionInfo()
config.pluginmanager.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
else:
if session._testsfailed:
session.exitstatus = EXIT_TESTSFAILED
finally:
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session,
exitstatus=session.exitstatus)
if initstate >= 1:
config.pluginmanager.do_unconfigure(config)
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.config.option.collectonly:
return True
def getnextitem(i):
# this is a function to avoid python2
# keeping sys.exc_info set when calling into a test
# python2 keeps sys.exc_info till the frame is left
try:
return session.items[i+1]
except IndexError:
return None
for i, item in enumerate(session.items):
nextitem = getnextitem(i)
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def pytest_ignore_collect(path, config):
p = path.dirpath()
ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
ignore_paths = ignore_paths or []
excludeopt = config.getvalue("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
return path in ignore_paths
class HookProxy:
def __init__(self, fspath, config):
self.fspath = fspath
self.config = config
def __getattr__(self, name):
hookmethod = getattr(self.config.hook, name)
def call_matching_hooks(**kwargs):
plugins = self.config._getmatchingplugins(self.fspath)
return hookmethod.pcall(plugins, **kwargs)
return call_matching_hooks
def compatproperty(name):
def fget(self):
# deprecated - use pytest.name
return getattr(pytest, name)
return property(fget)
class NodeKeywords(MappingMixin):
def __init__(self, node):
parent = node.parent
bases = parent and (parent.keywords._markers,) or ()
self._markers = type("dynmarker", bases, {node.name: True})
def __getitem__(self, key):
try:
return getattr(self._markers, key)
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
setattr(self._markers, key, value)
def __delitem__(self, key):
delattr(self._markers, key)
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def keys(self):
return dir(self._markers)
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(self, name, parent=None, config=None, session=None):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = getattr(parent, 'fspath', None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#self.extrainit()
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
#def extrainit(self):
# """"extra initialization after Node is initialized. Implemented
# by some subclasses. """
Module = compatproperty("Module")
Class = compatproperty("Class")
Instance = compatproperty("Instance")
Function = compatproperty("Function")
File = compatproperty("File")
Item = compatproperty("Item")
def _getcustomclass(self, name):
cls = getattr(self, name)
if cls != getattr(pytest, name):
py.log._apiwarn("2.0", "use of node.%s is deprecated, "
"use pytest_pycollect_makeitem(...) to create custom "
"collection nodes" % name)
return cls
def __repr__(self):
return "<%s %r>" %(self.__class__.__name__,
getattr(self, 'name', None))
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
try:
return self._nodeid
except AttributeError:
self._nodeid = x = self._makeid()
return x
def _makeid(self):
return self.parent.nodeid + "::" + self.name
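    # Editorial example: node ids compose as the File node's relative path plus
    # one "::<name>" segment per child, e.g. (illustrative)
    #   "tests/test_foo.py::TestClass::()::test_method"
    # where "()" is the id of a test class' Instance node (see matchnodes below).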
def __eq__(self, other):
if not isinstance(other, Node):
return False
return (self.__class__ == other.__class__ and
self.name == other.name and self.parent == other.parent)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.name, self.parent))
def setup(self):
pass
def teardown(self):
pass
def _memoizedcall(self, attrname, function):
exattrname = "_ex_" + attrname
failure = getattr(self, exattrname, None)
if failure is not None:
py.builtin._reraise(failure[0], failure[1], failure[2])
if hasattr(self, attrname):
return getattr(self, attrname)
try:
res = function()
except py.builtin._sysex:
raise
except:
failure = py.std.sys.exc_info()
setattr(self, exattrname, failure)
raise
setattr(self, attrname, res)
return res
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def listnames(self):
return [x.name for x in self.listchain()]
def getplugins(self):
return self.config._getmatchingplugins(self.fspath)
def getparent(self, cls):
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
if self.config.option.fulltrace:
style="long"
else:
self._prunetraceback(excinfo)
# XXX should excinfo.getrepr record all data and toterminal()
# process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
return excinfo.getrepr(funcargs=True,
showlocals=self.config.option.showlocals,
style=style)
repr_failure = _repr_failure_py
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
return self._repr_failure_py(excinfo, style="short")
def _memocollect(self):
""" internal helper method to cache results of calling collect(). """
return self._memoizedcall('_collected', lambda: list(self.collect()))
def _prunetraceback(self, excinfo):
if hasattr(self, 'fspath'):
path = self.fspath
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, "/")
super(FSCollector, self).__init__(name, parent, config, session)
self.fspath = fspath
def _makeid(self):
if self == self.session:
return "."
relpath = self.session.fspath.bestrelpath(self.fspath)
if os.sep != "/":
relpath = relpath.replace(os.sep, "/")
return relpath
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
# bestrelpath is a quite slow function
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
try:
fspath = cache[location[0]]
except KeyError:
fspath = self.session.fspath.bestrelpath(location[0])
cache[location[0]] = fspath
location = (fspath, location[1], str(location[2]))
self._location = location
return location
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Session(FSCollector):
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = 'builtins' # for py3
def __init__(self, config):
FSCollector.__init__(self, py.path.local(), parent=None,
config=config, session=self)
self.config.pluginmanager.register(self, name="session", prepend=True)
self._testsfailed = 0
self.shouldstop = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
def pytest_collectstart(self):
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, 'wasxfail'):
self._testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self._testsfailed >= maxfail:
self.shouldstop = "stopping after %d failures" % (
self._testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
return HookProxy(fspath, self.config)
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
hook.pytest_collection_modifyitems(session=self,
config=self.config, items=items)
finally:
hook.pytest_collection_finish(session=self)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
self._initialpaths = set()
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
self._initialpaths.add(parts[0])
self.ihook.pytest_collectstart(collector=self)
rep = self.ihook.pytest_make_collect_report(collector=self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
raise pytest.UsageError("not found: %s\n%s" %(arg, line))
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for parts in self._initialparts:
arg = "::".join(map(str, parts))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
break
self.trace.root.indent -= 1
def _collect(self, arg):
names = self._parsearg(arg)
path = names.pop(0)
if path.check(dir=1):
assert not names, "invalid arg %r" %(arg,)
for path in path.visit(fil=lambda x: x.check(file=1),
rec=self._recurse, bf=True, sort=True):
for x in self._collectfile(path):
yield x
else:
assert path.check(file=1)
for x in self.matchnodes(self._collectfile(path), names):
yield x
def _collectfile(self, path):
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, path):
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return
for pat in self._norecursepatterns:
if path.check(fnmatch=pat):
return False
ihook = self.gethookproxy(path)
ihook.pytest_collect_directory(path=path, parent=self)
return True
def _tryconvertpyarg(self, x):
mod = None
path = [os.path.abspath('.')] + sys.path
for name in x.split('.'):
# ignore anything that's not a proper name here
# else something like --pyargs will mess up '.'
# since imp.find_module will actually sometimes work for it
# but it's supposed to be considered a filesystem path
# not a package
if name_re.match(name) is None:
return x
try:
fd, mod, type_ = imp.find_module(name, path)
except ImportError:
return x
else:
if fd is not None:
fd.close()
if type_[2] != imp.PKG_DIRECTORY:
path = [os.path.dirname(mod)]
else:
path = [mod]
return mod
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
arg = str(arg)
if self.config.option.pyargs:
arg = self._tryconvertpyarg(arg)
parts = str(arg).split("::")
relpath = parts[0].replace("/", os.sep)
path = self.fspath.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
msg = "file or package not found: "
else:
msg = "file not found: "
raise pytest.UsageError(msg + arg)
parts[0] = path
return parts
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, pytest.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, pytest.Collector)
node.ihook.pytest_collectstart(collector=node)
rep = node.ihook.pytest_make_collect_report(collector=node)
if rep.passed:
has_matched = False
for x in rep.result:
if x.name == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, pytest.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, pytest.Collector)
node.ihook.pytest_collectstart(collector=node)
rep = node.ihook.pytest_make_collect_report(collector=node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = py.code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
|
c0710204/mirrorsBistu
|
pypi/bandersnatch/lib/python2.7/site-packages/_pytest/main.py
|
Python
|
mit
| 22,859
|
[
"VisIt"
] |
94df7d5b2cb7df463bf31fb802af1951fa488491cef29c27fb8cf06c140a46b5
|
from unittest import TestCase
from pymatgen.util.sequence import get_chunks, PBarSafe
class SequenceUtilsTest(TestCase):
def setUp(self):
self.sequence = list(range(100))
def test_get_chunks(self):
lengths = [len(chunk) for chunk in get_chunks(self.sequence, 30)]
self.assertTrue(all(length == 30 for length in lengths[:-1]))
self.assertEqual(lengths[-1], 10)
def test_pbar_safe(self):
pbar = PBarSafe(len(self.sequence))
self.assertEqual(pbar.total, len(self.sequence))
self.assertEqual(pbar.done, 0)
pbar.update(10)
self.assertEqual(pbar.done, 10)
|
gVallverdu/pymatgen
|
pymatgen/util/tests/test_sequence.py
|
Python
|
mit
| 639
|
[
"pymatgen"
] |
8944cf93106f6de65cc09b74b6cd683de2ce4d8d48c76e6589ec8bdaf56428d3
|
#!/usr/bin/env python
# Reports a beta diversity matrix for tabular input file
# using scikit-bio
# Daniel Blankenberg
import io
import optparse
import sys
from skbio import TreeNode
from skbio.diversity import beta_diversity
__VERSION__ = "0.0.1"
DELIMITER = '\t'
NEEDS_TREE = ['unweighted_unifrac', 'weighted_unifrac']
NEEDS_OTU_NAMES = ['unweighted_unifrac', 'weighted_unifrac']
def __main__():
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option('-v', '--version', dest='version', action='store_true', default=False, help='print version and exit')
parser.add_option('-i', '--input', dest='input', action='store', type="string", default=None, help='Input abundance Filename')
parser.add_option('', '--otu_column', dest='otu_column', action='store', type="int", default=None, help='OTU ID Column (1 based)')
parser.add_option('', '--sample_columns', dest='sample_columns', action='store', type="string",
default=None, help='Comma separated list of sample columns, unset to use all.')
parser.add_option('', '--header', dest='header', action='store_true', default=False, help='Abundance file has a header line')
parser.add_option('', '--distance_metric', dest='distance_metric', action='store', type="string", default=None, help='Distance metric to use')
parser.add_option('', '--tree', dest='tree', action='store', type="string", default=None, help='Newick Tree Filename')
parser.add_option('-o', '--output', dest='output', action='store', type="string", default=None, help='Output Filename')
(options, args) = parser.parse_args()
if options.version:
print("scikit-bio betadiversity from tabular file", __VERSION__, file=sys.stderr)
sys.exit()
if options.otu_column is not None:
otu_column = options.otu_column - 1
else:
otu_column = None
if options.sample_columns is None:
        with open(options.input, 'r') as fh:
line = fh.readline()
columns = list(range(len(line.split(DELIMITER))))
if otu_column in columns:
columns.remove(otu_column)
else:
columns = list(map(lambda x: int(x) - 1, options.sample_columns.split(",")))
    max_col = max(columns + ([otu_column] if otu_column is not None else []))
counts = [[] for x in columns]
sample_names = []
otu_names = []
with io.open(options.input, 'r', encoding="utf-8") as fh:
if options.header:
header = fh.readline().rstrip('\n\r').split(DELIMITER)
sample_names = [header[i] for i in columns]
else:
sample_names = ["SAMPLE_%i" % x for x in range(len(columns))]
for i, line in enumerate(fh):
fields = line.rstrip('\n\r').split(DELIMITER)
if len(fields) <= max_col:
print("Bad data line: ", fields, file=sys.stderr)
continue
if otu_column is not None:
otu_names.append(fields[otu_column])
else:
otu_names.append("OTU_%i" % i)
for j, col in enumerate(columns):
counts[j].append(int(fields[col]))
extra_kwds = {}
if options.distance_metric in NEEDS_OTU_NAMES:
extra_kwds['otu_ids'] = otu_names
if options.distance_metric in NEEDS_TREE:
assert options.tree, Exception("You must provide a newick tree when using '%s'" % options.distance_metric)
with io.open(options.tree, 'r', encoding='utf-8') as fh:
extra_kwds['tree'] = TreeNode.read(fh)
bd_dm = beta_diversity(options.distance_metric, counts, ids=sample_names, **extra_kwds)
bd_dm.write(options.output)
if __name__ == "__main__":
__main__()
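# Illustrative invocation (editorial addition; file names are placeholders):
#   python scikit_bio_diversity_beta_diversity.py \
#       --input abundance.tabular --otu_column 1 --sample_columns 2,3,4 \
#       --header --distance_metric braycurtis --output beta_dm.tabular
# The UniFrac metrics listed in NEEDS_TREE additionally require --tree.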
|
loraine-gueguen/tools-iuc
|
tools/scikit-bio/scikit_bio_diversity_beta_diversity.py
|
Python
|
mit
| 3,695
|
[
"scikit-bio"
] |
80334950bfeff7ac91089ad2372fc30935a77b4c6ff9daab6842de77223a379e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#**************************************************************************
#
# $Id: PGG_Simulator.py $
# $Revision: v18 $
# $Author: epichler $
# $Date: 2014-11-07 $
# Type: Python program.
#
# Comments: Runs a Public Goods Game simulator.
#
# Usage:
# Use the `-h' flag for further instructions on how to use this script.
#
# To do:
# - if simulation is a continuation of a previous run all previous output
# files have to be parsed and rewritten (the last record of the _ts file
# has to be recomputed and all of the _pss file has to be recomputed)
# - possibly introduce error checking for command line arguments, e.g.:
# - all max values >= respective min values
# - t_window >= (t_max - t_min)
#
# Copyright © 2012-2014 Elgar E. Pichler & Avi M. Shapiro.
# All rights reserved.
#
#**************************************************************************
#**************************************************************************
#
# Modifications:
# 2014/11/07 E. Pichler v18
# - merged again with PGG_Simulator_v02.py and added functionality from
# that program variant
# - introduced command line argument program_variant to select program
# variant
# - renamed utility container variable program to program_variant
# 2014/10/16 E. Pichler v17
# - eliminated computation and recording of powerlaw fit parameters
# 2014/08/07 E. Pichler v16
# - added computation and output of number of edges
# 2014/06/23 E. Pichler v15
# - added new command line argument t_graph, which is a list containing
# timesteps at which the PGG graph should be output to a file
# - moved nx.write_gml() from main program to simulate_pgg() in pgg.py
# 2014/03/07 E. Pichler v14
# - in main program changed looping over synergy and greediness so that
# utilities.erange() is used, thereby hopefully avoiding unexpected
# behavior because of float representation precision
# 2013/12/19 E. Pichler v13
# - set _alpha_delta_default to 0.05
# 2013/10/08 E. Pichler v12
# - corrected command line argument parsing
# - minor cosmetic formatting changes
# 2013/09/16 E. Pichler v12
# - introduced or changed definition and usage of n_changers,
# n_movers, n_unstable_nodes and made them arrays
# 2013/06/27 E. Pichler v11
# - changed type of n_cooperators container variable to list to enable
# computation of steady state value for this variable
# 2013/06/03 E. Pichler v10
# - replaced computation of weighted average of a PGG graph's components'
# average shortest path lengths with computation of a PGG graph's
# largest component's shortest path length
# - added corresponding container variables for recording of largest
# component's shortest path length
# 2013/05/16 E. Pichler v09
# - added container variables for recording of
# - number of PGG graph components
# - weighted average of a PGG graph's components' average shortest
# path lengths
#     - a PGG graph's average clustering coefficient
# 2013/04/25 E. Pichler v08
# - adapted to move of pgg_v01.py back to pgg.py
# 2013/04/25 E. Pichler v07
# - added handling of power law parameters in utility container
# 2013/04/04 E. Pichler v06
# - changed (corrected) synergy min, max, delta to reasonable values
# 2013/03/11 E. Pichler v05
# - added command line argument debug flag
# - continued adaptation of code to use of utility container
# - merged with Avi Shapiro's changes from 20130211
# - changed to conditional debug output
# - added t_window command line argument and default value
# - added initialization to steady state computation variables in container
# - changed k default to 8 (as in Roca)
# 2013/01/26 E. Pichler v04
# - corrected header comments
# - added command line argument for degree normalization factor
# - redirected pgg.record_run_summary output to output_file_pss
# - adapted code to availability of new utility container object
# 2012/12/07 E. Pichler v03
# - made use of pgg.py functions explicit
# - added additional program options
# - merged with Avi Shapiro's changes
# 2012/11/30 E. Pichler v02
# - as an intermediate solution branched Avi's pgg.py to pgg_v01.py and
# utilized the branched version in this file
# - added additional program options
# 2012/11/16 E. Pichler
# - added first interface with Avi's code from
# PublicGoods.py
# animatePGG.py
# pgg.py
# playPGG.py
# 2012/11/09 E. Pichler
# - defined additional min/max/delta for parameters
# - added additional program options
# - standardized parameter naming conventions
# - added main loops
# - added initial API definitions
# 2012/05/25 E. Pichler
# - added additional program options
# - introduced standardized output for input parameters and time series
# recording
# 2012/04/18 E. Pichler v01
# - added to do list
# 2012/04/08 E. Pichler
# - initial version
#
#**************************************************************************
#--------------------------------------------------------------------------
# ***** preliminaries *****
# --- import modules and packages ---
# - system packages
from __future__ import (
division,
print_function,
)
import argparse
#import math
#import networkx as nx
import os
import random as rd
import time
# - project packages
import pgg
import utilities
# --- program specific defaults ---
# - program name
program_name = os.path.basename(__file__)
# - debug flag
_debug = False
# - greediness
_alpha_delta_default = 0.05
_alpha_max_default = 1.0
_alpha_min_default = 0.
# - graph type
# choices: 1 ... Erdos-Renyi
# 2 ... Watts-Strogatz
# 3 ... Barabasi-Albert [default]
# 4 ... graph without edges
# 5 ... fully connected graph
_graph_type_default = 3
# - graph input file
_input_name_g_default = ""
# - "lattice" degree / degree scaling factor (k=8 in Roca)
_k_default = 8
# - mu
_mu_default = 0.01
# - players
_n_cooperators_init_default = 0
_n_players_default = 5000
# - number of simulations per parameter set
_n_simulations_default = 5
# - standard deviation of Gaussian noise
_noise_sigma_default = .1
# - directory for output files
_output_dir = "../data/"
# - program variant
# choices: 1 ... partial tanh change probability function and Gaussian
# noise term in satisfaction function (Roca_2011-like)
# [default]
# 2 ... smooth tanh change probability function and no noise
# term in satisfaction function
_program_variant_default = 1
# - stem of output script file
_script_name_stem = ""
# - synergy
_synergy_delta_default = 0.5
_synergy_max_default = 10. # should be k+1
_synergy_min_default = 1.
# - time
_t_delta_default = 1
_t_max_default = 10000
_t_min_default = 0
_t_graph_default = [_t_max_default]
# - steady state averaging time window
_t_window_default = 100
# --- function definitions ---
# PGG functions should be defined in pgg.py
# return a list of integers mapped from a string
def intlist(s):
    try:
        # list() guarantees a real list under both Python 2 and 3 (map returns
        # an iterator in Python 3).
        return list(map(int, s.split(',')))
    except ValueError:
        raise argparse.ArgumentTypeError("argument must be of type list of integers: i1,i2,i3,...")
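# Illustration of the parser type above (the input string is hypothetical):
# intlist("0,5000,10000") returns [0, 5000, 10000], so passing
# "--t_graph 0,5000,10000" requests graph output at those three timesteps.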
# ***** main program *****
# define command line arguments and parser
parser = argparse.ArgumentParser(description='Run a Public Goods Game simulator.')
parser.add_argument('-D', dest='debug',
action="store_true", default=_debug,
help='debug flag [default: %(default)s]')
parser.add_argument('--alpha_delta', metavar='alpha_delta', dest='alpha_delta',
type=float, nargs='?', default=_alpha_delta_default,
                    help='greediness delta [default: %(default)s]')
parser.add_argument('--alpha_max', metavar='alpha_max', dest='alpha_max',
type=float, nargs='?', default=_alpha_max_default,
help='greediness maximum [default: %(default)s]')
parser.add_argument('--alpha_min', metavar='alpha_min', dest='alpha_min',
type=float, nargs='?', default=_alpha_min_default,
help='greediness minimum [default: %(default)s]')
parser.add_argument('--graph_type', metavar='initial graph_type', dest='graph_type',
type=int, nargs='?', default=_graph_type_default,
help='graph types: 1 ... Erdos-Renyi, 2 ... Watts-Strogatz, 3 ... Barabasi-Albert, 4 ... graph without edges, 5 ... fully connected graph [default: %(default)s]')
parser.add_argument('--input_name_g', metavar='input_name_g', dest='input_name_g',
type=str, nargs='?', default=_input_name_g_default,
help='graph input file [default: %(default)s]')
parser.add_argument('--k', metavar='k', dest='k',
type=float, nargs='?', default=_k_default,
help='degree (scaling factor) / average degree [default: %(default)s]')
parser.add_argument('--mu', metavar='mu', dest='mu',
type=float, nargs='?', default=_mu_default,
help='memory loss parameter [default: %(default)s]')
parser.add_argument('--n_cooperators_init', metavar='n_cooperators_init', dest='n_cooperators_init',
type=int, nargs='?', default=_n_cooperators_init_default,
help='total number of initial cooperators [default: %(default)s]')
parser.add_argument('--n_players', metavar='n_players', dest='n_players',
type=int, nargs='?', default=_n_players_default,
help='total number of players [default: %(default)s]')
parser.add_argument('--n_simulations', metavar='n_simulations', dest='n_simulations',
type=int, nargs='?', default=_n_simulations_default,
help='number of simulations for given parameter set [default: %(default)s]')
parser.add_argument('--noise_sigma', metavar='noise_sigma', dest='noise_sigma',
type=float, nargs='?', default=_noise_sigma_default,
help='standard deviation of Gaussian noise [default: %(default)s]')
parser.add_argument('--output_dir', metavar='output_dir', dest='output_dir',
type=str, nargs='?', default=_output_dir,
help='directory for output files [default: %(default)s]')
parser.add_argument('--program_variant', metavar='program_variant', dest='program_variant',
type=int, nargs='?', default=_program_variant_default,
help='program variant: 1 ... partial tanh change probability function and Gaussian noise term in satisfaction function, 2 ... smooth tanh change probability function and no noise term in satisfaction function [default: %(default)s]')
parser.add_argument('--synergy_delta', metavar='synergy_delta', dest='synergy_delta',
type=float, nargs='?', default=_synergy_delta_default,
help='synergy factor delta [default: %(default)s]')
parser.add_argument('--synergy_max', metavar='synergy_max', dest='synergy_max',
type=float, nargs='?', default=_synergy_max_default,
help='synergy factor maximum [default: %(default)s]')
parser.add_argument('--synergy_min', metavar='synergy_min', dest='synergy_min',
type=float, nargs='?', default=_synergy_min_default,
help='synergy factor minimum [default: %(default)s]')
parser.add_argument('--t_graph', metavar='t_graph', dest='t_graph',
type=intlist, nargs='?', default=_t_graph_default,
help='list of times at which PGG graph is output [default: %(default)s]')
parser.add_argument('--t_max', metavar='t_max', dest='t_max',
type=int, nargs='?', default=_t_max_default,
help='number of generations to simulate game [default: %(default)s]')
parser.add_argument('--t_min', metavar='t_min', dest='t_min',
type=int, nargs='?', default=_t_min_default,
help='initial generation [default: %(default)s]')
parser.add_argument('--t_window', metavar='t_window', dest='t_window',
type=int, nargs='?', default=_t_window_default,
help='steady state averaging time window [default: %(default)s]')
# process command line arguments
args = parser.parse_args()
C = pgg.utility_container()
C.alpha_delta = args.alpha_delta
C.alpha_max = args.alpha_max
C.alpha_min = args.alpha_min
C.debug = args.debug
C.graph_type = args.graph_type
C.input_name_g = args.input_name_g
C.k = args.k
C.mu = args.mu
C.n_cooperators_init = args.n_cooperators_init
C.n_players = args.n_players
C.n_simulations = args.n_simulations
C.noise_sigma = args.noise_sigma
C.output_dir = args.output_dir
C.program_variant = args.program_variant
C.synergy_delta = args.synergy_delta
C.synergy_max = args.synergy_max
C.synergy_min = args.synergy_min
C.t_graph = args.t_graph
C.t_max = args.t_max
C.t_min = args.t_min
C.t_window = args.t_window
# initialize the other utility container variables
C.aspiration_average = [0]*C.t_window
C.aspiration_max = [0]*C.t_window
C.aspiration_min = [0]*C.t_window
C.clustering_coefficient_average = [0]*C.t_window
C.component_path_length_average = [0]*C.t_window
C.degree_average = [0]*C.t_window
C.degree_max = [0]*C.t_window
C.degree_min = [0]*C.t_window
C.largest_component_path_length = [0]*C.t_window
C.n_changers = [0]*C.t_window
C.n_components = [0]*C.t_window
C.n_cooperators = [0]*C.t_window
C.n_edges = [0]*C.t_window
C.n_edges_CC = [0]*C.t_window
C.n_edges_CD = [0]*C.t_window
C.n_edges_DD = [0]*C.t_window
C.n_largest_component = [0]*C.t_window
C.n_movers = [0]*C.t_window
C.n_unstable_nodes = [0]*C.t_window
C.payoff_average = [0]*C.t_window
C.payoff_max = [0]*C.t_window
C.payoff_min = [0]*C.t_window
#C.powerlaw_C = [0]*C.t_window
#C.powerlaw_gamma = [0]*C.t_window
C.satisfaction_average = [0]*C.t_window
C.satisfaction_max = [0]*C.t_window
C.satisfaction_min = [0]*C.t_window
C.t_delta = _t_delta_default
# debug mode check
if C.debug:
print("alpha_delta =", C.alpha_delta)
print("alpha_max =", C.alpha_max)
print("alpha_min =", C.alpha_min)
print("debug =", C.debug)
print("graph_type =", C.graph_type)
print("input_name_g =", C.input_name_g)
print("k =", C.k)
print("mu =", C.mu)
print("n_cooperators_init =", C.n_cooperators_init)
print("n_players =", C.n_players)
print("n_simulations =", C.n_simulations)
print("noise_sigma =", C.noise_sigma)
print("output_dir =", C.output_dir)
print("program_variant =", C.program_variant)
print("synergy_delta =", C.synergy_delta)
print("synergy_max =", C.synergy_max)
print("synergy_min =", C.synergy_min)
print("t_delta =", C.t_delta)
print("t_graph =", C.t_graph)
print("t_max =", C.t_max)
print("t_min =", C.t_min)
print("t_window =", C.t_window)
# execute the main program
if C.debug:
print("----- ", program_name, ": start ... -----", sep="")
#exit()
# initialize random number generator
rd.seed()
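# Note on the sweep below: utilities.erange() is assumed here to behave like an
# inclusive float range, e.g. erange(1.0, 10.0, 0.5, True) yielding
# 1.0, 1.5, ..., 10.0, so that the endpoints are hit exactly despite float
# rounding (this reading is inferred from the call sites, not from utilities.py).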
# begin of loop over synergy values
for C.synergy in utilities.erange(C.synergy_min, C.synergy_max, C.synergy_delta, True):
# begin of loop over greediness values
for C.alpha in utilities.erange(C.alpha_min, C.alpha_max, C.alpha_delta, True):
# begin of loop over number of simulations
for i in range(C.n_simulations):
# get execution time stamp and output file names
C.time_stamp = time.strftime("%Y%m%d%H%M%S", time.gmtime())
C.index = C.time_stamp + "_%06d" % rd.randint(0, 999999)
C.stem = C.output_dir + C.index
C.output_name_ts = C.stem + "_ts.csv" # output for time series
C.output_name_pss = C.stem + "_pss.csv" # output for parameters and steady state values
# generate graph
G = pgg.create_pgg_graph(C)
# simulate PGG; loop over time
pgg.simulate_pgg(G, C)
# record run summary
pgg.record_run_summary(G, C)
# end of loop over number of simulations
# end of loop over greediness values
# end of loop over synergy values
if C.debug:
print("----- ", program_name, ": ... end -----", sep="")
#--------------------------------------------------------------------------
|
avishapiro/public-goods-game-simulator
|
src/PGG_Simulator.py
|
Python
|
lgpl-3.0
| 16,592
|
[
"Gaussian"
] |
66b8f24cc265ce23536f7c3b923ef3422ef60ef968c37f5e39c7d67bc860e7ee
|
#!/usr/bin/python
"""
Transform FreeSurfer surfaces to the original space of a given image.
Copyright 2011 Arno Klein ([email protected]) after
https://gist.github.com/1459725 by Satrajit Ghosh
Apache License, Version 2.0
"""
import os, sys
import numpy as np
import nibabel as nb
from tvtk.api import tvtk
from mayavi import mlab
import nibabel.gifti as gifti
transform_volume = 0
convert_volumes = 0
get_transforms = 1
transform_surfaces = 1
plot_output = 1
plot_output_files = 1
surfaces = ['pial'] #,'smoothwm'] #['inflated','sphere']
hemis = ['lh','rh']
# Inputs
if len(sys.argv) < 3:
print "Usage: python freesurfer_to_native_space.py <path to FreeSurfer subject> <output directory>"
exit(-1)
else:
subject_path = sys.argv[1]
output_path = sys.argv[2]
original_volume_mgz = subject_path + '/mri/orig/001.mgz'
conformed_volume_mgz = subject_path + '/mri/brain.mgz'
original_volume_nii = output_path + '/original.nii.gz'
conformed_volume_nii = output_path + '/conformed.nii.gz'
transformed_volume_nii = output_path + '/transformed.nii.gz'
# Reslice a conformed volume in native ("transformed") space:
if transform_volume and os.path.exists(conformed_volume_mgz) and os.path.exists(original_volume_mgz):
args = ['mri_convert -rl',original_volume_mgz,'-rt nearest',conformed_volume_mgz,transformed_volume_nii]
print(" ".join(args)); os.system(" ".join(args)); # p = Popen(args); p.close()
# Convert volume files from FreeSurfer's mgz format to nifti format:
if convert_volumes:
if os.path.exists(original_volume_mgz):
cmd = 'mri_convert ' + original_volume_mgz + ' ' + original_volume_nii; os.system(cmd)
if os.path.exists(conformed_volume_mgz):
cmd = 'mri_convert ' + conformed_volume_mgz + ' ' + conformed_volume_nii; os.system(cmd)
# Load the original and conformed volume files' affine transform matrices:
if get_transforms and os.path.exists(conformed_volume_nii) and os.path.exists(original_volume_nii):
affine_conformed = nb.load(conformed_volume_nii).get_affine()
affine_original = nb.load(original_volume_nii).get_affine()
# Create and apply a transform matrix to FreeSurfer's surface meshes:
if transform_surfaces:
# Create the transform matrix from FreeSurfer's conformed surface to conformed volume:
M = np.array([[-1,0,0,128],
[0,0,1,-128],
[0,-1,0,128],
[0,0,0,1]],dtype=float)
print('xfm = np.dot( np.linalg.inv(affine_original), np.dot(affine_conformed, np.linalg.inv(M)))')
xfm = np.dot( np.linalg.inv(affine_original), np.dot(affine_conformed, np.linalg.inv(M)))
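    # Reading the product right to left (inferred from the code, not verified
    # against FreeSurfer docs): M appears to act as the conformed volume's
    # vox2ras-tkr matrix, so inv(M) maps surface (tkr) RAS to conformed voxel
    # indices, affine_conformed maps those to scanner RAS, and
    # inv(affine_original) maps scanner RAS to voxel indices of the original
    # volume. Note that xfm is redefined inside the surface loop below.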
# Apply the above transform to FreeSurfer's surface meshes:
for surface in surfaces:
for hemi in hemis:
freesurfer_surface = subject_path + '/surf/' + hemi + '.' + surface
gifti_surface = output_path + '/' + hemi + '.' + surface + '.gii'
vtk_surface = output_path + '/' + hemi + '.' + surface + '.vtp'
if os.path.exists(freesurfer_surface):
# Convert files from FreeSurfer surface format to gifti:
args = ['mris_convert', freesurfer_surface, gifti_surface]
print(" ".join(args)); os.system(" ".join(args)); # p = Popen(args); p.close()
# Load gifti surface:
surf = gifti.read(gifti_surface)
# Transform the vertex data array:
transformed_vertices = np.dot(xfm, np.hstack((surf.darrays[0].data, \
np.ones((surf.darrays[0].data.shape[0],1)))).T)[:3,:].T
surf=nb.freesurfer.read_geometry(freesurfer_surface)
xfm = np.dot(affine_conformed, np.linalg.inv(M))
xfmda0 = np.dot(xfm, np.hstack((surf[0], np.ones((surf[0].shape[0],1)))).T)[:3,:].T
mesh = tvtk.PolyData(points=xfmda0, polys=surf[1])
# Create a mesh:
# mesh = tvtk.PolyData(points=transformed_vertices, polys=surf.darrays[1].data)
if plot_output and not plot_output_files:
mlab.pipeline.surface(mesh)
# Write gifti surface in original space to vtk file:
print('w = tvtk.XMLPolyDataWriter(input=mesh, file_name=vtk_surface)')
w = tvtk.XMLPolyDataWriter(input=mesh, file_name=vtk_surface)
w.write()
# Plot the brain and surfaces transformed to native space:
if plot_output:
cdata = nb.load(transformed_volume_nii).get_data()
#mlab.pipeline.volume(mlab.pipeline.scalar_field(cdata))
if plot_output_files:
for surface in surfaces:
for hemi in hemis:
vtk_surface = output_path + '/' + hemi + '.' + surface + '.vtp'
mesh_reader = tvtk.XMLPolyDataReader(file_name=vtk_surface)
mesh2 = mesh_reader.output
mlab.pipeline.surface(mesh2)
mlab.show()
|
binarybottle/mindboggle_sidelined
|
freesurfer_to_native_space_OLD_gifti.py
|
Python
|
apache-2.0
| 4,961
|
[
"Mayavi",
"VTK"
] |
4b1c201d1aa295e65c9c06cd911e712109e50150374da46f77e006d6e4ecc399
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.simpleapi import *
from vesuvio.profiles import (create_from_str, GaussianMassProfile,
MultivariateGaussianMassProfile,
GramCharlierMassProfile)
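# Judging from the test cases below, the profile string accepts either a fixed
# width ("width=10") or a constrained width ("width=[low, start, high]"), where
# the outer values become a "low < Width < high" constraint and the middle
# value is presumably the starting guess for the fit.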
# --------------------------------------------------------------------------------
# Gaussian
# --------------------------------------------------------------------------------
class GaussianMassProfileTest(unittest.TestCase):
# ---------------- Success cases ---------------------------
def test_string_with_fixed_width_produces_valid_object(self):
function_str = "function=Gaussian,width=10"
mass = 16.0
profile = create_from_str(function_str, mass)
self.assertTrue(isinstance(profile, GaussianMassProfile))
self.assertAlmostEqual(mass, profile.mass)
self.assertAlmostEqual(10.0, profile.width)
def test_string_with_constrained_width_produces_valid_object(self):
function_str = "function=Gaussian,width=[2, 5, 7]"
mass = 16.0
profile = create_from_str(function_str, mass)
self.assertTrue(isinstance(profile, GaussianMassProfile))
self.assertAlmostEqual(mass, profile.mass)
self.assertEqual([2, 5, 7], profile.width)
def test_function_string_has_expected_form_with_no_defaults(self):
test_profiles = GaussianMassProfile(10, 16)
expected = "name=GaussianComptonProfile,Mass=16.000000,Width=10.000000;"
self.assertEqual(expected, test_profiles.create_fit_function_str())
def test_function_string_has_expected_form_with_defaults_given(self):
test_profiles = GaussianMassProfile(10, 16)
param_prefix = "f1."
param_vals = {"f1.Width": 11.0, "f1.Intensity": 4.5}
expected = "name=GaussianComptonProfile,Mass=16.000000,Width=11.000000,Intensity=4.500000;"
self.assertEqual(expected, test_profiles.create_fit_function_str(param_vals, param_prefix))
def test_constraint_str_is_only_intensity_for_fixed_width(self):
test_profile = GaussianMassProfile(10, 16)
self.assertEqual("Intensity > 0.0", test_profile.create_constraint_str())
def test_constraint_str_for_constrained_width(self):
test_profile = GaussianMassProfile([2,5,7], 16)
self.assertEqual("2.000000 < Width < 7.000000,Intensity > 0.0", test_profile.create_constraint_str())
# and with prefix
self.assertEqual("2.000000 < f0.Width < 7.000000,f0.Intensity > 0.0", test_profile.create_constraint_str("f0."))
def test_ties_str_for_fixed_width(self):
test_profile = GaussianMassProfile(10, 16)
self.assertEqual("Mass=16.000000,Width=10.000000", test_profile.create_ties_str())
def test_ties_str_for_constrained_width_contains_only_mass(self):
test_profile = GaussianMassProfile([2,5,7], 16)
self.assertEqual("Mass=16.000000", test_profile.create_ties_str())
# and with prefix
self.assertEqual("f0.Mass=16.000000", test_profile.create_ties_str("f0."))
# ---------------- Failure cases ---------------------------
def test_string_not_starting_with_function_equals_name_gives_error(self):
function_str = "function=Gaussia,width=[2, 5, 7]"
mass = 16.0
self.assertRaises(TypeError, GaussianMassProfile.from_str,
function_str, mass)
def test_string_not_starting_with_function_gives_error(self):
function_str = "Gaussian,width=[2, 5, 7]"
mass = 16.0
self.assertRaises(TypeError, GaussianMassProfile.from_str,
function_str, mass)
def test_string_with_wrong_function_gives_error(self):
function_str = "function=GramCharlier,width=[2, 5, 7]"
mass = 16.0
self.assertRaises(TypeError, GaussianMassProfile.from_str,
function_str, mass)
# --------------------------------------------------------------------------------
# Multivariate Gaussian
# --------------------------------------------------------------------------------
class MultivariateGaussianMassProfileTest(unittest.TestCase):
# ---------------- Success cases ---------------------------
def test_function_string_has_expected_form_with_no_defaults(self):
test_profiles = MultivariateGaussianMassProfile(None, 16)
expected = "name=MultivariateGaussianComptonProfile,IntegrationSteps=64,Mass=16.000000,SigmaX=1.000000,SigmaY=1.000000,SigmaZ=1.000000;"
self.assertEqual(expected, test_profiles.create_fit_function_str())
def test_function_string_has_expected_form_with_defaults_given(self):
test_profiles = MultivariateGaussianMassProfile(None, 16)
param_prefix = "f1."
param_vals = {
"f1.SigmaX": 5.0,
"f1.SigmaY": 8.0,
"f1.SigmaZ": 6.0,
"f1.Intensity": 4.5}
expected = "name=MultivariateGaussianComptonProfile,IntegrationSteps=64,Mass=16.000000,SigmaX=5.000000,SigmaY=8.000000,SigmaZ=6.000000,Intensity=4.500000;"
self.assertEqual(expected, test_profiles.create_fit_function_str(param_vals, param_prefix))
def test_function_string_integration_steps(self):
test_profiles = MultivariateGaussianMassProfile(None, 16)
test_profiles.integration_steps = 256
expected = "name=MultivariateGaussianComptonProfile,IntegrationSteps=256,Mass=16.000000,SigmaX=1.000000,SigmaY=1.000000,SigmaZ=1.000000;"
self.assertEqual(expected, test_profiles.create_fit_function_str())
def test_constraint_str(self):
test_profile = MultivariateGaussianMassProfile(None, 16)
self.assertEqual("Intensity > 0.0,SigmaX > 0.0,SigmaY > 0.0,SigmaZ > 0.0", test_profile.create_constraint_str())
def test_ties_str_for_fixed_width(self):
test_profile = MultivariateGaussianMassProfile(None, 16)
self.assertEqual("Mass=16.000000", test_profile.create_ties_str())
# ---------------- Failure cases ---------------------------
def test_string_not_starting_with_function_equals_name_gives_error(self):
function_str = "functipn=MultivariateGaussia,SigmaX=1.0,SigmaY=1.0,SigmaZ=1.0"
mass = 16.0
self.assertRaises(TypeError, MultivariateGaussianMassProfile.from_str,
function_str, mass)
def test_string_not_starting_with_function_gives_error(self):
function_str = "MultivariateGaussian,SigmaX=1.0,SigmaY=1.0,SigmaZ=1.0"
mass = 16.0
self.assertRaises(TypeError, MultivariateGaussianMassProfile.from_str,
function_str, mass)
def test_string_with_wrong_function_gives_error(self):
function_str = "function=Gaussian,width=[2, 5, 7]"
mass = 16.0
self.assertRaises(TypeError, MultivariateGaussianMassProfile.from_str,
function_str, mass)
# --------------------------------------------------------------------------------
# GramCharlier
# --------------------------------------------------------------------------------
class GramCharlierMassProfileTest(unittest.TestCase):
def test_string_with_fixed_width_produces_valid_object(self):
function_str = "function=GramCharlier,width=[2, 5,7],k_free=1,hermite_coeffs=[1,0,1],sears_flag=0,"
mass = 16.0
profile = create_from_str(function_str, mass)
self.assertTrue(isinstance(profile, GramCharlierMassProfile))
self.assertAlmostEqual(mass, profile.mass)
self.assertEqual([2, 5, 7], profile.width)
self.assertEqual([1, 0, 1], profile.hermite_co)
self.assertEqual(0, profile.sears_flag)
self.assertEqual(1, profile.k_free)
def test_function_string_has_expected_form_with_no_defaults(self):
test_profile = GramCharlierMassProfile(10, 16,[1,0,1],1,1)
expected = "name=GramCharlierComptonProfile,Mass=16.000000,HermiteCoeffs=1 0 1,Width=10.000000;"
self.assertEqual(expected, test_profile.create_fit_function_str())
def test_function_string_has_expected_form_with_given_values(self):
test_profile = GramCharlierMassProfile(10, 16,[1,0,1],1,1)
param_prefix = "f1."
param_vals = {"f1.Width": 11.0, "f1.FSECoeff": 0.1, "f1.C_0": 0.25,
"f1.C_2": 0.5, "f1.C_4": 0.75}
expected = "name=GramCharlierComptonProfile,Mass=16.000000,HermiteCoeffs=1 0 1,"\
"Width=11.000000,FSECoeff=0.100000,C_0=0.250000,C_4=0.750000;"
self.assertEqual(expected, test_profile.create_fit_function_str(param_vals, param_prefix))
def test_constraint_str_for_fixed_width(self):
test_profile = GramCharlierMassProfile(10, 16, [1, 0, 1], k_free=1, sears_flag=1)
expected = "C_0 > 0.0,C_4 > 0.0"
self.assertEqual(expected, test_profile.create_constraint_str())
def test_constraint_str_for_constrained_width(self):
test_profile = GramCharlierMassProfile([2,5,7], 16, [1,0,1], k_free=1, sears_flag=1)
expected = "2.000000 < Width < 7.000000,C_0 > 0.0,C_4 > 0.0"
self.assertEqual(expected, test_profile.create_constraint_str())
prefix = "f0."
expected = "2.000000 < f0.Width < 7.000000,f0.C_0 > 0.0,f0.C_4 > 0.0"
self.assertEqual(expected, test_profile.create_constraint_str(prefix))
    def test_ties_str_for_constrained_width_and_k_is_free_contains_only_mass(self):
test_profile = GramCharlierMassProfile([2,5,7], 16, [1,0,1], k_free=1, sears_flag=1)
expected = "Mass=16.000000"
self.assertEqual(expected, test_profile.create_ties_str())
def test_ties_str_for_constrained_width(self):
# k is free
test_profile = GramCharlierMassProfile([2,5,7], 16, [1,0,1], k_free=1, sears_flag=1)
expected = "Mass=16.000000"
self.assertEqual(expected, test_profile.create_ties_str())
# k is tied, sears=0
test_profile = GramCharlierMassProfile([2,5,7], 16, [1,0,1], k_free=0, sears_flag=0)
expected = "f0.Mass=16.000000,f0.FSECoeff=0"
self.assertEqual(expected, test_profile.create_ties_str("f0."))
# k is tied, sears=1
test_profile = GramCharlierMassProfile([2,5,7], 16, [1,0,1], k_free=0, sears_flag=1)
expected = "f0.Mass=16.000000,f0.FSECoeff=f0.Width*sqrt(2)/12"
self.assertEqual(expected, test_profile.create_ties_str("f0."))
def test_ties_str_for_fixed_width(self):
test_profile = GramCharlierMassProfile(5, 16, [1,0,1], k_free=1, sears_flag=1)
expected = "Mass=16.000000,Width=5.000000"
self.assertEqual(expected, test_profile.create_ties_str())
# k is tied, sears=0
test_profile = GramCharlierMassProfile(5, 16, [1,0,1], k_free=0, sears_flag=0)
expected = "f0.Mass=16.000000,f0.Width=5.000000,f0.FSECoeff=0"
self.assertEqual(expected, test_profile.create_ties_str("f0."))
# k is tied, sears=1
test_profile = GramCharlierMassProfile(5, 16, [1,0,1], k_free=0, sears_flag=1)
expected = "f0.Mass=16.000000,f0.Width=5.000000,f0.FSECoeff=f0.Width*sqrt(2)/12"
self.assertEqual(expected, test_profile.create_ties_str("f0."))
# ---------------- Failure cases ---------------------------
def test_string_not_starting_with_function_equals_name_gives_error(self):
function_str = "function=GramCharlie,width=[2, 5, 7]"
mass = 16.0
self.assertRaises(TypeError, GramCharlierMassProfile.from_str,
function_str, mass)
def test_string_not_starting_with_function_gives_error(self):
function_str = "GramCharlie,width=[2, 5, 7]"
mass = 16.0
self.assertRaises(TypeError, GramCharlierMassProfile.from_str,
function_str, mass)
def test_string_with_wrong_function_gives_error(self):
function_str = "function=Gaussian,width=[2, 5, 7]"
mass = 16.0
self.assertRaises(TypeError, GramCharlierMassProfile.from_str,
function_str, mass)
if __name__ == '__main__':
unittest.main()
|
mganeva/mantid
|
scripts/test/VesuvioProfileTest.py
|
Python
|
gpl-3.0
| 12,366
|
[
"Gaussian"
] |
ce9262c2279004de43e06c709b0f87c8f9505c5cf23a44368841f91aee5c7d88
|
# Orca
#
# Copyright 2010 Joanmarie Diggs, Mesar Hameed.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" A list of common keybindings and unbound keys
pulled out from script.py: __getLaptopBindings()
with the goal of being more readable and less monolithic.
"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs, Mesar Hameed."
__license__ = "LGPL"
import orca.settings as settings
# Storing values
defaultModifierMask = settings.defaultModifierMask
ORCA_MODIFIER_MASK = settings.ORCA_MODIFIER_MASK
NO_MODIFIER_MASK = settings.NO_MODIFIER_MASK
ORCA_SHIFT_MODIFIER_MASK = settings.ORCA_SHIFT_MODIFIER_MASK
ORCA_CTRL_MODIFIER_MASK = settings.ORCA_CTRL_MODIFIER_MASK
CTRL_MODIFIER_MASK = settings.CTRL_MODIFIER_MASK
ALT_MODIFIER_MASK = settings.ALT_MODIFIER_MASK
SHIFT_MODIFIER_MASK = settings.SHIFT_MODIFIER_MASK
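# Each keymap entry below appears to be a 4-tuple of
# (keysym, default modifier mask, modifiers that must be held, handler name);
# for example, ("Right", defaultModifierMask, CTRL_MODIFIER_MASK,
# "goNextWordHandler") binds Ctrl+Right to the next-word handler.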
# KeyBindings that use the arrow keys for navigating HTML content.
arrowKeymap = (
("Right", defaultModifierMask, NO_MODIFIER_MASK, "goNextCharacterHandler"),
("Left", defaultModifierMask, NO_MODIFIER_MASK,
"goPreviousCharacterHandler"),
("Right", defaultModifierMask, CTRL_MODIFIER_MASK, "goNextWordHandler"),
("Left", defaultModifierMask, CTRL_MODIFIER_MASK, "goPreviousWordHandler"),
("Up", defaultModifierMask, NO_MODIFIER_MASK, "goPreviousLineHandler"),
("Down", defaultModifierMask, NO_MODIFIER_MASK, "goNextLineHandler"),
("Down", defaultModifierMask, ALT_MODIFIER_MASK, "expandComboBoxHandler"),
("Home", defaultModifierMask, CTRL_MODIFIER_MASK, "goTopOfFileHandler"),
("End", defaultModifierMask, CTRL_MODIFIER_MASK, "goBottomOfFileHandler"),
("Home", defaultModifierMask, NO_MODIFIER_MASK, "goBeginningOfLineHandler"),
("End", defaultModifierMask, NO_MODIFIER_MASK, "goEndOfLineHandler"),
)
commonKeymap = (
# keybindings to provide chat room message history.
("F1", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F2", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F3", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F4", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F5", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F6", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F7", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F8", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F9", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
# misc
("backslash", defaultModifierMask, SHIFT_MODIFIER_MASK,
"setLivePolitenessOff"),
("backslash", defaultModifierMask, ORCA_SHIFT_MODIFIER_MASK,
"monitorLiveRegions"),
("backslash", defaultModifierMask, NO_MODIFIER_MASK,
"advanceLivePoliteness"),
("F12", defaultModifierMask, ORCA_MODIFIER_MASK,
"toggleCaretNavigationHandler"),
("Right", defaultModifierMask, ORCA_MODIFIER_MASK,
"goNextObjectInOrderHandler"),
("Left", defaultModifierMask, ORCA_MODIFIER_MASK,
"goPreviousObjectInOrderHandler"),
)
desktopKeymap = (
("KP_Multiply", defaultModifierMask, ORCA_MODIFIER_MASK,
"moveToMouseOverHandler"),
)
laptopKeymap = (
("0", defaultModifierMask, ORCA_MODIFIER_MASK, "moveToMouseOverHandler"),
)
|
h4ck3rm1k3/orca-sonar
|
src/orca/scripts/toolkits/Gecko/keymaps.py
|
Python
|
lgpl-2.1
| 4,099
|
[
"ORCA"
] |
6c3a20872dee7de76120a87e97dec6cd1ef9dec5a388ef243d1c5912c4aaaf62
|
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
import numpy as np
class NoiseParametersCZ(Instrument):
'''
Noise and other parameters for cz_superoperator_simulation_new
'''
def __init__(self, name, **kw):
super().__init__(name, **kw)
# Noise parameters
self.add_parameter('T1_q0', unit='s',
label='T1 fluxing qubit',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
self.add_parameter('T1_q1', unit='s',
label='T1 static qubit',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
self.add_parameter('T2_q1', unit='s',
label='T2 static qubit',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
self.add_parameter('T2_q0_amplitude_dependent',
                           label='fit coefficients giving T2_q0 or Tphi_q0 as a function of inverse sensitivity (in units of w_q0/Phi_0): a, b. Function is ax+b',
parameter_class=ManualParameter,
vals=vals.Arrays(), initial_value=np.array([-1,-1,]))
# for flux noise simulations
self.add_parameter('sigma_q0', unit='flux quanta',
label='standard deviation of the Gaussian from which we sample the flux bias, q0',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
self.add_parameter('sigma_q1', unit='flux quanta',
label='standard deviation of the Gaussian from which we sample the flux bias, q1',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
# Some system parameters
# w_bus was implemented in the fluxlutman as bus_freq_{which_gate}
# self.add_parameter('w_bus', unit='Hz',
# label='omega of the bus resonator',
# parameter_class=ManualParameter,
# vals=vals.Numbers())
# alpha_q1 was implemented in the fluxlutman as anharm_q1_{which_gate}
# self.add_parameter('alpha_q1', unit='Hz',
# label='anharmonicity of the static qubit',
# parameter_class=ManualParameter,
# vals=vals.Numbers())
self.add_parameter('w_q1_sweetspot',
label='NB: different from the operating point in general',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('w_q0_sweetspot',
label='NB: different from the operating point in general',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('Z_rotations_length', unit='s',
label='duration of the single qubit Z rotations at the end of the pulse',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
self.add_parameter('total_idle_time', unit='s',
label='duration of the idle time',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
# Control parameters for the simulations
self.add_parameter('dressed_compsub',
label='true if we use the definition of the comp subspace that uses the dressed 00,01,10,11 states',
parameter_class=ManualParameter,
vals=vals.Bool())
self.add_parameter('distortions',
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('voltage_scaling_factor', unit='a.u.',
label='scaling factor for the voltage for a CZ pulse',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=1)
self.add_parameter('n_sampling_gaussian_vec',
label='array. each element is a number of samples from the gaussian distribution. Std to guarantee convergence is [11]. More are used only to verify convergence',
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([11]))
self.add_parameter('cluster',
label='true if we want to use the cluster',
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('look_for_minimum',
label='changes cost function to optimize either research of minimum of avgatefid_pc or to get the heat map in general',
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('T2_scaling', unit='a.u.',
label='scaling factor for T2_q0_amplitude_dependent',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=1)
self.add_parameter('waiting_at_sweetspot', unit='s',
label='time spent at sweetspot during the two halves of a netzero pulse',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
# for ramsey/Rabi simulations
self.add_parameter('detuning', unit='Hz',
label='detuning of w_q0 from its sweet spot value',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=0)
self.add_parameter('initial_state',
label='determines initial state for ramsey_simulations_new',
parameter_class=ManualParameter,
vals=vals.Strings(), initial_value='changeme')
# for spectral tomo
self.add_parameter('repetitions',
label='Repetitions of CZ gate, used for spectral tomo',
parameter_class=ManualParameter,
vals=vals.Numbers(), initial_value=1)
self.add_parameter('time_series',
label='',
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('overrotation_sims',
label='instead of constant shift in flux, we use constant rotations around some axis',
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('axis_overrotation',
label='',
parameter_class=ManualParameter,
vals=vals.Arrays(), initial_value=np.array([1,0,0]))
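# Minimal usage sketch, assuming the usual QCoDeS get/set convention for
# ManualParameter (values below are purely illustrative):
#
#   noise_pars = NoiseParametersCZ('noise_pars_CZ')
#   noise_pars.T1_q0(34e-6)      # set T1 of the fluxing qubit
#   noise_pars.T1_q0()           # read it back -> 3.4e-05
#   noise_pars.close()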
|
DiCarloLab-Delft/PycQED_py3
|
pycqed/instrument_drivers/virtual_instruments/noise_parameters_CZ_new.py
|
Python
|
mit
| 7,569
|
[
"Gaussian"
] |
41592b1d8ea32a0d9ac990ea24067e0904711e14d5d202ad533b5b91354413c8
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017-2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A shared experiment class for recognizing 2D objects by using path integration
of unions of locations that are specific to objects.
"""
import abc
import math
import random
from collections import defaultdict
import numpy as np
from htmresearch.algorithms.apical_tiebreak_temporal_memory import (
ApicalTiebreakPairMemory)
from htmresearch.algorithms.location_modules import (
Superficial2DLocationModule, ThresholdedGaussian2DLocationModule)
RAT_BUMP_SIGMA = 0.18172
def computeRatModuleParametersFromCellCount(cellsPerAxis,
baselineCellsPerAxis=6):
"""
Compute 'cellsPerAxis', 'bumpSigma', and 'activeFiringRate' parameters for
:class:`ThresholdedGaussian2DLocationModule` given the number of cells per
axis. See :func:`createRatModuleFromCellCount`
"""
bumpSigma = RAT_BUMP_SIGMA * (baselineCellsPerAxis / float(cellsPerAxis))
activeFiringRate = ThresholdedGaussian2DLocationModule.chooseReliableActiveFiringRate(
cellsPerAxis, bumpSigma)
return {
"cellsPerAxis": cellsPerAxis,
"bumpSigma": bumpSigma,
"activeFiringRate": activeFiringRate
}
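# Worked example, following directly from the formula above: with the default
# baselineCellsPerAxis=6, asking for cellsPerAxis=12 halves the bump width to
# bumpSigma = 0.18172 * 6/12 ~= 0.091, and chooseReliableActiveFiringRate()
# then picks an active firing rate suited to that narrower bump.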
def computeRatModuleParametersFromReadoutResolution(inverseReadoutResolution,
enlargeModuleFactor=1.):
"""
Compute 'cellsPerAxis', 'bumpSigma', and 'activeFiringRate' parameters for
:class:`ThresholdedGaussian2DLocationModule` given the
inverseReadoutResolution. See :func:`createRatModuleFromReadoutResolution`
"""
# Give the module enough precision in its learning so that the bump is the
# specified diameter when properly accounting for uncertainty.
cellsPerAxis = int(math.ceil(2*inverseReadoutResolution*enlargeModuleFactor))
bumpSigma = RAT_BUMP_SIGMA / enlargeModuleFactor
readoutResolution = 1. / (enlargeModuleFactor*inverseReadoutResolution)
activeFiringRate = ThresholdedGaussian2DLocationModule.chooseReliableActiveFiringRate(
cellsPerAxis, bumpSigma, readoutResolution)
return {
"cellsPerAxis": cellsPerAxis,
"bumpSigma": bumpSigma,
"activeFiringRate": activeFiringRate
}
def createRatModuleFromCellCount(cellsPerAxis, baselineCellsPerAxis=6,
**kwargs):
"""
@param baselineCellsPerAxis (int or float)
When cellsPerAxis == baselineCellsPerAxis, the bump of firing rates will
resemble a bump in rat entorhinal cortex. We'll then apply a threshold to this
firing rate, converting the bump into 4 - 7 active cells (It could be 2x2
cells, or it could be a hexagon of cells, depending on where the bump is
relative to the cells). As cellsPerAxis grows, the bump of firing rates and
bump of active cells will stay fixed relative to the cells, so they will
shrink relative to the module as a whole. Given this approach, the
baselineCellsPerAxis implies the readout resolution of a grid cell module.
Because the bump of thresholded active cells will always be the same size, if
baselineCellsPerAxis=6, that implies that the readout resolution is
approximately 1/3. If baselineCellsPerAxis=8, the readout resolution is
approximately 1/4.
"""
params = computeRatModuleParametersFromCellCount(cellsPerAxis,
baselineCellsPerAxis)
params.update(kwargs)
return ThresholdedGaussian2DLocationModule(**params)
def createRatModuleFromReadoutResolution(inverseReadoutResolution, scale,
enlargeModuleFactor=1.,
fixedScale=False, **kwargs):
"""
@param inverseReadoutResolution (int or float)
Equivalent to 1/readoutResolution, but specified this way as a convenience
(because it's easier and less ambiguous to type 3 than to type 0.3333333). The
readout resolution specifies the diameter of the circle of phases in the
rhombus encoded by a bump. So when a bump of activity is converted into a set
of active cells, this circle of active cells will have a diameter of at least
this amount.
@param enlargeModuleFactor (float)
A multiplicative factor that's used to simulate the effect of having a larger
module, keeping the bump size fixed but making the module larger, so that the
bump is smaller relative to the size of the module. Equivalently, this shrinks
the bump, increases the precision of the readout, adds more cells, and
increases the scale so that the bump is the same size when overlayed on the
real world.
@param fixedScale (bool)
By default, the enlargeModuleFactor will increase the scale, effectively
holding the bump size constant relative to physical space. Set this to True to
hold the scale constant, so enlarging the module causes the bump size to
shrink relative to physical space.
"""
params = computeRatModuleParametersFromReadoutResolution(inverseReadoutResolution,
enlargeModuleFactor)
params.update(kwargs)
params["scale"] = (scale if fixedScale else scale * enlargeModuleFactor)
return ThresholdedGaussian2DLocationModule(**params)
class PIUNCorticalColumn(object):
"""
A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
which drives activity in L6a. Motor input causes L6a to perform path
integration, updating its activity, which then depolarizes cells in L4.
Whenever the sensor moves, call movementCompute. Whenever a sensory input
arrives, call sensoryCompute.
"""
def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
"""
@param L4Overrides (dict)
Custom parameters for L4
@param locationConfigs (sequence of dicts)
Parameters for the location modules
"""
self.bumpType = bumpType
L4cellCount = 150*16
if bumpType == "gaussian":
self.L6aModules = [
createRatModuleFromCellCount(
anchorInputSize=L4cellCount,
**config)
for config in locationConfigs]
elif bumpType == "gaussian2":
self.L6aModules = [
createRatModuleFromReadoutResolution(
anchorInputSize=L4cellCount,
**config)
for config in locationConfigs]
elif bumpType == "square":
self.L6aModules = [
Superficial2DLocationModule(
anchorInputSize=L4cellCount,
**config)
for config in locationConfigs]
else:
raise ValueError("Invalid bumpType", bumpType)
L4Params = {
"columnCount": 150,
"cellsPerColumn": 16,
"basalInputSize": sum(module.numberOfCells()
for module in self.L6aModules)
}
if L4Overrides is not None:
L4Params.update(L4Overrides)
self.L4 = ApicalTiebreakPairMemory(**L4Params)
def movementCompute(self, displacement, noiseFactor = 0, moduleNoiseFactor = 0):
"""
@param displacement (dict)
The change in location. Example: {"top": 10, "left", 10}
@return (dict)
Data for logging/tracing.
"""
if noiseFactor != 0:
xdisp = np.random.normal(0, noiseFactor)
ydisp = np.random.normal(0, noiseFactor)
else:
xdisp = 0
ydisp = 0
locationParams = {
"displacement": [displacement["top"] + ydisp,
displacement["left"] + xdisp],
"noiseFactor": moduleNoiseFactor
}
for module in self.L6aModules:
module.movementCompute(**locationParams)
return locationParams
def sensoryCompute(self, activeMinicolumns, learn):
"""
@param activeMinicolumns (numpy array)
List of indices of minicolumns to activate.
@param learn (bool)
If True, the two layers should learn this association.
@return (tuple of dicts)
Data for logging/tracing.
"""
inputParams = {
"activeColumns": activeMinicolumns,
"basalInput": self.getLocationRepresentation(),
"basalGrowthCandidates": self.getLearnableLocationRepresentation(),
"learn": learn
}
self.L4.compute(**inputParams)
locationParams = {
"anchorInput": self.L4.getActiveCells(),
"anchorGrowthCandidates": self.L4.getWinnerCells(),
"learn": learn,
}
for module in self.L6aModules:
module.sensoryCompute(**locationParams)
return (inputParams, locationParams)
def reset(self):
"""
Clear all cell activity.
"""
self.L4.reset()
for module in self.L6aModules:
module.reset()
def activateRandomLocation(self):
"""
Activate a random location in the location layer.
"""
for module in self.L6aModules:
module.activateRandomLocation()
def getSensoryRepresentation(self):
"""
Gets the active cells in the sensory layer.
"""
return self.L4.getActiveCells()
def getLocationRepresentation(self):
"""
Get the full population representation of the location layer.
"""
activeCells = np.array([], dtype="uint32")
totalPrevCells = 0
for module in self.L6aModules:
activeCells = np.append(activeCells,
module.getActiveCells() + totalPrevCells)
totalPrevCells += module.numberOfCells()
return activeCells
def getLearnableLocationRepresentation(self):
"""
Get the cells in the location layer that should be associated with the
sensory input layer representation. In some models, this is identical to the
active cells. In others, it's a subset.
"""
learnableCells = np.array([], dtype="uint32")
totalPrevCells = 0
for module in self.L6aModules:
learnableCells = np.append(learnableCells,
module.getLearnableCells() + totalPrevCells)
totalPrevCells += module.numberOfCells()
return learnableCells
def getSensoryAssociatedLocationRepresentation(self):
"""
Get the location cells in the location layer that were driven by the input
layer (or, during learning, were associated with this input.)
"""
cells = np.array([], dtype="uint32")
totalPrevCells = 0
for module in self.L6aModules:
cells = np.append(cells,
module.sensoryAssociatedCells + totalPrevCells)
totalPrevCells += module.numberOfCells()
return cells
class PIUNExperiment(object):
"""
An experiment class which passes sensory and motor inputs into a special two
layer network, tracks the location of a sensor on an object, and provides
hooks for tracing.
The network learns 2D "objects" which consist of arrangements of
"features". Whenever this experiment moves the sensor to a feature, it always
places it at the center of the feature.
The network's location layer represents "the location of the sensor in the
space of the object". Because it's a touch sensor, and because it always
senses the center of each feature, this is synonymous with "the location of
the feature in the space of the object" (but in other situations these
wouldn't be equivalent).
"""
def __init__(self, column,
featureNames,
numActiveMinicolumns=15,
noiseFactor = 0,
moduleNoiseFactor = 0):
"""
@param column (PIUNColumn)
A two-layer network.
@param featureNames (list)
A list of the features that will ever occur in an object.
"""
self.column = column
self.numActiveMinicolumns = numActiveMinicolumns
# Use these for classifying SDRs and for testing whether they're correct.
# Allow storing multiple representations, in case the experiment learns
# multiple points on a single feature. (We could switch to indexing these by
# objectName, featureIndex, coordinates.)
# Example:
# (objectName, featureIndex): [(0, 26, 54, 77, 101, ...), ...]
self.locationRepresentations = defaultdict(list)
self.inputRepresentations = {
# Example:
# (objectName, featureIndex, featureName): [0, 26, 54, 77, 101, ...]
}
# Generate a set of feature SDRs.
self.features = dict(
(k, np.array(sorted(random.sample(xrange(self.column.L4.numberOfColumns()),
self.numActiveMinicolumns)), dtype="uint32"))
for k in featureNames)
# For example:
# [{"name": "Object 1",
# "features": [
# {"top": 40, "left": 40, "width": 10, "height" 10, "name": "A"},
# {"top": 80, "left": 80, "width": 10, "height" 10, "name": "B"}]]
self.learnedObjects = []
# The location of the sensor. For example: {"top": 20, "left": 20}
self.locationOnObject = None
self.maxSettlingTime = 10
self.maxTraversals = 4
self.monitors = {}
self.nextMonitorToken = 1
self.noiseFactor = noiseFactor
self.moduleNoiseFactor = moduleNoiseFactor
self.representationSet = set()
def reset(self):
self.column.reset()
self.locationOnObject = None
for monitor in self.monitors.values():
monitor.afterReset()
def learnObject(self,
objectDescription,
randomLocation=False,
useNoise=False,
noisyTrainingTime=1):
"""
Train the network to recognize the specified object. Move the sensor to one of
its features and activate a random location representation in the location
layer. Move the sensor over the object, updating the location representation
through path integration. At each point on the object, form reciprocal
    connections between the representation of the location and the representation
of the sensory input.
@param objectDescription (dict)
For example:
{"name": "Object 1",
"features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"},
{"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]}
@return locationsAreUnique (bool)
True if this object was assigned a unique set of locations. False if a
location on this object has the same location representation as another
location somewhere else.
"""
self.reset()
self.column.activateRandomLocation()
locationsAreUnique = True
if randomLocation or useNoise:
numIters = noisyTrainingTime
else:
numIters = 1
for i in xrange(numIters):
for iFeature, feature in enumerate(objectDescription["features"]):
self._move(feature, randomLocation=randomLocation, useNoise=useNoise)
featureSDR = self.features[feature["name"]]
self._sense(featureSDR, learn=True, waitForSettle=False)
locationRepresentation = self.column.getSensoryAssociatedLocationRepresentation()
self.locationRepresentations[(objectDescription["name"],
iFeature)].append(locationRepresentation)
self.inputRepresentations[(objectDescription["name"],
iFeature, feature["name"])] = (
self.column.L4.getWinnerCells())
locationTuple = tuple(locationRepresentation)
locationsAreUnique = (locationsAreUnique and
locationTuple not in self.representationSet)
self.representationSet.add(tuple(locationRepresentation))
self.learnedObjects.append(objectDescription)
return locationsAreUnique
def inferObjectWithRandomMovements(self,
objectDescription,
numSensations=None,
randomLocation=False,
checkFalseConvergence=True):
"""
Attempt to recognize the specified object with the network. Randomly move
the sensor over the object until the object is recognized.
@param objectDescription (dict)
For example:
{"name": "Object 1",
"features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"},
{"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]}
@param numSensations (int or None)
Set this to run the network for a fixed number of sensations. Otherwise this
method will run until the object is recognized or until maxTraversals is
reached.
@return (bool)
True if inference succeeded
"""
self.reset()
for monitor in self.monitors.values():
monitor.beforeInferObject(objectDescription)
currentStep = 0
finished = False
inferred = False
inferredStep = None
prevTouchSequence = None
for _ in xrange(self.maxTraversals):
# Choose touch sequence.
while True:
touchSequence = range(len(objectDescription["features"]))
random.shuffle(touchSequence)
# Make sure the first touch will cause a movement.
if (prevTouchSequence is not None and
touchSequence[0] == prevTouchSequence[-1]):
continue
break
for iFeature in touchSequence:
currentStep += 1
feature = objectDescription["features"][iFeature]
self._move(feature, randomLocation=randomLocation)
featureSDR = self.features[feature["name"]]
self._sense(featureSDR, learn=False, waitForSettle=False)
if not inferred:
# Use the sensory-activated cells to detect whether the object has been
# recognized. In some models, this set of cells is equivalent to the
# active cells. In others, a set of cells around the sensory-activated
# cells become active. In either case, if these sensory-activated cells
# are correct, it implies that the input layer's representation is
# classifiable -- the location layer just correctly classified it.
representation = self.column.getSensoryAssociatedLocationRepresentation()
target_representations = set(np.concatenate(
self.locationRepresentations[
(objectDescription["name"], iFeature)]))
inferred = (set(representation) <= target_representations)
if inferred:
inferredStep = currentStep
if not inferred and tuple(representation) in self.representationSet:
# We have converged to an incorrect representation - declare failure.
print("Converged to an incorrect representation!")
return None
finished = ((inferred and numSensations is None) or
(numSensations is not None and currentStep == numSensations))
if finished:
break
prevTouchSequence = touchSequence
if finished:
break
for monitor in self.monitors.values():
monitor.afterInferObject(objectDescription, inferredStep)
return inferredStep
def _move(self, feature, randomLocation = False, useNoise = True):
"""
Move the sensor to the center of the specified feature. If the sensor is
currently at another location, send the displacement into the cortical
column so that it can perform path integration.
"""
if randomLocation:
locationOnObject = {
"top": feature["top"] + np.random.rand()*feature["height"],
"left": feature["left"] + np.random.rand()*feature["width"],
}
else:
locationOnObject = {
"top": feature["top"] + feature["height"]/2.,
"left": feature["left"] + feature["width"]/2.
}
if self.locationOnObject is not None:
displacement = {"top": locationOnObject["top"] -
self.locationOnObject["top"],
"left": locationOnObject["left"] -
self.locationOnObject["left"]}
if useNoise:
params = self.column.movementCompute(displacement,
self.noiseFactor,
self.moduleNoiseFactor)
else:
params = self.column.movementCompute(displacement, 0, 0)
for monitor in self.monitors.values():
monitor.afterLocationShift(**params)
else:
for monitor in self.monitors.values():
monitor.afterLocationInitialize()
self.locationOnObject = locationOnObject
for monitor in self.monitors.values():
monitor.afterLocationChanged(locationOnObject)
def _sense(self, featureSDR, learn, waitForSettle):
"""
Send the sensory input into the network. Optionally, send it multiple times
until the network settles.
"""
for monitor in self.monitors.values():
monitor.beforeSense(featureSDR)
iteration = 0
prevCellActivity = None
while True:
(inputParams,
locationParams) = self.column.sensoryCompute(featureSDR, learn)
if waitForSettle:
cellActivity = (set(self.column.getSensoryRepresentation()),
set(self.column.getLocationRepresentation()))
if cellActivity == prevCellActivity:
# It settled. Don't even log this timestep.
break
prevCellActivity = cellActivity
for monitor in self.monitors.values():
if iteration > 0:
monitor.beforeSensoryRepetition()
monitor.afterInputCompute(**inputParams)
monitor.afterLocationAnchor(**locationParams)
iteration += 1
if not waitForSettle or iteration >= self.maxSettlingTime:
break
def addMonitor(self, monitor):
"""
Subscribe to PIUNExperimentMonitor events.
@param monitor (PIUNExperimentMonitor)
An object that implements a set of monitor methods
@return (object)
An opaque object that can be used to refer to this monitor.
"""
token = self.nextMonitorToken
self.nextMonitorToken += 1
self.monitors[token] = monitor
return token
def removeMonitor(self, monitorToken):
"""
Unsubscribe from PIUNExperimentMonitor events.
@param monitorToken (object)
The return value of addMonitor() from when this monitor was added
"""
del self.monitors[monitorToken]
class PIUNExperimentMonitor(object):
"""
Abstract base class for a PIUNExperiment monitor.
"""
__metaclass__ = abc.ABCMeta
def beforeSense(self, featureSDR): pass
def beforeSensoryRepetition(self): pass
def beforeInferObject(self, obj): pass
def afterInferObject(self, obj, inferredStep): pass
def afterReset(self): pass
def afterLocationChanged(self, locationOnObject): pass
def afterLocationInitialize(self): pass
def afterLocationShift(self, **kwargs): pass
def afterLocationAnchor(self, **kwargs): pass
def afterInputCompute(self, **kwargs): pass
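# A minimal usage sketch (added for illustration; not part of the original
# module). "exp" stands for a hypothetical, already-constructed experiment
# object that exposes the addMonitor()/removeMonitor() methods shown above.
class PrintingMonitor(PIUNExperimentMonitor):
    """Example monitor that logs inference progress to stdout."""
    def beforeInferObject(self, obj):
        print("Inferring object: {}".format(obj["name"]))
    def afterInferObject(self, obj, inferredStep):
        print("Inferred {} after {} sensations".format(obj["name"], inferredStep))

# token = exp.addMonitor(PrintingMonitor())
# ... run an inference pass on exp ...
# exp.removeMonitor(token)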
|
neuroidss/nupic.research
|
htmresearch/frameworks/location/path_integration_union_narrowing.py
|
Python
|
agpl-3.0
| 23,428
|
[
"Gaussian"
] |
d9e52327d58e3818346a4e8521b2048f8ab1cfa6ef3b837b46db439d3e0b658e
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGoDb(RPackage):
"""A set of annotation maps describing the entire Gene
Ontology assembled using data from GO."""
homepage = "https://www.bioconductor.org/packages/GO.db/"
url = "https://www.bioconductor.org/packages/release/data/annotation/src/contrib/GO.db_3.4.1.tar.gz"
list_url = homepage
version('3.4.1', 'e16ee8921d8adc1ed3cbac2a3e35e386')
depends_on('r-annotationdbi', type=('build', 'run'))
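# Added usage note (not part of the recipe): with this file placed at
# var/spack/repos/builtin/packages/r-go-db/package.py, the package would
# typically be installed from the command line with something like
#
#     spack install [email protected]
#
# and the r-annotationdbi dependency declared above would be built first.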
|
wscullin/spack
|
var/spack/repos/builtin/packages/r-go-db/package.py
|
Python
|
lgpl-2.1
| 1,691
|
[
"Bioconductor"
] |
3890d4b51208be9b6fa71d5cbbe8dcd18d8368291441426b33467d030d22f45c
|
# 24.05.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
from sfepy.fem.periodic import *
filename_mesh = data_dir + '/meshes/2d/special/channels_symm944t.mesh'
if filename_mesh.find( 'symm' ) >= 0:
region_1 = {
'name' : 'Y1',
'select' : """elements of group 3""",
}
region_2 = {
'name' : 'Y2',
'select' : """elements of group 4 +e elements of group 6
+e elements of group 8""",
}
region_4 = {
'name' : 'Y1Y2',
'select' : """r.Y1 +e r.Y2""",
}
region_5 = {
'name' : 'Walls',
'select' : """r.EBCGamma1 +n r.EBCGamma2""",
}
region_310 = {
'name' : 'EBCGamma1',
'select' : """(elements of group 1 *n elements of group 3)
+n
(elements of group 2 *n elements of group 3)
""",
}
region_320 = {
'name' : 'EBCGamma2',
'select' : """(elements of group 5 *n elements of group 4)
+n
(elements of group 1 *n elements of group 4)
+n
(elements of group 7 *n elements of group 6)
+n
(elements of group 2 *n elements of group 6)
+n
(elements of group 9 *n elements of group 8)
+n
(elements of group 2 *n elements of group 8)
""",
}
w2 = 0.499
# Sides.
region_20 = {
'name' : 'Left',
'select' : 'nodes in (x < %.3f)' % -w2,
}
region_21 = {
'name' : 'Right',
'select' : 'nodes in (x > %.3f)' % w2,
}
region_22 = {
'name' : 'Bottom',
'select' : 'nodes in (y < %.3f)' % -w2,
}
region_23 = {
'name' : 'Top',
'select' : 'nodes in (y > %.3f)' % w2,
}
field_1 = {
'name' : '2_velocity',
'dtype' : 'real',
'shape' : (2,),
'region' : 'Y1Y2',
'approx_order' : 2,
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Y1Y2',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_velocity',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_velocity',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d2',
}
equations = {
'balance' :
"""dw_div_grad.i1.Y1Y2( fluid.viscosity, v, u )
- dw_stokes.i1.Y1Y2( v, p ) = 0""",
'incompressibility' :
"""dw_stokes.i1.Y1Y2( u, q ) = 0""",
}
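# Added note (not in the original example): the two equation strings above are
# the weak form of the Stokes problem on the region Y1Y2,
#   int_Y1Y2 nu * grad(v) : grad(u) - int_Y1Y2 p * div(v) = 0   for all test velocities v,
#   int_Y1Y2 q * div(u) = 0                                     for all test pressures q,
# where the viscosity nu comes from the 'fluid' material defined below and the
# integrals are evaluated with the 'i1' Gauss quadrature defined above.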
material_1 = {
'name' : 'fluid',
'values' : {
'viscosity' : 1.0,
'density' : 1e0,
},
}
ebc_1 = {
'name' : 'walls',
'region' : 'Walls',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'top_velocity',
'region' : 'Top',
'dofs' : {'u.1' : -1.0, 'u.0' : 0.0},
}
ebc_10 = {
'name' : 'bottom_pressure',
'region' : 'Bottom',
'dofs' : {'p.0' : 0.0},
}
epbc_1 = {
'name' : 'u_rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_line',
}
functions = {
'match_y_line' : (match_y_line,),
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 100,
'cache_override' : True,
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 2,
'eps_a' : 1e-8,
'eps_r' : 1e-2,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
save_format = 'hdf5' # 'hdf5' or 'vtk'
|
olivierverdier/sfepy
|
examples/navier_stokes/stokes.py
|
Python
|
bsd-3-clause
| 4,109
|
[
"VTK"
] |
e89da51952d7d404ac169c7423be38d5fabb024223163b1cb58a1405ff548386
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import py.path
import pytest
import sys
import _pytest.pytester as pytester
from _pytest.pytester import HookRecorder
from _pytest.pytester import CwdSnapshot, SysModulesSnapshot, SysPathsSnapshot
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED
def test_make_hook_recorder(testdir):
item = testdir.getitem("def test_func(): pass")
recorder = testdir.make_hook_recorder(item.config.pluginmanager)
assert not recorder.getfailures()
pytest.xfail("internal reportrecorder tests need refactoring")
class rep(object):
excinfo = None
passed = False
failed = True
skipped = False
when = "call"
recorder.hook.pytest_runtest_logreport(report=rep)
failures = recorder.getfailures()
assert failures == [rep]
failures = recorder.getfailures()
assert failures == [rep]
class rep(object):
excinfo = None
passed = False
failed = False
skipped = True
when = "call"
rep.passed = False
rep.skipped = True
recorder.hook.pytest_runtest_logreport(report=rep)
modcol = testdir.getmodulecol("")
rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
rep.passed = False
rep.failed = True
rep.skipped = False
recorder.hook.pytest_collectreport(report=rep)
passed, skipped, failed = recorder.listoutcomes()
assert not passed and skipped and failed
numpassed, numskipped, numfailed = recorder.countoutcomes()
assert numpassed == 0
assert numskipped == 1
assert numfailed == 1
assert len(recorder.getfailedcollections()) == 1
recorder.unregister()
recorder.clear()
recorder.hook.pytest_runtest_logreport(report=rep)
pytest.raises(ValueError, "recorder.getfailures()")
def test_parseconfig(testdir):
config1 = testdir.parseconfig()
config2 = testdir.parseconfig()
assert config2 != config1
assert config1 != pytest.config
def test_testdir_runs_with_plugin(testdir):
testdir.makepyfile("""
pytest_plugins = "pytester"
def test_hello(testdir):
assert 1
""")
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def make_holder():
class apiclass(object):
def pytest_xyz(self, arg):
"x"
def pytest_xyz_noarg(self):
"x"
apimod = type(os)('api')
def pytest_xyz(arg):
"x"
def pytest_xyz_noarg():
"x"
apimod.pytest_xyz = pytest_xyz
apimod.pytest_xyz_noarg = pytest_xyz_noarg
return apiclass, apimod
@pytest.mark.parametrize("holder", make_holder())
def test_hookrecorder_basic(holder):
pm = PytestPluginManager()
pm.addhooks(holder)
rec = HookRecorder(pm)
pm.hook.pytest_xyz(arg=123)
call = rec.popcall("pytest_xyz")
assert call.arg == 123
assert call._name == "pytest_xyz"
pytest.raises(pytest.fail.Exception, "rec.popcall('abc')")
pm.hook.pytest_xyz_noarg()
call = rec.popcall("pytest_xyz_noarg")
assert call._name == "pytest_xyz_noarg"
def test_makepyfile_unicode(testdir):
global unichr
try:
unichr(65)
except NameError:
unichr = chr
testdir.makepyfile(unichr(0xfffd))
def test_makepyfile_utf8(testdir):
"""Ensure makepyfile accepts utf-8 bytes as input (#2738)"""
utf8_contents = u"""
def setup_function(function):
mixed_encoding = u'São Paulo'
""".encode('utf-8')
p = testdir.makepyfile(utf8_contents)
assert u"mixed_encoding = u'São Paulo'".encode('utf-8') in p.read('rb')
class TestInlineRunModulesCleanup(object):
def test_inline_run_test_module_not_cleaned_up(self, testdir):
test_mod = testdir.makepyfile("def test_foo(): assert True")
result = testdir.inline_run(str(test_mod))
assert result.ret == EXIT_OK
# rewrite module, now test should fail if module was re-imported
test_mod.write("def test_foo(): assert False")
result2 = testdir.inline_run(str(test_mod))
assert result2.ret == EXIT_TESTSFAILED
def spy_factory(self):
class SysModulesSnapshotSpy(object):
instances = []
def __init__(self, preserve=None):
SysModulesSnapshotSpy.instances.append(self)
self._spy_restore_count = 0
self._spy_preserve = preserve
self.__snapshot = SysModulesSnapshot(preserve=preserve)
def restore(self):
self._spy_restore_count += 1
return self.__snapshot.restore()
return SysModulesSnapshotSpy
def test_inline_run_taking_and_restoring_a_sys_modules_snapshot(
self, testdir, monkeypatch):
spy_factory = self.spy_factory()
monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory)
original = dict(sys.modules)
testdir.syspathinsert()
testdir.makepyfile(import1="# you son of a silly person")
testdir.makepyfile(import2="# my hovercraft is full of eels")
test_mod = testdir.makepyfile("""
import import1
def test_foo(): import import2""")
testdir.inline_run(str(test_mod))
assert len(spy_factory.instances) == 1
spy = spy_factory.instances[0]
assert spy._spy_restore_count == 1
assert sys.modules == original
assert all(sys.modules[x] is original[x] for x in sys.modules)
def test_inline_run_sys_modules_snapshot_restore_preserving_modules(
self, testdir, monkeypatch):
spy_factory = self.spy_factory()
monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory)
test_mod = testdir.makepyfile("def test_foo(): pass")
testdir.inline_run(str(test_mod))
spy = spy_factory.instances[0]
assert not spy._spy_preserve("black_knight")
assert spy._spy_preserve("zope")
assert spy._spy_preserve("zope.interface")
assert spy._spy_preserve("zopelicious")
def test_external_test_module_imports_not_cleaned_up(self, testdir):
testdir.syspathinsert()
testdir.makepyfile(imported="data = 'you son of a silly person'")
import imported
test_mod = testdir.makepyfile("""
def test_foo():
import imported
imported.data = 42""")
testdir.inline_run(str(test_mod))
assert imported.data == 42
class TestInlineRunCleanSysPaths(object):
def test_sys_path_change_cleanup(self, testdir):
test_path1 = testdir.tmpdir.join("boink1").strpath
test_path2 = testdir.tmpdir.join("boink2").strpath
test_path3 = testdir.tmpdir.join("boink3").strpath
sys.path.append(test_path1)
sys.meta_path.append(test_path1)
original_path = list(sys.path)
original_meta_path = list(sys.meta_path)
test_mod = testdir.makepyfile("""
import sys
sys.path.append({test_path2!r})
sys.meta_path.append({test_path2!r})
def test_foo():
sys.path.append({test_path3!r})
sys.meta_path.append({test_path3!r})""".format(**locals()))
testdir.inline_run(str(test_mod))
assert sys.path == original_path
assert sys.meta_path == original_meta_path
def spy_factory(self):
class SysPathsSnapshotSpy(object):
instances = []
def __init__(self):
SysPathsSnapshotSpy.instances.append(self)
self._spy_restore_count = 0
self.__snapshot = SysPathsSnapshot()
def restore(self):
self._spy_restore_count += 1
return self.__snapshot.restore()
return SysPathsSnapshotSpy
def test_inline_run_taking_and_restoring_a_sys_paths_snapshot(
self, testdir, monkeypatch):
spy_factory = self.spy_factory()
monkeypatch.setattr(pytester, "SysPathsSnapshot", spy_factory)
test_mod = testdir.makepyfile("def test_foo(): pass")
testdir.inline_run(str(test_mod))
assert len(spy_factory.instances) == 1
spy = spy_factory.instances[0]
assert spy._spy_restore_count == 1
def test_assert_outcomes_after_pytest_error(testdir):
testdir.makepyfile("def test_foo(): assert True")
result = testdir.runpytest('--unexpected-argument')
with pytest.raises(ValueError, message="Pytest terminal report not found"):
result.assert_outcomes(passed=0)
def test_cwd_snapshot(tmpdir):
foo = tmpdir.ensure('foo', dir=1)
bar = tmpdir.ensure('bar', dir=1)
foo.chdir()
snapshot = CwdSnapshot()
bar.chdir()
assert py.path.local() == bar
snapshot.restore()
assert py.path.local() == foo
class TestSysModulesSnapshot(object):
key = 'my-test-module'
def test_remove_added(self):
original = dict(sys.modules)
assert self.key not in sys.modules
snapshot = SysModulesSnapshot()
sys.modules[self.key] = 'something'
assert self.key in sys.modules
snapshot.restore()
assert sys.modules == original
def test_add_removed(self, monkeypatch):
assert self.key not in sys.modules
monkeypatch.setitem(sys.modules, self.key, 'something')
assert self.key in sys.modules
original = dict(sys.modules)
snapshot = SysModulesSnapshot()
del sys.modules[self.key]
assert self.key not in sys.modules
snapshot.restore()
assert sys.modules == original
def test_restore_reloaded(self, monkeypatch):
assert self.key not in sys.modules
monkeypatch.setitem(sys.modules, self.key, 'something')
assert self.key in sys.modules
original = dict(sys.modules)
snapshot = SysModulesSnapshot()
sys.modules[self.key] = 'something else'
snapshot.restore()
assert sys.modules == original
def test_preserve_modules(self, monkeypatch):
key = [self.key + str(i) for i in range(3)]
assert not any(k in sys.modules for k in key)
for i, k in enumerate(key):
monkeypatch.setitem(sys.modules, k, 'something' + str(i))
original = dict(sys.modules)
def preserve(name):
return name in (key[0], key[1], 'some-other-key')
snapshot = SysModulesSnapshot(preserve=preserve)
sys.modules[key[0]] = original[key[0]] = 'something else0'
sys.modules[key[1]] = original[key[1]] = 'something else1'
sys.modules[key[2]] = 'something else2'
snapshot.restore()
assert sys.modules == original
def test_preserve_container(self, monkeypatch):
original = dict(sys.modules)
assert self.key not in original
replacement = dict(sys.modules)
replacement[self.key] = 'life of brian'
snapshot = SysModulesSnapshot()
monkeypatch.setattr(sys, 'modules', replacement)
snapshot.restore()
assert sys.modules is replacement
assert sys.modules == original
@pytest.mark.parametrize('path_type', ('path', 'meta_path'))
class TestSysPathsSnapshot(object):
other_path = {
'path': 'meta_path',
'meta_path': 'path'}
@staticmethod
def path(n):
return 'my-dirty-little-secret-' + str(n)
def test_restore(self, monkeypatch, path_type):
other_path_type = self.other_path[path_type]
for i in range(10):
assert self.path(i) not in getattr(sys, path_type)
sys_path = [self.path(i) for i in range(6)]
monkeypatch.setattr(sys, path_type, sys_path)
original = list(sys_path)
original_other = list(getattr(sys, other_path_type))
snapshot = SysPathsSnapshot()
transformation = {
'source': (0, 1, 2, 3, 4, 5),
'target': ( 6, 2, 9, 7, 5, 8)} # noqa: E201
assert sys_path == [self.path(x) for x in transformation['source']]
sys_path[1] = self.path(6)
sys_path[3] = self.path(7)
sys_path.append(self.path(8))
del sys_path[4]
sys_path[3:3] = [self.path(9)]
del sys_path[0]
assert sys_path == [self.path(x) for x in transformation['target']]
snapshot.restore()
assert getattr(sys, path_type) is sys_path
assert getattr(sys, path_type) == original
assert getattr(sys, other_path_type) == original_other
def test_preserve_container(self, monkeypatch, path_type):
other_path_type = self.other_path[path_type]
original_data = list(getattr(sys, path_type))
original_other = getattr(sys, other_path_type)
original_other_data = list(original_other)
new = []
snapshot = SysPathsSnapshot()
monkeypatch.setattr(sys, path_type, new)
snapshot.restore()
assert getattr(sys, path_type) is new
assert getattr(sys, path_type) == original_data
assert getattr(sys, other_path_type) is original_other
assert getattr(sys, other_path_type) == original_other_data
|
tareqalayan/pytest
|
testing/test_pytester.py
|
Python
|
mit
| 13,187
|
[
"Brian"
] |
7c9a593c29108603a336d9547624f0333cec67a79fafbee6a0f12b709131d4d9
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import sys
sys.path.append('../../python')
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
import os
import signal
PID = os.getpid()
def doNothing( *args ):
pass
signal.signal( signal.SIGUSR1, doNothing )
concA = 0.005 # millimolar
def makeModel():
"""
This example illustrates how to set up a diffusion/transport model with
a simple reaction-diffusion system in a tapering cylinder:
| Molecule **a** diffuses with diffConst of 10e-12 m^2/s.
| Molecule **b** diffuses with diffConst of 5e-12 m^2/s.
| Molecule **b** also undergoes motor transport with a rate of 10e-6 m/s
| Thus it 'piles up' at the end of the cylinder.
| Molecule **c** does not move: diffConst = 0.0
| Molecule **d** does not move: diffConst = 10.0e-12 but it is buffered.
| Because it is buffered, it is treated as non-diffusing.
All molecules other than **d** start out only in the leftmost (first)
voxel, with a concentration of 1 mM. **d** is present throughout
at 0.2 mM, except in the last voxel, where it is at 1.0 mM.
The cylinder has a starting radius of 2 microns, and end radius of
1 micron. So when the molecule undergoing motor transport gets to the
narrower end, its concentration goes up.
There is a little reaction in all compartments: ``b + d <===> c``
As there is a high concentration of **d** in the last compartment,
when the molecule **b** reaches the end of the cylinder, the reaction
produces lots of **c**.
Note that molecule **a** does not participate in this reaction.
The concentrations of all molecules are displayed in an animation.
"""
# create container for model
r0 = 2e-6 # m
r1 = 1e-6 # m
num = 100
diffLength = 1e-6 # m
len = num * diffLength # m
diffConst = 10e-12
#motorRate = 1e-6
#diffConst = 0
motorRate = 0
model = moose.Neutral( 'model' )
compartment = moose.CylMesh( '/model/compartment' )
compartment.r0 = r0
compartment.r1 = r1
compartment.x0 = 0
compartment.x1 = len
compartment.diffLength = diffLength
assert( compartment.numDiffCompts == num )
# create molecules and reactions
a = moose.Pool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
c = moose.Pool( '/model/compartment/c' )
d = moose.Pool( '/model/compartment/d' )
r1 = moose.Reac( '/model/compartment/r1' )
moose.connect( r1, 'sub', b, 'reac' )
moose.connect( r1, 'sub', d, 'reac' )
moose.connect( r1, 'prd', c, 'reac' )
r1.Kf = 1000.0 # 1/(mM.sec)
r1.Kb = 1 # 1/sec
# Assign parameters
a.diffConst = diffConst
b.diffConst = diffConst / 2.0
b.motorConst = motorRate
c.diffConst = diffConst
d.diffConst = diffConst
# Make solvers
ksolve = moose.Gsolve( '/model/compartment/ksolve' )
dsolve = moose.Dsolve( '/model/compartment/dsolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
os.kill( PID, signal.SIGUSR1 )
stoich.path = "/model/compartment/##"
print dsolve.numPools
assert( dsolve.numPools == 4 )
a.vec.concInit = concA
b.vec.concInit = concA / 5.0
c.vec.concInit = concA
d.vec.concInit = concA / 5.0
for i in range( num ):
d.vec[i].concInit = concA * 2 * i / num
#d.vec[num/2:num].concInit = concA * 1.5
def makePlots():
plt.ion()
fig = plt.figure( figsize=(12,6) )
dynamic = fig.add_subplot( 111 )
a = moose.vec( '/model/compartment/a' )
b = moose.vec( '/model/compartment/b' )
c = moose.vec( '/model/compartment/c' )
d = moose.vec( '/model/compartment/d' )
pos = numpy.arange( 0, a.conc.size, 1 )
aline, = dynamic.plot( pos, a.conc, label='a' )
bline, = dynamic.plot( pos, b.conc, label='b' )
cline, = dynamic.plot( pos, c.conc, label='c' )
dline, = dynamic.plot( pos, d.conc, label='d' )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Cylinder voxel #' )
plt.legend()
timelabel = plt.text( 10, concA * 3, 'time = 0.0' )
#fig.canvas.draw()
plt.ylim( 0, 4.0 * concA )
return( fig, dynamic, timelabel, aline, bline, cline, dline )
def updatePlots( plotlist, time ):
a = moose.vec( '/model/compartment/a' )
b = moose.vec( '/model/compartment/b' )
c = moose.vec( '/model/compartment/c' )
d = moose.vec( '/model/compartment/d' )
plotlist[2].set_text( "time = %g" % time )
plotlist[3].set_ydata( a.conc )
plotlist[4].set_ydata( b.conc )
plotlist[5].set_ydata( c.conc )
plotlist[6].set_ydata( d.conc )
#plotlist[0].canvas.draw()
def main():
runtime = 20.0
diffdt = 0.005
plotdt = 0.1
makeModel()
# Set up clocks. The dsolve needs to know its clock before the stoich path is assigned.
moose.setClock( 10, diffdt ) # 10 is the standard clock for Dsolve.
moose.setClock( 16, plotdt ) # 16 is the standard clock for Ksolve.
a = moose.element( '/model/compartment/a' )
b = moose.element( '/model/compartment/b' )
c = moose.element( '/model/compartment/c' )
d = moose.element( '/model/compartment/d' )
moose.reinit()
atot = sum( a.vec.n )
btot = sum( b.vec.n )
ctot = sum( c.vec.n )
dtot = sum( d.vec.n )
plotlist = makePlots()
for t in numpy.arange( 0, runtime, plotdt ):
moose.start( plotdt )
updatePlots( plotlist, t )
# moose.start( runtime ) # Run the model
atot2 = sum( a.vec.n )
btot2 = sum( b.vec.n )
ctot2 = sum( c.vec.n )
dtot2 = sum( d.vec.n )
print 'Ratio of initial to final total numbers of a, b, c, d = '
print atot2/atot, btot2/btot, ctot2/ctot, dtot2/dtot
print 'Initial to final (b+c)=', (btot2 + ctot2) / (btot + ctot )
print "\nHit 'enter' to exit"
#raw_input()
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
dilawar/moose-full
|
moose-examples/snippets/gssaCylinderDiffusion.py
|
Python
|
gpl-2.0
| 6,418
|
[
"MOOSE"
] |
e0fda5259d53f7e69eb1f24f2a72cd47d03e398f7024f2a391fbfb8f2107259e
|
# (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from six import iteritems, string_types
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.helpers import load_list_of_roles
from ansible.playbook.role.include import RoleInclude
__all__ = ['RoleMetadata']
class RoleMetadata(Base):
'''
This class wraps the parsing and validation of the optional metadata
within each Role (meta/main.yml).
'''
_allow_duplicates = FieldAttribute(isa='bool', default=False)
_dependencies = FieldAttribute(isa='list', default=[])
_galaxy_info = FieldAttribute(isa='GalaxyInfo')
def __init__(self, owner=None):
self._owner = owner
super(RoleMetadata, self).__init__()
@staticmethod
def load(data, owner, variable_manager=None, loader=None):
'''
Returns a new RoleMetadata object based on the datastructure passed in.
'''
if not isinstance(data, dict):
raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
return m
def _load_dependencies(self, attr, ds):
'''
This is a helper loading function for the dependencies list,
which returns a list of RoleInclude objects
'''
current_role_path = None
if self._owner:
current_role_path = os.path.dirname(self._owner._role_path)
return load_list_of_roles(ds, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)
def _load_galaxy_info(self, attr, ds):
'''
This is a helper loading function for the galaxy info entry
in the metadata. It is intended to return a GalaxyInfo object
rather than a simple dictionary, but currently passes the parsed
datastructure through unchanged.
'''
return ds
def serialize(self):
return dict(
allow_duplicates = self._allow_duplicates,
dependencies = self._dependencies,
)
def deserialize(self, data):
setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
setattr(self, 'dependencies', data.get('dependencies', []))
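# Added illustration (not part of the module): the datastructure handed to
# RoleMetadata.load() is simply the parsed meta/main.yml dict; the role owner
# and loader below are hypothetical placeholders.
#
#   data = {
#       'allow_duplicates': True,
#       'dependencies': ['common', {'role': 'apache'}],
#       'galaxy_info': {'author': 'someone', 'license': 'MIT'},
#   }
#   meta = RoleMetadata.load(data, owner=my_role, loader=my_loader)
#
# Passing anything that is not a dict raises AnsibleParserError, as enforced
# in load() above.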
|
ravello/ansible
|
v2/ansible/playbook/role/metadata.py
|
Python
|
gpl-3.0
| 3,133
|
[
"Galaxy"
] |
affb12a32979165828ffe3e5fd570a484d0a0bba7e6f975d04ba7e02a8ea4486
|
from pymol.wizard import Wizard
from pymol import cmd
import pymol
class Annotation(Wizard):
def get_event_mask(self):
return Wizard.event_mask_scene+Wizard.event_mask_state+Wizard.event_mask_frame
def do_scene(self):
self.cmd.dirty_wizard()
def do_frame(self,frame):
self.cmd.dirty_wizard()
def do_state(self,state):
self.cmd.dirty_wizard()
def get_prompt(self):
prompt = []
pymol = self.cmd._pymol
if hasattr(pymol.session,'annotation'):
anno_dict = pymol.session.annotation
for obj in self.cmd.get_names('objects',1): # enabled objects
state_dict = anno_dict.get(obj,{})
state = self.cmd.get_state()
anno_list = state_dict.get(state,[])
prompt.extend(anno_list)
return prompt
def get_panel(self):
return [
[ 1, 'Annotation', '' ],
[ 2, 'Dismiss', 'cmd.set_wizard()' ]
]
import copy
import string
import re
from chempy.sdf import SDF
def load_annotated_sdf(filename, object=None, state=1, discrete=1, _self=cmd):
pymol=_self._pymol
cmd=_self
# get object name from file prefix
if object==None:
object = re.sub(r"\.[sS][dD][fF]$","",filename)
# open the SD file
inp_sdf = SDF(filename)
# create a persistent place to store the annotations
if not hasattr(pymol.session,'annotation'):
pymol.session.annotation = {}
# create a state-indexed dictionary for this object
state_dict = {}
pymol.session.annotation[object] = state_dict
while 1:
# get next record
sdf_rec = inp_sdf.read()
# if at end of list, break out of loop
if not sdf_rec: break
# get the MOL portion of the record
mol_list = sdf_rec.get('MOL')
# load it into PyMOL
cmd.read_molstr(string.join(mol_list,''),object,
state,finish=0,discrete=discrete)
# populate with tuple containing ordered list of keys
# and associated data dictionary
anno_list = [ "\\955"+object ]
for key in sdf_rec.kees:
if (key!='MOL'):
data = sdf_rec.data[key]
print key,data
anno_list.append(" \\595%s: \\559%s"%(
key,
string.join(map(string.strip,sdf_rec.data[key]))))
state_dict[state] = anno_list
# increment the state index
state = state + 1
if state > 1:
cmd.zoom(object)
cmd.finish_object(object)
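# Added usage sketch (file and object names are hypothetical): from a PyMOL
# session this module would typically be used as
#
#   load_annotated_sdf("ligands.sdf", object="ligands")
#   cmd.set_wizard(Annotation())
#
# so that stepping through states displays the per-record SD tags stored in
# pymol.session.annotation as the wizard prompt.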
|
gratefulfrog/lib
|
python/pymol/wizard/annotation.py
|
Python
|
gpl-2.0
| 2,654
|
[
"ChemPy",
"PyMOL"
] |
242217a5a1edc9dfacb71c3822a33bb66564417418455dd20a254dabab420b78
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def distribution_behaviorGBM():
#Log.info("==============================")
#Log.info("Default Behavior - Gaussian")
#Log.info("==============================")
eco = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
# 0/1 response: expect gaussian
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"])
assert isinstance(eco_model,h2o.model.regression.H2ORegressionModel)
# more than 2 integers for response: expect gaussian
cars = h2o.import_file(path=tests.locate("smalldata/junit/cars.csv"))
cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"])
assert isinstance(cars_model,h2o.model.regression.H2ORegressionModel)
#Log.info("==============================")
#Log.info("Gaussian Behavior")
#Log.info("==============================")
# 0/1 response: expect gaussian
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"], distribution="gaussian")
assert isinstance(eco_model,h2o.model.regression.H2ORegressionModel)
# character response: expect error
try:
eco_model = h2o.gbm(x=eco[1:8], y=eco["Method"], distribution="gaussian")
assert False, "expected an error"
except EnvironmentError:
assert True
#Log.info("==============================")
#Log.info("Bernoulli Behavior")
#Log.info("==============================")
# 0/1 response: expect bernoulli
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"].asfactor(), distribution="bernoulli")
assert isinstance(eco_model,h2o.model.binomial.H2OBinomialModel)
# 2 level character response: expect bernoulli
tree = h2o.import_file(path=tests.locate("smalldata/junit/test_tree_minmax.csv"))
tree_model = h2o.gbm(x=tree[0:3], y=tree["response"], distribution="bernoulli", min_rows=1)
assert isinstance(tree_model,h2o.model.binomial.H2OBinomialModel)
# more than two integers for response: expect error
try:
cars_mod = h2o.gbm(x=cars[3:7], y=cars["cylinders"], distribution="bernoulli")
assert False, "expected an error"
except EnvironmentError:
assert True
# more than two character levels for response: expect error
try:
eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="bernoulli")
assert False, "expected an error"
except EnvironmentError:
assert True
#Log.info("==============================")
#Log.info("Multinomial Behavior")
#Log.info("==============================")
# more than two integers for response: expect multinomial
cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"].asfactor(), distribution="multinomial")
assert isinstance(cars_model,h2o.model.multinomial.H2OMultinomialModel)
# more than two character levels for response: expect multinomial
eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="multinomial")
assert isinstance(eco_model,h2o.model.multinomial.H2OMultinomialModel)
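# Added summary of the behaviour exercised above:
#   0/1 numeric response, default or "gaussian" -> H2ORegressionModel
#   >2 integer levels, default or "gaussian" -> H2ORegressionModel
#   0/1 factor or 2-level categorical with "bernoulli" -> H2OBinomialModel
#   factor/categorical with >2 levels and "multinomial" -> H2OMultinomialModel
#   mismatched combinations (e.g. "bernoulli" with >2 levels, "gaussian" with
#   a character response) raise EnvironmentError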
if __name__ == "__main__":
tests.run_test(sys.argv, distribution_behaviorGBM)
|
kyoren/https-github.com-h2oai-h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_loss_behaviorGBM.py
|
Python
|
apache-2.0
| 2,973
|
[
"Gaussian"
] |
575db2075f13e2917d7ec8ac6716adcff9b31bfcd44326e1332eca531b73ff37
|
#!/usr/bin/env python
import vtk
import numpy as np
from vmtk import vmtkscripts
import argparse
import copy
import sys
# extract region boundaries
class GetBoundaries():
def __init__(self, args):
self.Surface = None
self.InputFile = args.surface
self.OutputFile = args.file_out
self.FeatureAngle = 50.0 #args.feature_angle
self.NumberOfRegions = 0
self.RegionIdsArrayName = "RegionsId"
self.boundaries = vtk.vtkFeatureEdges()
self.NewScalars = vtk.vtkIntArray()
self.RegionAreas = vtk.vtkDoubleArray()
self.mesh = vtk.vtkPolyData()
self.BoundaryLines = vtk.vtkPolyData()
self.BoundaryPointArray = vtk.vtkIntArray()
self.BoundaryCellArray = vtk.vtkIntArray()
self.CheckCells = vtk.vtkIdList()
self.CheckCells2 = vtk.vtkIdList()
self.CheckCellsCareful = vtk.vtkIdList()
self.CheckCellsCareful2 = vtk.vtkIdList()
# dynamic alloated arrays
self.checked = None
self.checkedcarefully = None
self.pointMapper = None
#Feature edges options
self.BoundaryEdges = 0
self.ManifoldEdges = 0
self.NonManifoldEdges = 0
self.FeatureEdges = 1
self.ExtractLargestRegion = 0
""" brief Function to flood fill region fast. """
def FindBoundaryRegion(self, reg, start):
#Variables used in function
# i
# j,k,l,cellId
#vtkIdType *pts = 0
#vtkIdType npts = 0
#vtkIdType numNei, nei, p1, p2, nIds, neis
npts = 0
#Id List to store neighbor cells for each set of nodes and a cell
neighbors = vtk.vtkIdList()
tmp = vtk.vtkIdList()
#pts = vtk.vtkIdList()
#Variable for accessing neiIds list
sz = 0 #vtkIdType
#Variables for the boundary cells adjacent to the boundary point
bLinesOne = vtk.vtkIdList()
bLinesTwo = vtk.vtkIdList()
numCheckCells = 0
pts = vtk.vtkIdList()
# Get neighboring cell for each pair of points in current cell
while (self.CheckCells.GetNumberOfIds() > 0):
numCheckCells = self.CheckCells.GetNumberOfIds()
print("peace", numCheckCells)
for c in range(numCheckCells):
cellId = self.CheckCells.GetId(c)
#Get the three points of the cell
self.mesh.GetCellPoints(cellId,pts)
if (self.checked.GetValue(cellId) == 0):
#Mark cell as checked and insert the fillnumber value to cell
self.NewScalars.InsertValue(cellId,reg)
self.checked.SetValue(cellId, 1)
for i in range(pts.GetNumberOfIds()):
p1 = pts.GetId(i)
#Get the cells attached to each point
self.mesh.GetPointCells(p1,neighbors)
numNei = neighbors.GetNumberOfIds()
#print(numNei)
#For each neighboring cell
for j in range(numNei):
#print(self.BoundaryCellArray.GetValue(neighbors.GetId(j)))
#If this cell is close to a boundary
if (self.BoundaryCellArray.GetValue(neighbors.GetId(j))):
#If this cell hasn't been checked already
if (self.checkedcarefully.GetValue(neighbors.GetId(j)) == 0):
print("test hoop")
#Add this cell to the careful check cells list and run
#the region finding tip toe code
self.CheckCellsCareful.InsertNextId(neighbors.GetId(j))
self.FindBoundaryRegionTipToe(reg)
self.CheckCellsCareful.Reset()
self.CheckCellsCareful2.Reset()
#Cell needs to be added to check list
else:
self.CheckCells2.InsertNextId(neighbors.GetId(j))
#If the start cell is a boundary cell
elif (self.checkedcarefully.GetValue(cellId) == 0 and start):
#Reset the check cell list and start a careful search
start=0
self.CheckCells.Reset()
print("I have been added begin {0}".format(cellId))
self.CheckCellsCareful.InsertNextId(cellId)
self.FindBoundaryRegionTipToe(reg)
#Swap the current check list to the full check list and continue
tmp.DeepCopy(self.CheckCells)
self.CheckCells.DeepCopy( self.CheckCells2)
self.CheckCells2.DeepCopy(tmp)
tmp.Reset()
""" Function to flood fill region slower, but is necessary close
to boundaries to make sure it doesn't step over boundary.
"""
def FindBoundaryRegionTipToe(self, reg):
#Variables used in function
#int i
#vtkIdType j,k,l
#vtkIdType *pts = 0
#vtkIdType npts = 0
#vtkIdType cellId
#vtkIdType numNei, nei, p1, p2, nIds, neiId
#Id List to store neighbor cells for each set of nodes and a cell
tmp = vtk.vtkIdList()
neiIds = vtk.vtkIdList()
#Variable for accessing neiIds list
sz = 0
#Variables for the boundary cells adjacent to the boundary point
bLinesOne = vtk.vtkIdList()
bLinesTwo = vtk.vtkIdList()
numCheckCells = 0.0
pts = vtk.vtkIdList()
#Get neighboring cell for each pair of points in current cell
#While there are still cells to be checked
while ( self.CheckCellsCareful.GetNumberOfIds() > 0):
numCheckCells = self.CheckCellsCareful.GetNumberOfIds()
for c in range(numCheckCells):
neiIds.Reset()
cellId = self.CheckCellsCareful.GetId(c)
#Get the three points of the cell
self.mesh.GetCellPoints(cellId,pts)
if ( self.checkedcarefully.GetValue(cellId) == 0):
#Update this cell to have been checked carefully and assign it
#with the fillnumber scalar
self.NewScalars.InsertValue(cellId,reg)
self.checkedcarefully.SetValue(cellId, 1)
#For each edge of the cell
print("Checking edges of cell {0}".format(cellId))
for i in range(pts.GetNumberOfIds()):
p1 = pts.GetId(i)
p2 = pts.GetId((i+1)%(pts.GetNumberOfIds()))
neighbors = vtk.vtkIdList()
#Initial check to make sure the cell is in fact a face cell
self.mesh.GetCellEdgeNeighbors(cellId,p1,p2,neighbors)
numNei = neighbors.GetNumberOfIds()
#Check to make sure it is an outside surface cell,
#i.e. one neighbor
if (numNei==1):
count = 0
#Check to see if cell is on the boundary,
#if it is get adjacent lines
if ( self.BoundaryPointArray.GetValue(p1) == 1):
count += 1
if ( self.BoundaryPointArray.GetValue(p2) == 1):
count += 1
nei=neighbors.GetId(0)
#if cell is not on the boundary, add new cell to check list
if (count < 2):
neiIds.InsertNextId(nei)
#if cell is on the boundary, check whether that is a false positive;
#only genuine boundary cells are kept off the check list. This is done by
#getting the boundary lines attached to each point, then
#intersecting the two lists. If the result is zero, then this
#is a false positive
else:
self.BoundaryLines.BuildLinks()
bPt1 = self.pointMapper.GetPoint(p1)
self.BoundaryLines.GetPointCells(bPt1,bLinesOne)
bPt2 = self.pointMapper.GetPoint(p2)
self.BoundaryLines.GetPointCells(bPt2,bLinesTwo)
bLinesOne.IntersectWith(bLinesTwo)
#Cell is false positive. Add to check list.
if (bLinesOne.GetNumberOfIds() == 0):
neiIds.InsertNextId(nei)
nIds = neiIds.GetNumberOfIds()
if (nIds>0):
#Add all Ids in current list to global list of Ids
for k in range(nIds):
neiId = neiIds.GetId(k)
if ( self.checkedcarefully.GetValue(neiId)==0):
self.CheckCellsCareful2.InsertNextId(neiId)
elif ( self.checked.GetValue(neiId)==0):
self.CheckCells2.InsertNextId(neiId)
#Add current list of checked cells to the full list and continue
tmp.DeepCopy(self.CheckCells)
self.CheckCells.DeepCopy( self.CheckCells2)
self.CheckCells2.DeepCopy(tmp)
tmp.Reset()
#print("here")
""" Initializes boundary arrays. """
def SetBoundaryArrays(self):
#Variables used in the function
#vtkIdType pointId,bp,bp2,i
bpCellIds = vtk.vtkIdList()
#Point locator to find points on mesh that are the points on the boundary
#lines
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(self.mesh)
pointLocator.BuildLocator()
# Get number of points and cells
numMeshPoints = self.mesh.GetNumberOfPoints()
numMeshCells = self.mesh.GetNumberOfCells()
# Set up check arrays
self.checked = vtk.vtkIdTypeArray()
self.checked.SetNumberOfComponents(1)
self.checked.SetNumberOfTuples(numMeshCells)# vtkIdType[numMeshCells]
self.checked.Fill(0.0)
self.checkedcarefully = vtk.vtkIdTypeArray()
self.checkedcarefully.SetNumberOfComponents(1)
self.checkedcarefully.SetNumberOfTuples(numMeshCells)# vtkIdType[numMeshCells]
self.checkedcarefully.Fill(1.0)
self.pointMapper = vtk.vtkIdTypeArray()
self.pointMapper.SetNumberOfComponents(1)
self.pointMapper.SetNumberOfTuples(numMeshCells)# vtkIdType[numMeshCells]
self.pointMapper.Fill(0.0)
#for i in range(numMeshCells):
# self.checked.SetValue(i, 0.)
# Set up boundary arrays
self.BoundaryPointArray.SetNumberOfComponents(1)
self.BoundaryPointArray.SetNumberOfTuples(numMeshPoints)
self.BoundaryPointArray.FillComponent(0, 0)
self.BoundaryCellArray.SetNumberOfComponents(1)
self.BoundaryCellArray.SetNumberOfTuples(numMeshCells)
self.BoundaryCellArray.FillComponent(0,0)
# Number of boundary line points
numPoints = self.BoundaryLines.GetNumberOfPoints()
pt = [0.0, 0.0, 0.0]
for pointId in range(numPoints):
self.BoundaryLines.GetPoint(pointId, pt)
print(pt)
#Find point on mesh
bp = pointLocator.FindClosestPoint(pt)
self.pointMapper.SetValue(bp, pointId)
self.BoundaryPointArray.SetValue(bp, 1)
self.mesh.GetPointCells(bp,bpCellIds)
#Set the point mapping array
#Assign each cell attached to this point as a boundary cell
for i in range(bpCellIds.GetNumberOfIds()):
self.BoundaryCellArray.SetValue(bpCellIds.GetId(i), 1)
#print(self.BoundaryCellArray.GetValue(bpCellIds.GetId(i)))
self.checked.InsertValue(bpCellIds.GetId(i), 1)
# Flip the values of checked carefully
for i in range(numMeshCells):
if (self.checked.GetValue(i) == 0):
self.checkedcarefully.SetValue(i, 1)
else:
self.checkedcarefully.SetValue(i, 0)
""" function to add current cell area to full area.
cellId cell whose are to be computed.
area area which will be updated with cell area.
"""
def AddCellArea(self, cellId, area):
# Get cell points
#vtkIdType npts, *pts
pts = vtk.vtkIdList()
self.mesh.GetCellPoints(cellId, pts)
# Get points
pt0 = [0.0, 0.0, 0.0]
pt1 = [0.0, 0.0, 0.0]
pt2 = [0.0, 0.0, 0.0]
self.mesh.GetPoint(pts.GetId(0), pt0)
self.mesh.GetPoint(pts.GetId(1), pt1)
self.mesh.GetPoint(pts.GetId(2), pt2)
# Calculate area of triangle
area += abs(self.ComputeTriangleArea(pt0, pt1, pt2))
return area
"""
ComputeTriangleArea
"""
def ComputeTriangleArea(self, pt0, pt1, pt2):
area = 0.0
area += (pt0[0]*pt1[1])-(pt1[0]*pt0[1])
area += (pt1[0]*pt2[1])-(pt2[0]*pt1[1])
area += (pt2[0]*pt0[1])-(pt0[0]*pt2[1])
area *= 0.5
return area
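# Added sanity check: for the unit right triangle pt0=(0,0), pt1=(1,0),
# pt2=(0,1) the shoelace sum above is (0*0-1*0) + (1*1-0*0) + (0*0-0*1) = 1,
# so area = 0.5. Only the x and y components are used, so the result is the
# area of the triangle projected onto the x-y plane.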
def Execute(self):
print("Get Surface Boundaries")
reader = vmtkscripts.vmtkSurfaceReader()
reader.InputFileName = self.InputFile
reader.Execute()
self.Surface = reader.Surface #vtkPolyData
#Define variables used by the algorithm
inpts= vtk.vtkPoints()
inPolys = vtk.vtkCellArray()
# ints of vtkIdType
# newPts numPts, newId, cellId
#Get input points, polys and set the up in the vtkPolyData mesh
inpts = self.Surface.GetPoints()
inPolys = self.Surface.GetPolys()
self.mesh.SetPoints(inpts)
self.mesh.SetPolys(inPolys)
#Build Links in the mesh to be able to perform complex polydata processes
self.mesh.BuildLinks()
#Get the number of Polys for scalar allocation
numPolys = self.Surface.GetNumberOfPolys()
numPts = self.Surface.GetNumberOfPoints()
#Check the input to make sure it is there
if (numPolys < 1):
raise RuntimeError("No Input")
#Set up Region scalar for each surface
self.NewScalars.SetNumberOfTuples(numPolys)
#Set up Feature Edges for Boundary Edge Detection
inputCopy = self.Surface.NewInstance()
inputCopy.ShallowCopy(self.Surface)
#Set the Data to hold onto given Point Markers
inputCopy.GlobalReleaseDataFlagOff()
self.boundaries.SetInputData(inputCopy)
self.boundaries.BoundaryEdgesOff() #(self.BoundaryEdges)
self.boundaries.ManifoldEdgesOff() #(self.ManifoldEdges)
self.boundaries.NonManifoldEdgesOff() #(self.NonManifoldEdges)
self.boundaries.FeatureEdgesOn() #(self.FeatureEdges)
self.boundaries.SetFeatureAngle(self.FeatureAngle)
#inputCopy.Delete()
self.boundaries.Update()
# Set the boundary lines
self.BoundaryLines.DeepCopy(self.boundaries.GetOutput())
# Initialize the arrays to be used in the flood fills
self.SetBoundaryArrays()
print("Starting Boundary Face Separation")
# Set Region value of each cell to be zero initially
reg = 0
for cellId in range(numPolys):
self.NewScalars.InsertValue(cellId, reg)
#Go through each cell and perform the region identification process
#print(numPolys)
for cellId in range(numPolys):
#if(cellId % 1000 == 0):
#print(cellId)
#Check to make sure the value of the region at this cellId hasn't been set
if (self.NewScalars.GetValue(cellId) == 0):
reg += 1
self.CheckCells.InsertNextId(cellId)
#Call function to find all cells within certain region
self.FindBoundaryRegion(reg, 1)
#print("party")
self.CheckCells.Reset()
self.CheckCells2.Reset()
self.CheckCellsCareful.Reset()
self.CheckCellsCareful2.Reset()
# Check to see if any cells are left over
extraregion=0
area = 0.0
for cellId in range(numPolys):
if (self.checked.GetValue(cellId) == 0 or self.checkedcarefully.GetValue(cellId) == 0):
self.NewScalars.InsertValue(cellId,reg+1)
area = self.AddCellArea(cellId, area)
extraregion=1
if (extraregion):
print("I am incrementing region")
reg +=1
#Copy all the input geometry and data to the output polydata
output = vtk.vtkPolyData()
output.SetPoints(inpts)
output.SetPolys(inPolys)
output.GetPointData().PassData(self.Surface.GetPointData())
output.GetCellData().PassData(self.Surface.GetCellData())
#Add the new scalars array to the output
self.NewScalars.SetName(self.RegionIdsArrayName)
output.GetCellData().AddArray(self.NewScalars)
output.GetCellData().SetActiveScalars(self.RegionIdsArrayName)
# If extracting the largest region, get it out
if (self.ExtractLargestRegion):
maxVal = 0.0
maxRegion = -1
for i in range(reg):
if (self.RegionAreas.GetValue(i) > maxVal):
maxVal = self.RegionAreas.GetValue(i)
maxRegion = i+1
thresholder = vtk.vtkThreshold()
thresholder.SetInputData(output)
thresholder.SetInputArrayToProcess(0, 0, 0, 1, self.RegionIdsArrayName)
thresholder.ThresholdBetween(maxRegion, maxRegion)
thresholder.Update()
# Check that the thresholded result has points before running the surface filter
if (thresholder.GetOutput().GetNumberOfPoints() == 0):
raise RuntimeError("vtkThreshold Output has no points")
#Convert unstructured grid to polydata
surfacer = vtk.vtkDataSetSurfaceFilter()
surfacer.SetInputData(thresholder.GetOutput())
surfacer.Update()
#Set the final pd
output.DeepCopy(surfacer.GetOutput())
# Total number of regions
self.NumberOfRegions = reg
writer = vmtkscripts.vmtkSurfaceWriter()
writer.OutputFileName = self.OutputFile
writer.Input = output
writer.Execute()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Get surface boundaries')
parser.add_argument("-i", dest="surface", required=True, help="input surface file", metavar="FILE")
parser.add_argument("-o", dest="file_out", required=True, help="output surface file", metavar="FILE")
#parser.add_argument("-a", dest="feature_angle", required=False, help="feature_angle", metavar="FILE", default=50.0)
args = parser.parse_args()
#print(args)
boundaries = GetBoundaries(args)
boundaries.Execute()
|
kayarre/Tools
|
vmtk/get_boundary.py
|
Python
|
bsd-2-clause
| 19,895
|
[
"VTK"
] |
f4bcd9cd44918efdd2ab3f0b01b319bae6230f97e64aa311bc21df079d5e26c1
|
# Copyright 2007 by Tiago Antao <[email protected]>.
# Revisions copyright 2014 by Melissa Gymrek <[email protected]>.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module allows you to control Simcoal2 and FastSimcoal
"""
import os
import sys
from Bio.Application import AbstractCommandline, _Option, _Switch
class SimCoalController(object):
def __init__(self, simcoal_dir):
"""Initializes the controller. (DEPRECATED)
simcoal_dir is the directory where simcoal is.
The initializer checks for existence and executability of binaries.
"""
self.simcoal_dir = simcoal_dir
self.os_name = os.name # remove this?
dir_contents = os.listdir(self.simcoal_dir)
# We expect the tool to be installed as simcoal2(.exe)
# without any trailing version number.
self.bin_name = "simcoal2"
if self.bin_name not in dir_contents:
# Try case insensitive,
dir_contents = [x.lower() for x in dir_contents]
if self.bin_name not in dir_contents:
# Try with .exe
self.bin_name += '.exe'
if self.bin_name not in dir_contents:
raise IOError("SimCoal not available")
if not os.access(os.path.join(self.simcoal_dir, self.bin_name),
os.X_OK):
raise IOError("SimCoal not executable")
def run_simcoal(self, par_file, num_sims, ploydi='1', par_dir='.'):
"""Executes SimCoal.
"""
if par_dir is None:
par_dir = os.sep.join([".", 'SimCoal', 'runs'])
curr_dir = os.getcwd()
# TODO - Make sure we change drive on Windows as well?
os.chdir(par_dir)
exe = os.path.join(self.simcoal_dir, self.bin_name)
if " " in exe:
exe = '"' + exe + '"'
cmd = exe + ' ' + par_file + ' ' + str(num_sims) + ' ' + ploydi
# TODO - Better way to spot if on Jython on Windows?
if sys.platform == "win32" or self.bin_name.endswith(".exe"):
# There is no /dev/nul on Windows
cmd += ' > nul 2>nul'
else:
cmd += ' >/dev/null 2>&1'
os.system(cmd)
os.chdir(curr_dir)
class _FastSimCoalCommandLine(AbstractCommandline):
""" Command Line Wrapper for Fastsimcoal
"""
def __init__(self, fastsimcoal_dir=None, cmd='fastsimcoal', **kwargs):
self.parameters = [
_Option(["-i", "--ifile", "parfile"], "Name of the parameter file",
filename=True, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-n", "--numsims", "numsims"], "Number of simulations to perform",
filename=False, equate=False, is_required=True,
checker_function=lambda x: isinstance(x, int)),
_Option(["-t", "--tfile", "tfile"], "Name of template parameter file",
filename=True, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-f", "--dfile", "dfile"], "Name of parameter definition file",
filename=True, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-F", "--dFile", "dFile"],
"""Same as -f but only uses simple parameters defined
in the template file. Complex params are recomputed""",
filename=True, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-e", "--efile", "efile"],
"""Parameter prior definition file.
Parameters drawn from specified distributions are
substituted into template file.""",
filename=True, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-E", "--numest", "numest"],
"""Number of estimations from parameter priors.
Listed parameter values are substituted in template file.""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-g", "--genotypic", "genotypic"], "Generates Arlequin projects with genotypic data"),
_Switch(["-p", "--phased", "phased"], "Specifies that phase is known in Arlequin output"),
_Option(["-s", "--dnatosnp", "dnatosnp"],
""""Output DNA as SNP data (0: ancestral, 1: derived
and specify maximum no. SNPs to output.""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-S", "--allsites", "allsites"],
"""Output the whole DNA sequence, including monomorphic sites"""),
_Switch(["-I", "--inf", "inf"],
"""Generates DNA mutations according to an
infinite sites (IS) mutation model."""),
_Switch(["-d", "--dsfs", "dsfs"], "Computes derived site frequency spectrum"),
_Switch(["-m", "--msfs", "msfs"], "Computes minor site frequency spectrum"),
_Option(["-o", "--oname", "oname"], "Generic name for observed SFS files",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, str)),
_Switch(["-H", "--header", "header"], "Generates header in site frequency spectrum files."),
_Switch(["-q", "--quiet", "quiet"], "Minimal messages output to console"),
_Switch(["-T", "--tree", "tree"], "Output coalescent tree in nexus format."),
_Option(["-k", "--keep", "keep"],
"""Number of simulated polymorphic sites kept in memory.
If the simulated no. is larger, then temporary files are created.""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["--seed", "seed"], "Seed for the random number generator (positive int <=1E6)",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-x", "--noarloutput", "noarloutput"], "Does not generate Arlequin output"),
_Switch(["-D", "--dadioutput", "dadioutput"], "Output SFS in dadi format"),
_Option(["-M", "--maxlhood", "maxlhood"],
"""Perform parameter estimation by max lhood from SFS, and
define stop criterion as min., rel., diff. in parameter
values between iterations""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, float)),
_Option(["-N", "--maxnumsims", "maxnumsims"],
"""Maximum number of simulations to perform during
likelihood maximization.""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-l", "--minnumloops", "minnumloops"],
"""Minimum number of iteration loops to perform during
likelihood maximization.""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-L", "--maxnumloops", "maxnumloops"],
"""Maximum number of iterations to perform during
likelihood maximization""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-C", "--minSFSCount", "minSFSCount"],
"""Minimum observed SFS entry count taken into account
in likelihood computation""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-0", "--removeZeroSFS", "removeZeroSFS"],
"""Do not take into account monomorphic sites for
SFS likelihood computation."""),
_Option(["-a", "--ascDeme", "ascDeme"],
"""This is the deme id where ascertainment is performed
when simulating SNPs.""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-A", "--ascSize", "ascSize"],
"""Number of ascertained chromosomes used to define SNPs in
a given deme.""",
filename=False, equate=False, is_required=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-u", "--multiSFS", "multiSFS"],
"Generate or use multidimensional SFS")]
AbstractCommandline.__init__(self, cmd, **kwargs)
class FastSimCoalController(object):
def __init__(self, fastsimcoal_dir=None, bin_name="fsc252"):
"""Initializes the controller.
fastsimcoal_dir is the directory where fastsimcoal is.
By default the binary should be called fsc252.
bin_name specifies a different name for the binary.
The initializer checks for existence and executability of binaries
and sets up the command line controller.
Fastsimcoal2 is available here: http://cmpg.unibe.ch/software/fastsimcoal2/.
This wrapper was written and tested for fastsimcoal version 2.51.
"""
self.bin_name = bin_name
self.fastsimcoal_dir = fastsimcoal_dir
if fastsimcoal_dir is None:
for path in os.environ["PATH"].split(os.pathsep):
if os.path.isfile(os.path.join(path, self.bin_name)):
self.fastsimcoal_dir = path
if self.fastsimcoal_dir is None:
raise IOError("Fastsimcoal not available")
else:
dir_contents = os.listdir(fastsimcoal_dir)
if self.bin_name not in dir_contents:
raise IOError("Fastsimcoal not available")
if not os.access(os.path.join(self.fastsimcoal_dir, self.bin_name), os.X_OK):
raise IOError("Fastsimcoal not executable")
def run_fastsimcoal(self, par_file, num_sims, par_dir='.', opts={}):
"""Executes Fastsimcoal.
par_file is the input parameter file (--ifile) for fastsimcoal.
num_sims is the number of simulations to perform.
par_dir is the directory where par_file is and where output will be written.
opts is a dictionary of additional options to fastsimcoal.
"""
if par_dir is None:
par_dir = os.sep.join([".", "Fastsimcoal", "runs"])
if not os.path.exists(par_dir):
os.mkdir(par_dir)
curr_dir = os.getcwd()
os.chdir(par_dir)
if par_file is None: # Must use .tpl for -t instead if no par_file
controller = _FastSimCoalCommandLine(cmd=os.path.join(self.fastsimcoal_dir, self.bin_name),
numsims=num_sims, **opts)
else:
controller = _FastSimCoalCommandLine(cmd=os.path.join(self.fastsimcoal_dir, self.bin_name),
parfile=par_file, numsims=num_sims, **opts)
controller()
os.chdir(curr_dir)
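# Added usage sketch (the .par file name is hypothetical; assumes an fsc252
# binary is on PATH or in fastsimcoal_dir):
#
#   controller = FastSimCoalController()
#   controller.run_fastsimcoal("example.par", num_sims=50,
#                              opts={"genotypic": True, "seed": 1234})
#
# Switch-type options such as "genotypic" take True/False, while valued
# options such as "seed" take the value to place on the command line.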
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/PopGen/SimCoal/Controller.py
|
Python
|
apache-2.0
| 11,919
|
[
"Biopython"
] |
d9efbcedd621e0001693a5180d4ecef16e9dca912dc872ae3ce18166e81fc77f
|
#!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse, sys, fnmatch, os, dma_resolver, shlex
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
args = parser.parse_args()
# output variables for each pin
vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of configs by label
bylabel = {}
# list of SPI devices
spidev = []
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
mcu_type = None
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_alt_function(mcu, pin, function):
'''return the alternate function number for a pin'''
import importlib
try:
lib = importlib.import_module(mcu)
alt_map = lib.AltFunction_map
except ImportError:
error("Unable to find module for MCU %s" % mcu)
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'OTG', 'JT', 'TIM', 'CAN']
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if not s in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if not pin in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is a RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return "PIN_MODE_%s(%uU)" % (v, self.pin)
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return "PIN_OTYPE_%s(%uU)" % (v, self.pin)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return "PIN_O%s(%uU)" % (v, self.pin)
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return "PIN_PUPDR_%s(%uU)" % (v, self.pin)
def get_ODR(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_AFIO(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, af)
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
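# Illustrative sketch (added; not part of the original script): how a single
# hypothetical hwdef.dat pin entry such as "PA0 TIM5_CH1 TIM5 PWM(1)" maps
# onto a generic_pin object and its extra-token accessors.
def _example_generic_pin():
    p = generic_pin('A', 0, 'TIM5_CH1', 'TIM5', ['PWM(1)'])
    assert p.extra_value('PWM', type=int) == 1
    # with no alternate function assigned the pin defaults to an input
    assert p.get_MODER() == 'PIN_MODE_INPUT(0U)'
    return p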
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_config(name, column=0, required=True, default=None, type=None):
'''get a value from config dictionary'''
if not name in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if len(config[name]) < column + 1:
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
ret = config[name][column]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
f.write('#define %s\n\n' % get_config('MCU', 1))
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config(
'STDOUT_BAUDRATE', type=int))
if 'SDIO' in bytype:
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
else:
f.write('#define HAL_USE_SDC FALSE\n')
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
# write any custom STM32 defines
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
f.write('#define %s\n' % d[7:])
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
f.write('#define CRT1_AREAS_NUMBER 1\n')
if mcu_type in ['STM32F427xx', 'STM32F405xx']:
def_ccm_size = 64
else:
def_ccm_size = None
ccm_size = get_config(
'CCM_RAM_SIZE_KB', default=def_ccm_size, required=False, type=int)
if ccm_size is not None:
f.write('#define CCM_RAM_SIZE %u\n' % ccm_size)
f.write('\n')
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram size
ram_size = get_config('RAM_SIZE_KB', default=192, type=int)
flash_base = 0x08000000 + flash_reserve_start * 1024
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
print("Generating ldscript.ld")
f = open(fname, 'w')
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x20000000, len = %uk
}
INCLUDE common.ld
''' % (flash_base, flash_length, ram_size))
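# Worked check (added for illustration; the sizes are assumed example values):
# with FLASH_SIZE_KB=1024 and the default 16K reserved at the start of flash,
# write_ldscript() above emits org = 0x08004000 and len = 1008K.
def _example_ldscript_layout():
    flash_size, reserve_start, reserve_end = 1024, 16, 0
    flash_base = 0x08000000 + reserve_start * 1024
    flash_length = flash_size - (reserve_start + reserve_end)
    assert flash_base == 0x08004000
    assert flash_length == 1008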
def write_USB_config(f):
'''write USB config defines'''
if not 'OTG1' in bytype:
return
f.write('// USB configuration\n')
f.write('#define HAL_USB_VENDOR_ID %s\n' % get_config('USB_VENDOR', default=0x0483)) # default to ST
f.write('#define HAL_USB_PRODUCT_ID %s\n' % get_config('USB_PRODUCT', default=0x5740))
f.write('#define HAL_USB_STRING_MANUFACTURER "%s"\n' % get_config("USB_STRING_MANUFACTURER", default="ArduPilot"))
f.write('#define HAL_USB_STRING_PRODUCT "%s"\n' % get_config("USB_STRING_PRODUCT", default="%BOARD%"))
f.write('#define HAL_USB_STRING_SERIAL "%s"\n' % get_config("USB_STRING_SERIAL", default="%SERIAL%"))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
print("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or not bus in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if not cs in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if not mode in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in bytype.keys():
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_TX_DMA_STREAM, STM32_SPI_SPI%u_RX_DMA_STREAM }\n'
% (n, n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def write_UART_config(f):
'''write UART config defines'''
get_config('UART_ORDER')
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
for dev in uart_list:
idx = uart_list.index(dev)
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
for idx in range(len(uart_list), 6):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
devlist = []
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
if dev + "_RTS" in bylabel:
p = bylabel[dev + '_RTS']
rts_line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
rts_line = "0"
if dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s}\n" %
(dev, dev, rts_line))
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver:
f.write('#define HAL_USE_SERIAL FALSE\n')
def write_I2C_config(f):
'''write I2C config defines'''
if not 'I2C_ORDER' in config:
error("Missing I2C_ORDER config")
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
f.write('#define HAL_USE_I2C FALSE\n')
return
devlist = []
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
if dev + "_SCL" in bylabel:
p = bylabel[dev + "_SCL"]
f.write(
'#define HAL_%s_SCL_AF %d\n' % (dev, p.af)
)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
f.write(
'#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM }\n'
% (n, n, n, n))
f.write('#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if rc_in is not None:
a = rc_in.label.split('_')
chan_str = a[1][2:]
timer_str = a[0][3:]
if chan_str[-1] == 'N':
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
chan_str = chan_str[:-1]
if not is_int(chan_str) or not is_int(timer_str):
error("Bad timer channel %s" % rc_in.label)
if int(chan_str) not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
n = int(a[0][3:])
dma_chan_str = rc_in.extra_prefix('DMA_CH')[6:]
dma_chan = int(dma_chan_str)
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write(
'#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % int(chan_str))
f.write('#define STM32_RCIN_DMA_CHANNEL %u' % dma_chan)
f.write('\n')
if alarm is not None:
a = alarm.label.split('_')
chan_str = a[1][2:]
timer_str = a[0][3:]
if not is_int(chan_str) or not is_int(timer_str):
error("Bad timer channel %s" % alarm.label)
n = int(timer_str)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan = int(chan_str)
if chan not in [1, 2, 3, 4]:
error("Bad channel number %u for ALARM PWM %s" % (chan, p))
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
for p in pwm_out:
if p.type != t:
continue
chan_str = p.label[7]
if not is_int(chan_str):
error("Bad channel for PWM %s" % p)
chan = int(chan_str)
if chan not in [1, 2, 3, 4]:
error("Bad channel number %u for PWM %s" % (chan, p))
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u}\n''' %
(group, advanced_timer, chan_list[0], chan_list[1],
chan_list[2], chan_list[3], pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU) }, /* %s */ \\\n' %
(gpio, pwm, port, pin, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
for l in sorted(bylabel.keys()):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
def write_prototype_file():
'''write the prototype file for apj generation'''
pf = open(os.path.join(outdir, "apj.prototype"), "w")
pf.write('''{
"board_id": %s,
"magic": "PX4FWv1",
"description": "Firmware for the %s board",
"image": "",
"build_time": 0,
"summary": "PX4FMUv3",
"version": "0.1",
"image_size": 0,
"git_identity": "",
"board_revision": 0
}
''' % (get_config('APJ_BOARD_ID'),
get_config('APJ_BOARD_TYPE', default=mcu_type)))
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(bytype.keys()):
if type.startswith('USART') or type.startswith('UART'):
f.write('#define STM32_SERIAL_USE_%-6s TRUE\n' % type)
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph not in bylabel:
continue
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
''')
write_mcu_config(f)
write_USB_config(f)
write_I2C_config(f)
write_SPI_config(f)
write_PWM_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_peripheral_enable(f)
write_prototype_file()
dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY',default=''),
dma_noshare=get_config('DMA_NOSHARE',default=''))
write_UART_config(f)
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
for p in allpins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
peripherals.append(type + "_TX")
peripherals.append(type + "_RX")
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO'):
peripherals.append(type)
if type.startswith('TIM') and p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
done.add(type)
return peripherals
def process_line(line):
'''process one line of pin definition file'''
global allpins
a = shlex.split(line)
# keep all config lines for later use
alllines.append(line)
if a[0].startswith('P') and a[0][1] in ports and a[0] in config:
print("WARNING: Pin %s redefined" % a[0])
config[a[0]] = a[1:]
if a[0] == 'MCU':
global mcu_type
mcu_type = a[2]
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
portmap[port][pin] = p
allpins.append(p)
if not type in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
if a[0] == 'SPIDEV':
spidev.append(a[1:])
if a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1],'')
bylabel.pop(a[1],'')
#also remove all occurences of defines in previous lines if any
for line in alllines[:]:
if line.startswith('define') and a[1] in line:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1]:
continue
if pin.label == a[1]:
continue
if pin.portpin == a[1]:
continue
newpins.append(pin)
allpins = newpins
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if not "MCU" in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list for peripherals for DMA resolver
periph_list = build_peripheral_list()
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
|
dgrat/ardupilot
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py
|
Python
|
gpl-3.0
| 32,533
|
[
"CRYSTAL"
] |
16e129c3932074f192c777ea47e955e4200b95f4a0f43660c8ab803312aab1da
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from rauth import OAuth2Service
print '\n\n##############################################################'
print '################## PYTHON MONEYBIRD OAUTH2 ###################'
print '##############################################################'
print(
"\nBefore you can use OAuth2 in our API, you need to register\n"
"your application with MoneyBird. Registration allows us to\n"
"see which application is authenticating and who is the\n"
"owner of the application. Registration is a one-time event\n"
"and can be done by logging in to your MoneyBird account and\n"
"visit the page:\n\n"
"https://moneybird.com/user/applications/new.\n\n"
"After registration you will receive a Client ID\n"
"and Client Secret. You will use these tokens to identify\n"
"your application when requesting access for users.\n"
)
print '##############################################################'
print '################## CLIENT ID & CLIENT SECRET #################'
print '##############################################################\n'
client_id = raw_input("Paste Client ID: ")
client_secret = raw_input("Paste Client Secret: ")
print '\n##############################################################'
print '################## CALLBACK/REDIRECT URL ####################'
print '##############################################################\n'
redirect_uri = raw_input(
"Enter Callback URL: [http://localhost/callback]:") or "http://localhost/callback"
print '\n##############################################################'
print '################## DEFINE ACCESS TO SCOPES ###################'
print '##############################################################\n'
moneybird = OAuth2Service(
client_id=client_id,
client_secret=client_secret,
name='moneybird',
authorize_url='https://moneybird.com/oauth/authorize',
access_token_url='https://moneybird.com/oauth/token',
base_url='https://moneybird.com')
sales_invoices = raw_input("Access to Sales Invoices?: Y/N [Y]:") or "Y"
documents = raw_input("Access to Documents?: Y/N [Y]:") or "Y"
estimates = raw_input("Access to Estimates?: Y/N [Y]:") or "Y"
bank = raw_input("Access to Bank?: Y/N [Y]:") or "Y"
settings = raw_input("Access to Settings?: Y/N [Y]:") or "Y"
if sales_invoices is "Y":
sc_sal = 'sales_invoices '
else:
sc_sal = ''
if documents is "Y":
sc_doc = 'documents '
else:
sc_doc = ''
if estimates is "Y":
sc_est = 'estimates '
else:
sc_est = ''
if bank is "Y":
sc_ban = 'bank '
else:
sc_ban = ''
if settings is "Y":
sc_set = 'settings '
else:
sc_set = ''
scopes = sc_sal + sc_doc + sc_est + sc_ban + sc_set
params = {'scope': scopes,
'response_type': 'code',
'redirect_uri': redirect_uri}
url = moneybird.get_authorize_url(**params)
print '\n##############################################################'
print '################## AUTHORIZE APPLICATION #####################'
print '##############################################################\n'
print 'Paste the following URL in your browser and authorize the request:\n\n' + url + '\n'
print '##############################################################'
print '################## COPY CODE FROM BROWSER ####################'
print '##############################################################\n'
print 'In your browser you will now find the code in the url after:\n'
print '%s?code=\n' % redirect_uri
print '##############################################################'
print '################## OBTAIN ACCESS TOKEN #######################'
print '##############################################################\n'
data = {'code': raw_input('Paste your code here: '),
'grant_type': 'authorization_code',
'redirect_uri': redirect_uri}
response = moneybird.get_raw_access_token(data=data)
response = response.json()
print '\n##############################################################'
print '################## YOUR ACCESS TOKEN! ########################'
print '##############################################################\n'
print 'Your Access Token is:\n\n' + response.get('access_token') + '\n'
print '##############################################################'
print '################## GOOD LUCK! ################################'
print '##############################################################\n'
|
sanderkwantes/moneybird-python-api
|
tools/get_access_token.py
|
Python
|
mit
| 4,463
|
[
"VisIt"
] |
9658a5d54787107d6082e15f71eb0d3206adb37493b45eaf7d1c816f114489ea
|
import numpy as np
import random
from scipy.ndimage import zoom
from skimage.util import view_as_blocks
import tensorflow as tf
import sys
sys.path.append('../..')
from util import load_images, load_regions
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def write_examples(features, labels, output_path):
writer = tf.python_io.TFRecordWriter(output_path)
for feature, label in zip(features, labels):
example = tf.train.Example(features=tf.train.Features(
feature={
'feature': _float_feature(feature),
'label': _int64_feature([label]),
}
))
writer.write(example.SerializeToString())
writer.close()
def generate_simple_dataset(num_samples, num_positives, input_size, num_steps, output_path):
"""
num_samples: number of "patches"
num_positives: number of positive examples
input_size: size of one feature vector from one "frame"
num_steps: number of "frames" to concatenate
"""
dataset = np.zeros([num_samples, input_size * num_steps], dtype=float)
positive_class_indices = random.sample(xrange(num_samples), num_positives)
labels = np.zeros(num_samples, dtype=int)
labels[positive_class_indices] = 1
# Make one index of the positive classes 1
feature_indices = range(input_size * num_steps)
for index in positive_class_indices:
flag_index = random.choice(feature_indices)
dataset[index, flag_index] = 1
# sanity check
print "Number of non-zeros in dataset: %d" % (np.nonzero(dataset)[0].shape[0],)
print "Number of non-zeros in labels: %d" % (np.nonzero(labels)[0].shape[0],)
print "Dataset and labels make sense: %s" % (np.all(np.nonzero(dataset)[0] == np.nonzero(labels)[0]),)
write_examples(dataset, labels, output_path)
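# Usage sketch (added for illustration; the output filename is hypothetical).
def _example_simple_dataset():
    generate_simple_dataset(num_samples=1000, num_positives=100,
                            input_size=64, num_steps=5,
                            output_path='simple_dataset.tfrecords')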
def create_patch(region, images, buffer, output_shape):
image_dims = images[0].shape
center_y, center_x = region.center
min_y, min_x, max_y, max_x = region.bbox
min_y = max(0, min_y - buffer)
max_y = min(image_dims[0], max_y + buffer)
min_x = max(0, min_x - buffer)
max_x = min(image_dims[1], max_x + buffer)
# it is nice to have square templates.
if center_x - min_x < max_x - center_x:
bbox_radius_w = center_x - min_x
else:
bbox_radius_w = max_x - center_x
if center_y - min_y < max_y - center_y:
bbox_radius_h = center_y - min_y
else:
bbox_radius_h = max_y - center_y
if bbox_radius_w < bbox_radius_h:
bbox_radius = bbox_radius_w
else:
bbox_radius = bbox_radius_h
x1 = center_x - bbox_radius
x2 = center_x + bbox_radius
y1 = center_y - bbox_radius
y2 = center_y + bbox_radius
# Lets try to make the diameter odd while we are at it
y2 = min(image_dims[0], y2 + 1)
x2 = min(image_dims[1], x2 + 1)
patch = images[:,y1:y2,x1:x2]
scaled_patch = zoom(patch, (1, float(output_shape[0]) / patch.shape[1] , float(output_shape[1]) / patch.shape[2]))
return scaled_patch
def generate_dataset(neurofinder_dataset_path, patch_dims, buffer=1, max_number_of_negatives=500, output_path=None):
"""
neurofinder_dataset_path : path to a neurofinder dataset directory
patch_dims : (height, width) for the desired patch size
buffer : buffer to increase the size of the patch around a neuron
max_number_of_negatives : the maximum number of negative patches to save
output_path : location to save the tfrecords file (or None if you don't want to save a tfrecords file)
"""
images = load_images(neurofinder_dataset_path)
regions = load_regions(neurofinder_dataset_path)
# convert the images to floats and normalize them.
images = images.astype(float)
images /= np.max(images)
assert np.min(images) == 0.
assert np.max(images) == 1.
# we'll make a fixed sized patch around each neuron
positive_patches = []
for region in regions:
patch = create_patch(region, images, buffer=buffer, output_shape=patch_dims)
positive_patches.append(patch)
# for the negative patches, lets consider the patches that don't contain a neuron
# does the regional code base have a test for containment?
negative_patches = []
# create an image with 1s over the neurons and 0s else where
masks = regions.mask(dims=images[0].shape, stroke='red', fill='red', base=np.zeros(images[0].shape))
masks = (masks[:,:,0] > 0) + 0
video_height, video_width = images[0].shape
patch_height, patch_width = patch_dims
patch_area = float(patch_height * patch_width)
h_stride, w_stride = (9, 9)
for h in range(0,video_height-patch_height+1,h_stride):
for w in range(0,video_width-patch_width+1,w_stride):
p = masks[h:h+patch_height, w:w+patch_width]
# make sure that neurons don't cover a significant portion of the patch
if np.sum(p) / patch_area > .4:
continue
# make sure that a neuron is not in the middle of the patch
if np.sum(p[patch_height / 2 - 1 : patch_height / 2 + 1, patch_width / 2 - 1 : patch_width / 2 + 1]) > 0:
continue
# Good to go
negative_patches.append(images[:, h:h+patch_height, w:w+patch_width])
print "Found %d total negative patches." % (len(negative_patches),)
p_patches = [p.ravel() for p in positive_patches]
n_patches = [p.ravel() for p in negative_patches]
random.shuffle(n_patches)
n_patches = n_patches[:max_number_of_negatives]
training_set = [(p, 1) for p in p_patches] + [(p, 0) for p in n_patches]
random.shuffle(training_set)
features = [d[0] for d in training_set]
labels = [d[1] for d in training_set]
print "Number of positive patches: %d" % (len(p_patches),)
print "Number of negatve patches: %d" % (len(n_patches),)
print "Number of frames: %d" % (images.shape[0],)
print "Patch Dims: %s" % (patch_dims,)
print "Feature size: %s" % (p_patches[0].shape,)
if output_path != None:
write_examples(features, labels, output_path)
return features, labels
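# Usage sketch (added for illustration; the dataset directory and output file
# names are hypothetical).
def _example_generate_dataset():
    features, labels = generate_dataset('data/neurofinder.00.00',
                                        patch_dims=(31, 31),
                                        buffer=1,
                                        max_number_of_negatives=500,
                                        output_path='patches.tfrecords')
    return features, labels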
|
gvanhorn38/active_neurofinder
|
baselearners/tf/generate_inputs.py
|
Python
|
mit
| 6,259
|
[
"NEURON"
] |
22bfb0671692b47ddd6caec8ec9fd5594eb9f15de394be0fc370805efc8a80db
|
# Orca
#
# Copyright 2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays a GUI for Orca navigation list dialogs"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2012 Igalia, S.L."
__license__ = "LGPL"
from gi.repository import GObject, Gdk, Gtk
from . import debug
from . import guilabels
from . import orca_state
class OrcaNavListGUI:
def __init__(self, title, columnHeaders, rows, selectedRow):
self._tree = None
self._activateButton = None
self._gui = self._createNavListDialog(columnHeaders, rows, selectedRow)
self._gui.set_title(title)
self._gui.set_modal(True)
self._script = orca_state.activeScript
self.showGUI()
def _createNavListDialog(self, columnHeaders, rows, selectedRow):
dialog = Gtk.Dialog()
dialog.set_default_size(500, 400)
grid = Gtk.Grid()
contentArea = dialog.get_content_area()
contentArea.add(grid)
scrolledWindow = Gtk.ScrolledWindow()
grid.add(scrolledWindow)
self._tree = Gtk.TreeView()
self._tree.set_hexpand(True)
self._tree.set_vexpand(True)
scrolledWindow.add(self._tree)
cols = [GObject.TYPE_OBJECT, GObject.TYPE_INT]
cols.extend(len(columnHeaders) * [GObject.TYPE_STRING])
model = Gtk.ListStore(*cols)
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Accessible", cell, text=0)
column.set_visible(False)
self._tree.append_column(column)
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("offset", cell, text=1)
column.set_visible(False)
self._tree.append_column(column)
for i, header in enumerate(columnHeaders):
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(header, cell, text=i+2)
column.set_sort_column_id(i+2)
self._tree.append_column(column)
for row in rows:
rowIter = model.append(None)
for i, cell in enumerate(row):
model.set_value(rowIter, i, cell)
self._tree.set_model(model)
selection = self._tree.get_selection()
selection.select_path(selectedRow)
btn = dialog.add_button(guilabels.BTN_CANCEL, Gtk.ResponseType.CANCEL)
btn.connect('clicked', self._onCancelClicked)
btn = dialog.add_button(guilabels.BTN_JUMP_TO, Gtk.ResponseType.APPLY)
btn.grab_default()
btn.connect('clicked', self._onJumpToClicked)
self._activateButton = dialog.add_button(
guilabels.ACTIVATE, Gtk.ResponseType.OK)
self._activateButton.connect('clicked', self._onActivateClicked)
self._tree.connect('key-release-event', self._onKeyRelease)
self._tree.connect('cursor-changed', self._onCursorChanged)
self._tree.set_search_column(2)
return dialog
def showGUI(self):
self._gui.show_all()
ts = orca_state.lastInputEvent.timestamp
if ts == 0:
ts = Gtk.get_current_event_time()
self._gui.present_with_time(ts)
def _onCursorChanged(self, widget):
obj, offset = self._getSelectedAccessibleAndOffset()
try:
action = obj.queryAction()
except:
self._activateButton.set_sensitive(False)
else:
self._activateButton.set_sensitive(action.get_nActions() > 0)
def _onKeyRelease(self, widget, event):
keycode = event.hardware_keycode
keymap = Gdk.Keymap.get_default()
entries_for_keycode = keymap.get_entries_for_keycode(keycode)
entries = entries_for_keycode[-1]
eventString = Gdk.keyval_name(entries[0])
if eventString == 'Return':
self._gui.activate_default()
def _onCancelClicked(self, widget):
self._gui.destroy()
def _onJumpToClicked(self, widget):
obj, offset = self._getSelectedAccessibleAndOffset()
self._gui.destroy()
self._script.utilities.setCaretPosition(obj, offset)
def _onActivateClicked(self, widget):
obj, offset = self._getSelectedAccessibleAndOffset()
self._gui.destroy()
self._script.utilities.setCaretPosition(obj, offset)
try:
action = obj.queryAction()
except:
debug.println(
debug.LEVEL_FINE, 'Could not perform action on %s' % obj)
else:
action.doAction(0)
def _getSelectedAccessibleAndOffset(self):
if not self._tree:
return None
selection = self._tree.get_selection()
if not selection:
return None
model, paths = selection.get_selected_rows()
if not paths:
return None
obj = model.get_value(model.get_iter(paths[0]), 0)
offset = model.get_value(model.get_iter(paths[0]), 1)
return obj, max(0, offset)
def showUI(title='', columnHeaders=[], rows=[()], selectedRow=0):
gui = OrcaNavListGUI(title, columnHeaders, rows, selectedRow)
gui.showGUI()
|
chrys87/orca-beep
|
src/orca/orca_gui_navlist.py
|
Python
|
lgpl-2.1
| 5,857
|
[
"ORCA"
] |
de38758f87eb049bd84a14c615c6c0a363ccf5d06a7701dc05ea347b4ae17e55
|
"""
Methods for interpolating data from structured data sets on Thetis fields.
Simple example of an atmospheric pressure interpolator:
.. code-block:: python
def to_latlon(x, y, positive_lon=False):
# Converts mesh (x,y) points to coordinates used in the atm data
lon, lat = coordsys_spcs.spcs2lonlat(x, y)
if positive_lon and lon < 0.0:
lon += 360.
return lat, lon
class WRFInterpolator(object):
# Interpolates WRF atmospheric model data on 2D fields
def __init__(self, function_space, atm_pressure_field, ncfile_pattern, init_date):
self.atm_pressure_field = atm_pressure_field
# object that interpolates forcing data from structured grid on the local mesh
self.grid_interpolator = NetCDFLatLonInterpolator2d(function_space, to_latlon)
# reader object that can read fields from netCDF files, applies spatial interpolation
self.reader = NetCDFSpatialInterpolator(self.grid_interpolator, ['prmsl'])
# object that can find previous/next time stamps in a collection of netCDF files
self.timesearch_obj = NetCDFTimeSearch(ncfile_pattern, init_date, NetCDFTimeParser)
# finally a linear interpolator class that performs linear interpolation in time
self.interpolator = LinearTimeInterpolator(self.timesearch_obj, self.reader)
def set_fields(self, time):
# Evaluates forcing fields at the given time
pressure = self.interpolator(time)
self.atm_pressure_field.dat.data_with_halos[:] = pressure
Usage:
.. code-block:: python
atm_pressure_2d = Function(solver_obj.function_spaces.P1_2d, name='atm pressure')
wrf_pattern = 'forcings/atm/wrf/wrf_air.2016_*_*.nc'
wrf_atm = WRFInterpolator(
solver_obj.function_spaces.P1_2d,
atm_pressure_2d, wrf_pattern, init_date)
simulation_time = 3600.
wrf_atm.set_fields(simulation_time)
"""
import glob
import os
from .timezone import *
from .log import *
import numpy as np
import scipy.spatial.qhull as qhull
import netCDF4
from abc import ABCMeta, abstractmethod
from firedrake import *
import re
import string
TIMESEARCH_TOL = 1e-6
class GridInterpolator(object):
"""
A reusable griddata interpolator object.
Usage:
.. code-block:: python
interpolator = GridInterpolator(source_xyz, target_xyz)
vals = interpolator(source_data)
Example:
.. code-block:: python
x0 = np.linspace(0, 10, 10)
y0 = np.linspace(5, 10, 10)
X, Y = np.meshgrid(x0, y0)
x = X.ravel(); y = Y.ravel()
data = x + 25.*y
x_target = np.linspace(1, 10, 20)
y_target = np.linspace(5, 10, 20)
interpolator = GridInterpolator(np.vstack((x, y)).T, np.vstack((x_target, y_target)).T)
vals = interpolator(data)
Based on
http://stackoverflow.com/questions/20915502/speedup-scipy-griddata-for-multiple-interpolations-between-two-irregular-grids
"""
def __init__(self, grid_xyz, target_xyz, fill_mode=None, fill_value=np.nan,
normalize=False, dont_raise=False):
"""
:arg grid_xyz: Array of source grid coordinates, shape (npoints, 2) or
(npoints, 3)
:arg target_xyz: Array of target grid coordinates, shape (n, 2) or
(n, 3)
:kwarg fill_mode: Determines how points outside the source grid will be
treated. If 'nearest', value of the nearest source point will be
used. Otherwise a constant fill value will be used (default).
:kwarg float fill_value: Set the fill value (default: NaN)
:kwarg bool normalize: If true the data is scaled to unit cube before
interpolation. Default: False.
:kwarg bool dont_raise: Do not raise a Qhull error if triangulation
fails. In this case the data will be set to fill value or nearest
neighbor value.
"""
self.fill_value = fill_value
self.fill_mode = fill_mode
self.normalize = normalize
self.fill_nearest = self.fill_mode == 'nearest'
self.shape = (target_xyz.shape[0], )
ngrid_points = grid_xyz.shape[0]
if self.fill_nearest:
assert ngrid_points > 0, 'at least one source point is needed'
if self.normalize:
def get_norm_params(x, scale=None):
min = x.min()
max = x.max()
if scale is None:
scale = max - min
a = 1./scale
b = -min*a
return a, b
ax, bx = get_norm_params(target_xyz[:, 0])
ay, by = get_norm_params(target_xyz[:, 1])
az, bz = get_norm_params(target_xyz[:, 2])
self.norm_a = np.array([ax, ay, az])
self.norm_b = np.array([bx, by, bz])
ngrid_xyz = self.norm_a*grid_xyz + self.norm_b
ntarget_xyz = self.norm_a*target_xyz + self.norm_b
else:
ngrid_xyz = grid_xyz
ntarget_xyz = target_xyz
self.cannot_interpolate = False
try:
d = ngrid_xyz.shape[1]
tri = qhull.Delaunay(ngrid_xyz)
# NOTE this becomes expensive in 3D for npoints > 10k
simplex = tri.find_simplex(ntarget_xyz)
vertices = np.take(tri.simplices, simplex, axis=0)
temp = np.take(tri.transform, simplex, axis=0)
delta = ntarget_xyz - temp[:, d]
bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
self.vtx = vertices
self.wts = np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))
self.outside = np.any(~np.isfinite(self.wts), axis=1)
self.outside += np.any(self.wts < 0, axis=1)
self.outside = np.nonzero(self.outside)[0]
self.fill_nearest *= len(self.outside) > 0
if self.fill_nearest:
# find nearest neighbor in the data set
from scipy.spatial import cKDTree
dist, ix = cKDTree(ngrid_xyz).query(ntarget_xyz[self.outside])
self.outside_to_nearest = ix
except qhull.QhullError as e:
if not dont_raise:
raise e
self.cannot_interpolate = True
if self.fill_nearest:
# find nearest neighbor in the data set
from scipy.spatial import cKDTree
dist, ix = cKDTree(ngrid_xyz).query(ntarget_xyz)
self.outside_to_nearest = ix
def __call__(self, values):
"""
Interpolate values defined on grid_xyz to target_xyz.
:arg values: Array of source values to interpolate, shape (npoints, )
Points outside the source grid are filled according to the fill_mode/fill_value given in the constructor.
"""
if self.cannot_interpolate:
if self.fill_nearest:
ret = values[self.outside_to_nearest]
else:
ret = np.ones(self.shape)*self.fill_value
else:
ret = np.einsum('nj,nj->n', np.take(values, self.vtx), self.wts)
if self.fill_nearest:
ret[self.outside] = values[self.outside_to_nearest]
else:
ret[self.outside] = self.fill_value
return ret
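# Self-contained sketch (added for illustration) of the fill_mode behaviour
# documented in GridInterpolator.__init__ above: target points outside the
# source triangulation pick up the value of the nearest source point.
def _example_grid_interpolator_fill():
    src = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    tgt = np.array([[0.5, 0.5], [2.0, 2.0]])  # second point lies outside the grid
    interp = GridInterpolator(src, tgt, fill_mode='nearest')
    vals = interp(np.array([0., 1., 2., 3.]))
    return vals  # vals[1] equals the value at the nearest source point (1, 1)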
class FileTreeReader(object):
"""
Abstract base class of file tree reader object
"""
@abstractmethod
def __call__(self, filename, time_index):
"""
Reads a data for one time step from the file
:arg str filename: a filename where to find the data (e.g. filename)
:arg int time_index: time index to read
:return: a list of floats or numpy.array_like objects
"""
pass
class NetCDFTimeSeriesReader(FileTreeReader):
"""
A simple netCDF reader that returns a time slice of the given variable.
This class does not interpolate the data in any way. Useful for
interpolating time series.
"""
def __init__(self, variable_list, time_variable_name='time'):
self.variable_list = variable_list
self.time_variable_name = time_variable_name
self.time_dim = None
self.ndims = None
def _detect_time_dim(self, ncfile):
assert self.time_variable_name in ncfile.dimensions
nc_var = ncfile[self.variable_list[0]]
assert self.time_variable_name in nc_var.dimensions
self.time_dim = nc_var.dimensions.index(self.time_variable_name)
self.ndims = len(nc_var.dimensions)
def _get_slice(self, time_index):
"""
Returns a slice object that extracts a single time index
"""
if self.ndims == 1:
return time_index
slice_list = [slice(None, None, None)]*self.ndims
slice_list[self.time_dim] = slice(time_index, time_index+1, None)
return slice_list
def __call__(self, filename, time_index):
"""
Reads a time_index from the data base
:arg str filename: netcdf file where to find the data
:arg int time_index: time index to read
:return: a float or numpy.array_like value
"""
assert os.path.isfile(filename), 'File not found: {:}'.format(filename)
with netCDF4.Dataset(filename) as ncfile:
if self.time_dim is None:
self._detect_time_dim(ncfile)
output = []
for var in self.variable_list:
values = ncfile[var][self._get_slice(time_index)]
output.append(values)
return output
def _get_subset_nodes(grid_x, grid_y, target_x, target_y):
"""
Returns the grid nodes that are necessary for interpolating onto target_x, y
"""
orig_shape = grid_x.shape
grid_xy = np.array((grid_x.ravel(), grid_y.ravel())).T
target_xy = np.array((target_x.ravel(), target_y.ravel())).T
tri = qhull.Delaunay(grid_xy)
simplex = tri.find_simplex(target_xy)
vertices = np.take(tri.simplices, simplex, axis=0)
nodes = np.unique(vertices.ravel())
nodes_x, nodes_y = np.unravel_index(nodes, orig_shape)
# x and y bounds for reading a subset of the netcdf data
ind_x = slice(nodes_x.min(), nodes_x.max() + 1)
ind_y = slice(nodes_y.min(), nodes_y.max() + 1)
return nodes, ind_x, ind_y
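# Small self-contained sketch (added for illustration) of _get_subset_nodes():
# only the structured-grid nodes needed to triangulate around the target
# points are kept, which keeps the netCDF reads performed below small.
def _example_subset_nodes():
    lon, lat = np.meshgrid(np.linspace(0., 10., 11), np.linspace(40., 50., 11))
    nodes, ind_x, ind_y = _get_subset_nodes(lon, lat,
                                            np.array([5.2]), np.array([44.7]))
    # the returned slices select a small sub-block of the 11 x 11 grid
    return lon[ind_x, ind_y], lat[ind_x, ind_y]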
class SpatialInterpolator():
"""
Abstract base class for spatial interpolators that read data from disk
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, function_space, to_latlon):
"""
:arg function_space: target Firedrake FunctionSpace
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
"""
pass
@abstractmethod
def interpolate(self, filename, variable_list, itime):
"""
Interpolates data from the given file at given time step
"""
pass
class SpatialInterpolator2d(SpatialInterpolator):
"""
Abstract spatial interpolator class that can interpolate onto a 2D Function
"""
__metaclass__ = ABCMeta
def __init__(self, function_space, to_latlon):
"""
:arg function_space: target Firedrake FunctionSpace
:arg to_latlon: Python function that converts local mesh coordinates to
latitude and longitude: 'lat, lon = to_latlon(x, y)'
"""
assert function_space.ufl_element().value_shape() == ()
# construct local coordinates
x, y = SpatialCoordinate(function_space.mesh())
fsx = Function(function_space).interpolate(x).dat.data_with_halos
fsy = Function(function_space).interpolate(y).dat.data_with_halos
mesh_lonlat = []
for node in range(len(fsx)):
lat, lon = to_latlon(fsx[node], fsy[node])
mesh_lonlat.append((lon, lat))
self.mesh_lonlat = np.array(mesh_lonlat)
self._initialized = False
def _create_interpolator(self, lat_array, lon_array):
"""
Create compact interpolator by finding the minimal necessary support
"""
self.nodes, self.ind_lon, self.ind_lat = _get_subset_nodes(
lon_array,
lat_array,
self.mesh_lonlat[:, 0],
self.mesh_lonlat[:, 1]
)
subset_lat = lat_array[self.ind_lon, self.ind_lat].ravel()
subset_lon = lon_array[self.ind_lon, self.ind_lat].ravel()
subset_lonlat = np.array((subset_lon, subset_lat)).T
self.grid_interpolator = GridInterpolator(subset_lonlat, self.mesh_lonlat)
self._initialized = True
# debug: plot subsets
# import matplotlib.pyplot as plt
# plt.plot(grid_lon_full, grid_lat_full, 'k.')
# plt.plot(grid_lonlat[:, 0], grid_lonlat[:, 1], 'b.')
# plt.plot(self.mesh_lonlat[:, 0], self.mesh_lonlat[:, 1], 'r.')
# plt.show()
@abstractmethod
def interpolate(self, filename, variable_list, time):
"""
Calls the interpolator object
"""
pass
class NetCDFLatLonInterpolator2d(SpatialInterpolator2d):
"""
Interpolates netCDF data on a local 2D unstructured mesh
The interpolator is constructed for a single netCDF file that defines the
source grid. Once the interpolator has been constructed, data can be read
from any file that uses the same grid.
This routine returns the data in numpy arrays.
Usage:
.. code-block:: python
fs = FunctionSpace(...)
myfunc = Function(fs, ...)
ncinterp2d = NetCDFLatLonInterpolator2d(fs, to_latlon, nc_filename)
val1, val2 = ncinterp2d.interpolate(nc_filename, ['var1', 'var2'], 10)
myfunc.dat.data_with_halos[:] = val1 + val2
"""
def interpolate(self, nc_filename, variable_list, itime):
"""
Interpolates data from a netCDF file onto Firedrake function space.
:arg str nc_filename: netCDF file to read
:arg variable_list: list of netCDF variable names to read
:arg int itime: time index to read
:returns: list of numpy.arrays corresponding to variable_list
"""
with netCDF4.Dataset(nc_filename, 'r') as ncfile:
if not self._initialized:
grid_lat = ncfile['lat'][:]
grid_lon = ncfile['lon'][:]
self._create_interpolator(grid_lat, grid_lon)
output = []
for var in variable_list:
assert var in ncfile.variables
# TODO generalize data dimensions, sniff from netcdf file
grid_data = ncfile[var][itime, self.ind_lon, self.ind_lat].ravel()
data = self.grid_interpolator(grid_data)
output.append(data)
return output
class NetCDFSpatialInterpolator(FileTreeReader):
"""
Wrapper class that provides FileTreeReader API for grid interpolators
"""
def __init__(self, grid_interpolator, variable_list):
self.grid_interpolator = grid_interpolator
self.variable_list = variable_list
def __call__(self, filename, time_index):
return self.grid_interpolator.interpolate(filename, self.variable_list, time_index)
class TimeParser(object):
"""
Abstract base class for time definition objects.
Defines the time span that a file (or data set) covers and provides a time
index search routine.
"""
@abstractmethod
def get_start_time(self):
"""Returns the first time stamp in the file/data set"""
pass
@abstractmethod
def get_end_time(self):
"""Returns the last time stamp in the file/data set"""
pass
@abstractmethod
def find_time_stamp(self, t, previous=False):
"""
Given time t, returns index of the next (previous) time stamp
raises IndexError if t is out of range, i.e.
t > self.get_end_time() or t < self.get_start_time()
"""
pass
class NetCDFTimeParser(TimeParser):
"""
Describes the time stamps stored in a netCDF file.
"""
scalars = {
'seconds': 1.0,
'days': 24*3600.0,
}
def __init__(self, filename, time_variable_name='time', allow_gaps=False,
verbose=False):
"""
Construct a new object by scraping data from the given netcdf file.
:arg str filename: name of the netCDF file to read
:kwarg str time_variable_name: name of the time variable in the netCDF
file (default: 'time')
:kwarg bool allow_gaps: if False, an error is raised if time step is
not constant.
"""
self.filename = filename
self.time_variable_name = time_variable_name
with netCDF4.Dataset(filename) as d:
time_var = d[self.time_variable_name]
assert 'units' in time_var.ncattrs(), 'Time does not have units; {:}'.format(self.filename)
unit_str = time_var.getncattr('units')
msg = 'Unknown time unit "{:}" in {:}'.format(unit_str, self.filename)
words = unit_str.split()
assert words[0] in ['days', 'seconds'], msg
self.time_unit = words[0]
self.time_scalar = self.scalars[self.time_unit]
assert words[1] == 'since', msg
if len(words) == 3:
# assuming format "days since 2000-01-01" in UTC
base_date_srt = words[2]
numbers = len(base_date_srt.split('-'))
assert numbers == 3, msg
try:
self.basetime = datetime.datetime.strptime(base_date_srt, '%Y-%m-%d').replace(tzinfo=pytz.utc)
except ValueError:
raise ValueError(msg)
if len(words) == 4:
# assuming format "days since 2000-01-01 00:00:00" in UTC
# or "days since 2000-01-01 00:00:00-10"
base_date_srt = ' '.join(words[2:4])
assert len(words[2].split('-')) == 3, msg
assert len(words[3].split(':')) == 3, msg
if len(words[3].split('-')) == 2:
base_date_srt = base_date_srt[:-3]
tz_offset = int(words[3][-3:])
timezone = FixedTimeZone(tz_offset, 'UTC{:}'.format(tz_offset))
else:
timezone = pytz.utc
try:
self.basetime = datetime.datetime.strptime(base_date_srt, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone)
except ValueError:
raise ValueError(msg)
self.time_array = datetime_to_epoch(self.basetime) + np.array(time_var[:]*self.time_scalar, dtype=float)
self.start_time = epoch_to_datetime(float(self.time_array[0]))
self.end_time = epoch_to_datetime(float(self.time_array[-1]))
self.time_step = np.mean(np.diff(self.time_array))
self.nb_steps = len(self.time_array)
if verbose:
print_output('Parsed file {:}'.format(filename))
print_output(' Time span: {:} -> {:}'.format(self.start_time, self.end_time))
print_output(' Number of time steps: {:}'.format(self.nb_steps))
if self.nb_steps > 1:
print_output(' Time step: {:} h'.format(self.time_step/3600.))
def get_start_time(self):
return self.start_time
def get_end_time(self):
return self.end_time
def find_time_stamp(self, t, previous=False):
t_epoch = datetime_to_epoch(t) if isinstance(t, datetime.datetime) else t
itime = np.searchsorted(self.time_array, t_epoch + TIMESEARCH_TOL) # next
if previous:
itime -= 1
if itime < 0:
raise IndexError('Requested time out of bounds {:} < {:} in {:}'.format(t_epoch, self.time_array[0], self.filename))
if itime >= len(self.time_array):
raise IndexError('Requested time out of bounds {:} > {:} in {:}'.format(t_epoch, self.time_array[-1], self.filename))
return itime
class TimeSearch(object):
"""
Base class for searching nearest time steps in a file tree or database
"""
@abstractmethod
def find(self, time, previous=False):
"""
Find a next (previous) time stamp from a given time
:arg float time: input time stamp
:arg bool previous: if True, look for last time stamp before requested
time. Otherwise returns next time stamp.
:return: a (filename, time_index, time) tuple
"""
pass
class NetCDFTimeSearch(TimeSearch):
"""
Finds a nearest time stamp in a collection of netCDF files.
"""
def __init__(self, file_pattern, init_date, netcdf_class, *args, **kwargs):
all_files = glob.glob(file_pattern)
assert len(all_files) > 0, 'No files found: {:}'.format(file_pattern)
self.netcdf_class = netcdf_class
self.init_date = init_date
self.sim_start_time = datetime_to_epoch(self.init_date)
self.verbose = kwargs.get('verbose', False)
dates = []
ncfiles = []
for fn in all_files:
nc = self.netcdf_class(fn, *args, **kwargs)
ncfiles.append(nc)
dates.append(nc.get_start_time())
sort_ix = np.argsort(dates)
self.files = np.array(all_files)[sort_ix]
self.ncfiles = np.array(ncfiles)[sort_ix]
self.start_datetime = np.array(dates)[sort_ix]
self.start_times = [(s - self.init_date).total_seconds() for s in self.start_datetime]
self.start_times = np.array(self.start_times)
if self.verbose:
print_output('{:}: Found time index:'.format(self.__class__.__name__))
for i in range(len(self.files)):
print_output('{:} {:} {:}'.format(i, self.files[i], self.start_times[i]))
nc = self.ncfiles[i]
print_output(' {:} -> {:}'.format(nc.start_time, nc.end_time))
if nc.nb_steps > 1:
print_output(' {:} time steps, dt = {:} s'.format(nc.nb_steps, nc.time_step))
else:
print_output(' {:} time steps'.format(nc.nb_steps))
def simulation_time_to_datetime(self, t):
return epoch_to_datetime(datetime_to_epoch(self.init_date) + t).astimezone(self.init_date.tzinfo)
def find(self, simulation_time, previous=False):
"""
Find file that contains the given simulation time
:arg float simulation_time: simulation time in seconds
:kwarg bool previous: if True finds previous existing time stamp instead
of next (default False).
:return: (filename, time index, simulation time) of found data
"""
err_msg = 'No file found for time {:}'.format(self.simulation_time_to_datetime(simulation_time))
ix = np.searchsorted(self.start_times, simulation_time + TIMESEARCH_TOL)
if ix > 0:
candidates = [ix-1, ix]
else:
candidates = [ix]
if ix + 1 < len(self.start_times):
candidates += [ix + 1]
itime = None
for i in candidates:
try:
nc = self.ncfiles[i]
itime = nc.find_time_stamp(self.sim_start_time + simulation_time, previous=previous)
time = nc.time_array[itime] - self.sim_start_time
break
except IndexError:
pass
if itime is None:
raise Exception(err_msg)
return self.files[i], itime, time
class DailyFileTimeSearch(TimeSearch):
"""
Treats a list of daily files as a time series.
File name pattern must be given as a string where the 4-digit year is
tagged with "{year:04d}", and 2-digit zero-padded month and year are tagged
with "{month:02d}" and "{day:02d}", respectively. The tags can be used
multiple times.
Example pattern:
'ncom/{year:04d}/s3d.glb8_2f_{year:04d}{month:02d}{day:02d}00.nc'
In this time search method the time stamps are parsed solely from the
filename, no other metadata is used. By default the data is assumed to be
centered at 12:00 UTC every day.
"""
def __init__(self, file_pattern, init_date, verbose=False,
center_hour=12, center_timezone=pytz.utc):
self.file_pattern = file_pattern
self.init_date = init_date
self.sim_start_time = datetime_to_epoch(self.init_date)
self.verbose = verbose
all_files = self._find_files()
dates = []
for fn in all_files:
d = self._parse_date(fn)
timestamp = datetime.datetime(d['year'], d['month'], d['day'],
center_hour, tzinfo=center_timezone)
dates.append(timestamp)
sort_ix = np.argsort(dates)
self.files = np.array(all_files)[sort_ix]
self.start_datetime = np.array(dates)[sort_ix]
self.start_times = [(s - self.init_date).total_seconds() for s in self.start_datetime]
self.start_times = np.array(self.start_times)
if self.verbose:
print_output('{:}: Found time index:'.format(self.__class__.__name__))
for i in range(len(self.files)):
print_output('{:} {:} {:}'.format(i, self.files[i], self.start_times[i]))
print_output(' {:}'.format(self.start_datetime[i]))
def _find_files(self):
"""Finds all files that match the given pattern."""
search_pattern = str(self.file_pattern)
search_pattern = search_pattern.replace(':02d}', ':}')
search_pattern = search_pattern.replace(':04d}', ':}')
search_pattern = search_pattern.format(year='*', month='*', day='*')
all_files = glob.glob(search_pattern)
assert len(all_files) > 0, 'No files found: {:}'.format(search_pattern)
return all_files
def _parse_date(self, filename):
"""
Parse year, month, day from filename using the given pattern.
"""
re_pattern = str(self.file_pattern)
re_pattern = re_pattern.replace('{year:04d}', r'(\d{4,4})')
re_pattern = re_pattern.replace('{month:02d}', r'(\d{2,2})')
re_pattern = re_pattern.replace('{day:02d}', r'(\d{2,2})')
o = re.findall(re_pattern, filename)
assert len(o) == 1, 'parsing date from filename failed\n {:}'.format(filename)
values = [int(v) for v in o[0]]
fmt = string.Formatter()
labels = [s[1] for s in fmt.parse(self.file_pattern) if s[1] is not None]
return dict(zip(labels, values))
def simulation_time_to_datetime(self, t):
return epoch_to_datetime(datetime_to_epoch(self.init_date) + t).astimezone(self.init_date.tzinfo)
def find(self, simulation_time, previous=False):
"""
Find file that contains the given simulation time
:arg float simulation_time: simulation time in seconds
:kwarg bool previous: if True finds previous existing time stamp instead
of next (default False).
:return: (filename, time index, simulation time) of found data
"""
err_msg = 'No file found for time {:}'.format(self.simulation_time_to_datetime(simulation_time))
ix = np.searchsorted(self.start_times, simulation_time + TIMESEARCH_TOL)
i = ix - 1 if previous else ix
assert i >= 0, err_msg
assert i < len(self.start_times), err_msg
itime = 0
time = self.start_times[i]
return self.files[i], itime, time
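# Editor's note -- a hedged sketch of how DailyFileTimeSearch expands its pattern (not
# part of the original module; the example file name is illustrative). For the pattern
# quoted in the class docstring:
#
#   pattern: 'ncom/{year:04d}/s3d.glb8_2f_{year:04d}{month:02d}{day:02d}00.nc'
#   glob:    'ncom/*/s3d.glb8_2f_***00.nc'   (each tag replaced by '*' in _find_files)
#   regex:   'ncom/(\d{4,4})/s3d.glb8_2f_(\d{4,4})(\d{2,2})(\d{2,2})00.nc'
#   parsed:  {'year': 2006, 'month': 5, 'day': 1} for '.../s3d.glb8_2f_2006050100.nc'
#
# Each file is then stamped at center_hour (12:00 by default) in center_timezone.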
class LinearTimeInterpolator(object):
"""
Interpolates time series in time
User must provide timesearch_obj that finds time stamps from
a file tree, and a reader that can read those time stamps into numpy arrays.
Previous/next data sets are cached in memory to avoid hitting disk every
time.
"""
def __init__(self, timesearch_obj, reader):
"""
:arg timesearch_obj: TimeSearch object
:arg reader: FileTreeReader object
"""
self.timesearch = timesearch_obj
self.reader = reader
self.cache = {}
def _get_from_cache(self, key):
"""
Fetch data set from cache, read if not present
"""
if key not in self.cache:
self.cache[key] = self.reader(key[0], key[1])
return self.cache[key]
def _clean_cache(self, keys_to_keep):
"""
Remove cached data sets that are no longer needed
"""
for key in list(self.cache.keys()):
if key not in keys_to_keep:
self.cache.pop(key)
def __call__(self, t):
"""
Interpolate at time t
:returns: list of numpy arrays
"""
prev_id = self.timesearch.find(t, previous=True)
next_id = self.timesearch.find(t, previous=False)
prev = self._get_from_cache(prev_id)
next = self._get_from_cache(next_id)
self._clean_cache([prev_id, next_id])
# interpolate
t_prev = prev_id[2]
t_next = next_id[2]
alpha = (t - t_prev)/(t_next - t_prev)
RELTOL = 1e-6
assert alpha >= 0.0 - RELTOL and alpha <= 1.0 + RELTOL, \
'Value {:} out of range {:} .. {:}'.format(t, t_prev, t_next)
val = [(1.0 - alpha)*p + alpha*n for p, n in zip(prev, next)]
return val
class NetCDFTimeSeriesInterpolator(object):
"""
Reads and interpolates scalar time series from a sequence of netCDF files.
"""
def __init__(self, ncfile_pattern, variable_list, init_date,
time_variable_name='time', scalars=None, allow_gaps=False):
"""
:arg str ncfile_pattern: file search pattern, e.g. "mydir/foo_*.nc"
:arg variable_list: list of netCDF variable names to read
:arg datetime.datetime init_date: simulation start time
:kwarg scalars: (optional) list of scalars; scale output variables by
a factor.
.. note::
All the variables must have the same dimensions in the netCDF files.
If the shapes differ, create separate interpolator instances.
"""
self.reader = NetCDFTimeSeriesReader(
variable_list, time_variable_name=time_variable_name)
self.timesearch_obj = NetCDFTimeSearch(
ncfile_pattern, init_date, NetCDFTimeParser,
time_variable_name=time_variable_name, allow_gaps=allow_gaps)
self.time_interpolator = LinearTimeInterpolator(self.timesearch_obj, self.reader)
if scalars is not None:
assert len(scalars) == len(variable_list)
self.scalars = scalars
def __call__(self, time):
"""
Time series at the given time
:returns: list of scalars or numpy.arrays
"""
vals = self.time_interpolator(time)
if self.scalars is not None:
for i in range(len(vals)):
vals[i] *= self.scalars[i]
return vals
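# Editor's note: a minimal, hedged usage sketch of the classes above (not part of the
# original module). File names, the variable name and the dates are illustrative
# assumptions only:
#
#   import datetime
#   import pytz
#   init_date = datetime.datetime(2016, 1, 1, tzinfo=pytz.utc)
#   interp = NetCDFTimeSeriesInterpolator(
#       'forcing/river_flux_*.nc',    # netCDF files with a 'time' variable and 'flux'
#       ['flux'], init_date, scalars=[1.0e-3])
#   flux = interp(3600.0)[0]          # value 1 h after init_date, linearly interpolated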
|
tkarna/cofs
|
thetis/interpolation.py
|
Python
|
mit
| 31,094
|
[
"NetCDF"
] |
1aa9164af59dd034291167ee68430ccefbbb6193739a71eb51d8b99702571a5d
|
# This file is part of DmpBbo, a set of libraries and programs for the
# black-box optimization of dynamical movement primitives.
# Copyright (C) 2018 Freek Stulp
#
# DmpBbo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DmpBbo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DmpBbo. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import sys
import os
lib_path = os.path.abspath('../../python/')
sys.path.append(lib_path)
from dmp.Trajectory import Trajectory
from functionapproximators.Parameterizable import Parameterizable
from dynamicalsystems.DynamicalSystem import DynamicalSystem
from dynamicalsystems.ExponentialSystem import ExponentialSystem
from dynamicalsystems.SigmoidSystem import SigmoidSystem
from dynamicalsystems.TimeSystem import TimeSystem
from dynamicalsystems.SpringDamperSystem import SpringDamperSystem
class Dmp(DynamicalSystem,Parameterizable):
def __init__(self, tau, y_init, y_attr, function_apps, name="Dmp", sigmoid_max_rate=-20,forcing_term_scaling="NO_SCALING"):
super().__init__(1, tau, y_init, y_attr, name)
dim_orig = self.dim_orig_
self.goal_system_ = ExponentialSystem(tau,y_init,y_attr,15,'goal')
self.gating_system_ = SigmoidSystem(tau,np.ones(1),sigmoid_max_rate,0.9*tau,'gating')
self.phase_system_ = TimeSystem(tau,False,'phase')
alpha = 20.0
self.spring_system_ = SpringDamperSystem(tau,y_init,y_attr,alpha)
self.function_approximators_ = function_apps
self.forcing_term_scaling_ = forcing_term_scaling
self.ts_train_ = None
# Make room for the subsystems
self.dim_ = 3*dim_orig+2
self.SPRING = np.arange(0*dim_orig+0, 0*dim_orig+0 +2*dim_orig)
self.SPRING_Y = np.arange(0*dim_orig+0, 0*dim_orig+0 +dim_orig)
self.SPRING_Z = np.arange(1*dim_orig+0, 1*dim_orig+0 +dim_orig)
self.GOAL = np.arange(2*dim_orig+0, 2*dim_orig+0 +dim_orig)
self.PHASE = np.arange(3*dim_orig+0, 3*dim_orig+0 + 1)
self.GATING = np.arange(3*dim_orig+1, 3*dim_orig+1 + 1)
#print(self.SPRING)
#print(self.SPRING_Y)
#print(self.SPRING_Z)
#print(self.GOAL)
#print(self.PHASE)
#print(self.GATING)
def set_tau(self,tau):
self.tau_ = tau
# Set value in all relevant subsystems also
self.spring_system_.set_tau(tau)
if self.goal_system_:
self.goal_system_.set_tau(tau)
self.phase_system_ .set_tau(tau)
self.gating_system_.set_tau(tau)
def integrateStart(self):
x = np.zeros(self.dim_)
xd = np.zeros(self.dim_)
# Start integrating goal system if it exists
if self.goal_system_ is None:
# No goal system, simply set goal state to attractor state
x[self.GOAL] = self.attractor_state_
xd[self.GOAL] = 0.0
else:
# Goal system exists. Start integrating it.
(x[self.GOAL],xd[self.GOAL]) = self.goal_system_.integrateStart()
# Set the attractor state of the spring system
self.spring_system_.set_attractor_state(x[self.GOAL])
# Start integrating all further subsystems
(x[self.SPRING],xd[self.SPRING]) = self.spring_system_.integrateStart()
(x[self.PHASE ],xd[self.PHASE ]) = self.phase_system_.integrateStart()
(x[self.GATING],xd[self.GATING]) = self.gating_system_.integrateStart()
# Add rates of change
xd = self.differentialEquation(x)
return (x,xd)
def differentialEquation(self,x):
n_dims = self.dim_
xd = np.zeros(x.shape)
if self.goal_system_ is None:
# If there is no dynamical system for the delayed goal, the goal is
# simply the attractor state
self.spring_system_.set_attractor_state(self.attractor_state_)
# with zero change
xd_goal = np.zeros(n_dims)
else:
# Integrate goal system and get current goal state
self.goal_system_.set_attractor_state(self.attractor_state_)
x_goal = x[self.GOAL]
xd[self.GOAL] = self.goal_system_.differentialEquation(x_goal)
# The goal state is the attractor state of the spring-damper system
self.spring_system_.set_attractor_state(x_goal)
# Integrate spring damper system
#Forcing term is added to spring_state later
xd[self.SPRING] = self.spring_system_.differentialEquation(x[self.SPRING])
# Non-linear forcing term phase and gating systems
xd[self.PHASE] = self.phase_system_.differentialEquation(x[self.PHASE])
xd[self.GATING] = self.gating_system_.differentialEquation(x[self.GATING])
fa_output = self.computeFunctionApproximatorOutput(x[self.PHASE])
# Gate the output of the function approximators
gating = x[self.GATING]
forcing_term = gating*fa_output
# Scale the forcing term, if necessary
if (self.forcing_term_scaling_=="G_MINUS_Y0_SCALING"):
g_minus_y0 = (self.attractor_state_-self.initial_state_)
forcing_term = forcing_term*g_minus_y0
elif (self.forcing_term_scaling_=="AMPLITUDE_SCALING"):
forcing_term = forcing_term*self.trajectory_amplitudes_
# Add forcing term to the ZD component of the spring state
xd[self.SPRING_Z] += np.squeeze(forcing_term)/self.tau_
return xd
def computeFunctionApproximatorOutput(self,phase_state):
n_time_steps = phase_state.size
n_dims = self.dim_orig_
fa_output = np.zeros([n_time_steps,n_dims])
for i_fa in range(n_dims):
if self.function_approximators_[i_fa]:
if self.function_approximators_[i_fa].isTrained():
fa_output[:,i_fa] = self.function_approximators_[i_fa].predict(phase_state)
return fa_output
def analyticalSolution(self,ts=None):
if ts is None:
if self.ts_train_ is None:
print("Neither the argument 'ts' nor the member variable self.ts_train_ was set. Returning None.")
return None
else:
# Set the times to the ones the Dmp was trained on.
ts = self.ts_train_
n_time_steps = ts.size
# INTEGRATE SYSTEMS ANALYTICALLY AS MUCH AS POSSIBLE
# Integrate phase
( xs_phase, xds_phase) = self.phase_system_.analyticalSolution(ts)
# Compute gating term
( xs_gating, xds_gating ) = self.gating_system_.analyticalSolution(ts)
# Compute the output of the function approximator
fa_outputs = self.computeFunctionApproximatorOutput(xs_phase)
# Gate the output to get the forcing term
forcing_terms = fa_outputs*xs_gating
# Scale the forcing term, if necessary
if (self.forcing_term_scaling_=="G_MINUS_Y0_SCALING"):
g_minus_y0 = (self.attractor_state_-self.initial_state_)
g_minus_y0_rep = np.tile(g_minus_y0,(n_time_steps,1))
forcing_terms *= g_minus_y0_rep
elif (self.forcing_term_scaling_=="AMPLITUDE_SCALING"):
trajectory_amplitudes_rep = np.tile(self.trajectory_amplitudes_,(n_time_steps,1))
forcing_terms *= trajectory_amplitudes_rep
# Get current delayed goal
if self.goal_system_ is None:
# If there is no dynamical system for the delayed goal, the goal is
# simply the attractor state
xs_goal = np.tile(self.attractor_state_,(n_time_steps,1))
# with zero change
xds_goal = np.zeros(xs_goal.shape)
else:
# Integrate goal system and get current goal state
(xs_goal,xds_goal) = self.goal_system_.analyticalSolution(ts)
xs = np.zeros([n_time_steps,self.dim_])
xds = np.zeros([n_time_steps,self.dim_])
xs[:,self.GOAL] = xs_goal
xds[:,self.GOAL] = xds_goal
xs[:,self.PHASE] = xs_phase
xds[:,self.PHASE] = xds_phase
xs[:,self.GATING] = xs_gating
xds[:,self.GATING] = xds_gating
# THE REST CANNOT BE DONE ANALYTICALLY
# Reset the dynamical system, and get the first state
damping = self.spring_system_.damping_coefficient_
localspring_system = SpringDamperSystem(self.tau_,self.initial_state_,self.attractor_state_,damping)
# Set first attractor state
localspring_system.set_attractor_state(xs_goal[0,:])
# Start integrating spring damper system
(x_spring, xd_spring) = localspring_system.integrateStart()
# For convenience
SPRING = self.SPRING
SPRING_Y = self.SPRING_Y
SPRING_Z = self.SPRING_Z
t0 = 0
xs[t0,SPRING] = x_spring
xds[t0,SPRING] = xd_spring
# Add forcing term to the acceleration of the spring state
xds[0,SPRING_Z] = xds[0,SPRING_Z] + forcing_terms[t0,:]/self.tau_
for tt in range(1,n_time_steps):
dt = ts[tt]-ts[tt-1]
# Euler integration
xs[tt,SPRING] = xs[tt-1,SPRING] + dt*xds[tt-1,SPRING]
# Set the attractor state of the spring system
localspring_system.set_attractor_state(xs[tt,self.GOAL])
# Integrate spring damper system
xds[tt,SPRING] = localspring_system.differentialEquation(xs[tt,SPRING])
# If necessary add a perturbation. May be useful for some off-line tests.
#RowVectorXd perturbation = RowVectorXd::Constant(dim_orig(),0.0)
#if (analytical_solution_perturber_!=NULL)
# for (int i_dim=0 i_dim<dim_orig() i_dim++)
# // Sample perturbation from a normal Gaussian distribution
# perturbation(i_dim) = (*analytical_solution_perturber_)()
# Add forcing term to the acceleration of the spring state
xds[tt,SPRING_Z] = xds[tt,SPRING_Z] + forcing_terms[tt,:]/self.tau_ #+ perturbation
# Compute y component from z
xds[tt,SPRING_Y] = xs[tt,SPRING_Z]/self.tau_
return ( xs, xds, forcing_terms, fa_outputs)
def train(self,trajectory):
# Set tau, initial_state and attractor_state from the trajectory
self.set_tau(trajectory.ts_[-1])
self.set_initial_state(trajectory.ys_[0,:])
self.set_attractor_state(trajectory.ys_[-1,:])
# This needs to be computed for (optional) scaling of the forcing term.
# Needs to be done BEFORE computeFunctionApproximatorInputsAndTargets
self.trajectory_amplitudes_ = trajectory.getRangePerDim()
(fa_input_phase, f_target) = self.computeFunctionApproximatorInputsAndTargets(trajectory)
for dd in range(self.dim_orig_):
fa_target = f_target[:,dd]
self.function_approximators_[dd].train(fa_input_phase,fa_target)
# Save the times steps on which the Dmp was trained.
# This is just a convenience function to be able to call
# analyticalSolution without the "ts" argument.
self.ts_train_ = trajectory.ts_
def computeFunctionApproximatorInputsAndTargets(self,trajectory):
n_time_steps = trajectory.ts_.size
dim_data = trajectory.dim_
assert(self.dim_orig_==dim_data)
(xs_ana,xds_ana,forcing_terms, fa_outputs) = self.analyticalSolution(trajectory.ts_)
xs_goal = xs_ana[:,self.GOAL]
xs_gating = xs_ana[:,self.GATING]
xs_phase = xs_ana[:,self.PHASE]
fa_inputs_phase = xs_phase
# Get parameters from the spring-dampers system to compute inverse
damping_coefficient = self.spring_system_.damping_coefficient_
spring_constant = self.spring_system_.spring_constant_
mass = self.spring_system_.mass_
# Usually, the spring-damper system of the DMP should have mass==1
assert(mass==1.0)
#Compute inverse
tau = self.tau_
f_target = tau*tau*trajectory.ydds_ + (spring_constant*(trajectory.ys_-xs_goal) + damping_coefficient*tau*trajectory.yds_)/mass
# Factor out gating term
for dd in range(self.dim_orig_):
f_target[:,dd] = f_target[:,dd]/np.squeeze(xs_gating)
# Factor out scaling
if (self.forcing_term_scaling_=="G_MINUS_Y0_SCALING"):
g_minus_y0 = (self.attractor_state_-self.initial_state_)
g_minus_y0_rep = np.tile(g_minus_y0,(n_time_steps,1))
f_target /= g_minus_y0_rep
elif (self.forcing_term_scaling_=="AMPLITUDE_SCALING"):
trajectory_amplitudes_rep = np.tile(self.trajectory_amplitudes_,(n_time_steps,1))
f_target /= trajectory_amplitudes_rep
return (fa_inputs_phase, f_target)
def statesAsTrajectory(self,ts, x_in, xd_in):
# Left column is time
return Trajectory(ts,x_in[:,self.SPRING_Y], xd_in[:,self.SPRING_Y], xd_in[:,self.SPRING_Z]/self.tau_)
def getParameterVectorSelected(self):
values = np.empty(0)
for fa in self.function_approximators_:
if fa.isTrained():
values = np.append(values,fa.getParameterVectorSelected())
return values
def setParameterVectorSelected(self,values):
size = self.getParameterVectorSelectedSize()
assert(len(values)==size)
offset = 0
for fa in self.function_approximators_:
if fa.isTrained():
cur_size = fa.getParameterVectorSelectedSize()
cur_values = values[offset:offset+cur_size]
fa.setParameterVectorSelected(cur_values)
offset += cur_size
def getParameterVectorSelectedSize(self):
size = 0
for fa in self.function_approximators_:
if fa.isTrained():
size += fa.getParameterVectorSelectedSize()
return size
def set_initial_state(self,initial_state):
assert(initial_state.size==self.dim_orig_)
super(Dmp,self).set_initial_state(initial_state)
# Set value in all relevant subsystems also
self.spring_system_.set_initial_state(initial_state)
if self.goal_system_:
self.goal_system_.set_initial_state(initial_state)
def set_attractor_state(self,attractor_state):
assert(attractor_state.size==self.dim_orig_)
super(Dmp,self).set_attractor_state(attractor_state)
# Set value in all relevant subsystems also
if self.goal_system_:
self.goal_system_.set_attractor_state(attractor_state)
# Do NOT do the following. The attractor state of the spring system is determined by the
# goal system
# self.spring_system_.set_attractor_state(attractor_state)
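# Editor's note: a minimal, hedged integration sketch (not part of the original file).
# It assumes `function_apps` is a list of dim_orig trained function approximators; the
# numbers below are illustrative only:
#
#   tau = 1.0
#   y_init = np.array([0.0, 0.0])
#   y_attr = np.array([1.0, 1.0])
#   dmp = Dmp(tau, y_init, y_attr, function_apps)
#   dt = 0.01
#   (x, xd) = dmp.integrateStart()
#   for _ in range(int(1.5*tau/dt)):      # plain Euler integration
#       x = x + dt*xd
#       xd = dmp.differentialEquation(x)
#   # x[dmp.SPRING_Y] now holds the DMP position y(t); compare statesAsTrajectory()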
|
stulp/dmpbbo
|
python/dmp/Dmp.py
|
Python
|
lgpl-2.1
| 15,749
|
[
"Gaussian"
] |
28c335a9bd003bd753e68563f835ac5fee37399432d299c5b7433932975a48c6
|
'''
Created on 24 Nov 2015
@author: wnm24546
'''
from scipy.constants import c, h, k, pi
from scipy.optimize import curve_fit
from collections import OrderedDict
import numpy as np
from Lucky.LuckyExceptions import BadModelStateException
#k is kb
class CalculationService(object):
def __init__(self, pp):
self.parentPresenter = pp
self.planckResults = (0, 0, 0, 0)
self.wienResults = (0, 0, 0, 0)
self.twoColResults = (0, 0, 0, 0)
#TODO Spawn calculations and plots in a separate thread
def createCalcs(self, dM, debug=False):
self.updateModel(dM)
self.dsCalcs = LuckyCalculations(self.dsData, self.dsCalib,
self.integConf, self.bulbTemp, "Downstream Measurement")
self.usCalcs = LuckyCalculations(self.usData, self.usCalib,
self.integConf, self.bulbTemp, "Upstream Measurement")
self.dsCalcs.runCalculations()
self.usCalcs.runCalculations()
self.updateResults()
#Create plot objects once we've got some data to plot
self.dsPlots = LuckyPlots(self.dsCalcs)
self.usPlots = LuckyPlots(self.usCalcs)
def updateCalcs(self):
#Perhaps add updateModel call?
self.dsCalcs.runCalculations()
self.usCalcs.runCalculations()
self.updateResults()
#Update the plots with new values from the calculations
self.dsPlots.updatePlots()
self.usPlots.updatePlots()
def updateResults(self):
def calculateResults(dsVal, usVal):
avs = (dsVal + usVal)/2
diff = abs(dsVal - usVal)
return [dsVal, usVal, avs, diff]
self.planckResults = calculateResults(self.dsCalcs.planckTemp, self.usCalcs.planckTemp)
self.wienResults = calculateResults(self.dsCalcs.wienTemp, self.usCalcs.wienTemp)
self.twoColResults = calculateResults(self.dsCalcs.twoColTemp, self.usCalcs.twoColTemp)
def updateModel(self, dM):
self.dsData, self.usData = self.openData(dM)
self.dsCalib, self.usCalib = self.openCalib(dM.calibType, dM.calibConfigData)
self.integConf = dM.integrationConf
self.bulbTemp = dM.calibConfigData.bulbTemp
def updateData(self, usData=None, dsData=None):
if (usData == None) and (dsData == None):
raise BadModelStateException("No data given for data update")
if dsData != None:
newData = np.loadtxt(dsData)
self.dsCalcs.update(data=newData)
if usData != None:
newData = np.loadtxt(usData)
self.usCalcs.update(data=newData)
def updateIntegration(self, integConf):
self.dsCalcs.update(integConf=integConf)
self.usCalcs.update(integConf=integConf)
def updateCalibration(self, calibType, calibConf):
self.dsCalib, self.usCalib = self.openCalib(calibType, calibConf)
self.bulbTemp = calibConf.bulbTemp
self.dsCalcs.update(calib=self.dsCalib, bulbTemp=self.bulbTemp)
self.usCalcs.update(calib=self.usCalib, bulbTemp=self.bulbTemp)
def openCalib(self, calibType, calibConfig):
calibFileLabels = calibConfig.calibFiles.keys()
dsCalib, usCalib = None, None
for i in range(len(calibType)):
if calibType[i] == 1:
dsCalib = str(calibConfig.calibFiles[calibFileLabels[2*i]])
usCalib = str(calibConfig.calibFiles[calibFileLabels[2*i+1]])
if None not in [dsCalib, usCalib]:
break
return np.loadtxt(dsCalib, unpack=True), np.loadtxt(usCalib, unpack=True)
def openData(self, dM):
return np.loadtxt(dM.usdsPair[0], unpack=True), np.loadtxt(dM.usdsPair[1], unpack=True)
def disposePlots(self):
self.dsPlots.dispose()
self.usPlots.dispose()
class LuckyCalculations(object): #TODO Make calcs use calcserv to get bulbTemp, integConf & calibset
def __init__(self, data, calib, integConf, bulbTemp, label, debug=False):
self.dataSet = data
self.calibSet = calib
self.intConf = integConf
self.bulbTemp = bulbTemp
self.label = label
self.planckPlotRange = [550, 900]
self.wienPlotRange = [1e9 / self.planckPlotRange[1], 1e9/self.planckPlotRange[0]]
#Prepare the data
self.normaliseData()
def update(self, data=None, integConf=None, calib=None, bulbTemp=None):
self.dataSet = data if (data is not None) else self.dataSet
self.intConf = integConf if (integConf is not None) else self.intConf
self.calibSet = calib if (calib is not None) else self.calibSet
self.bulbTemp = bulbTemp if (bulbTemp is not None) else self.bulbTemp
if (data is not None) or (calib is not None) or (bulbTemp is not None):
self.normaliseData()
if integConf is not None:
self.calculateRanges()
def normaliseData(self):
self.planckIdeal = self.planck(self.dataSet[0], 1, self.bulbTemp)
self.planckIdeal = np.reshape(self.planckIdeal, (1, len(self.planckIdeal)))
#This step adds the normalised dataset & concatenates it with the original data array
self.dataSet = np.concatenate((self.dataSet, self.dataSet[1] / self.calibSet[1] * self.planckIdeal), axis=0)
#We've changed the data so we need to recalculate the ranges:
self.calculateRanges()
def calculateRanges(self):
#Data sets for fitting or plotting, limited by integration range
self.invWL = 1e9 / self.dataSet[0]# For Wien function
self.invWLIntegLim = self.invWL[self.intConf[0]:self.intConf[1]]
self.wlIntegLim = self.dataSet[0][self.intConf[0]:self.intConf[1]]
self.RawIntegLim= self.dataSet[1][self.intConf[0]:self.intConf[1]]
self.normIntegLim = self.dataSet[2][self.intConf[0]:self.intConf[1]]
def runCalculations(self):
#Calculate functions over the range of data
self.wienData = self.wien(self.dataSet[0], self.dataSet[2])
self.wienDataIntegLim = self.wienData[self.intConf[0]:self.intConf[1]]
self.twoColData = self.twoColour(self.dataSet[0], self.dataSet[2], self.intConf[2])
self.twoColDataLim = self.twoColData[self.intConf[0]:self.intConf[1]] #twoColData limited between the integration boundaries
# modification: derive the two-colour histogram binning from the data range (30 K steps)
self.a = int(round(min(self.twoColDataLim)))
self.b = int(round(max(self.twoColDataLim)))
self.binning = range(self.a, self.b, 30)
#self.twoColHistFreq, self.twoColHistValues = np.histogram(self.twoColDataLim, bins=np.log(len(self.twoColDataLim))/np.log(2)+4], density=False)
self.twoColHistFreq, self.twoColHistValues = np.histogram(self.twoColDataLim, bins= self.binning, density=False)
#old
#self.twoColHistFreq, self.twoColHistValues = np.histogram(self.twoColDataLim, bins=range(1500,5000,1), density=False)
#self.twoColHistValues = np.delete(self.twoColHistValues, len(self.twoColHistFreq), 0)
#Do fits
self.fitPlanck()
self.fitWien()
self.fitHistogram()
def fitPlanck(self):
#Do some fitting for Planck...
###
self.planckFit, planckCov = curve_fit(self.planck, self.wlIntegLim, self.normIntegLim, [1,2000])
self.planckTemp = self.planckFit[1]
self.planckEmiss = self.planckFit[0]
#Planck with fit params(??)
self.planckFitData = self.planck(self.wlIntegLim, self.planckEmiss, self.planckTemp)
# New method implementing a sliding (moving) average; useful for the histogram fit
def moving_average(self, a, n=2) :
self.ret = np.cumsum(a, dtype=float)
self.ret[n:] = self.ret[n:] - self.ret[:-n]
return self.ret[n - 1:] / n
def fitWien(self):
#Do some fitting for Wien...
###
self.wienFit, wienCov = curve_fit(self.fWien, self.invWLIntegLim[(np.isfinite(self.wienDataIntegLim))], self.wienDataIntegLim[(np.isfinite(self.wienDataIntegLim))], p0=[1, self.planckTemp])
self.wienResidual = self.wienDataIntegLim - self.fWien(self.invWLIntegLim[(np.isfinite(self.wienDataIntegLim))], *self.wienFit)
self.wienTemp = self.wienFit[1]
def fitHistogram(self):
#Gaussian fit of two colour histogram
###
#print('averaged twocolhistvalues:')
#print self.moving_average(self.twoColHistValues)
self.histFit, histCov = curve_fit(self.gaus, self.moving_average(self.twoColHistValues), self.twoColHistFreq, p0=[1000,self.planckTemp,100])
self.twoColTemp = self.histFit[1]
self.twoColErr = self.histFit[2]
#old
#def fitHistogram(self):
#Gaussian fit of two colour histogram
###
#self.histFit, histCov = curve_fit(self.gaus, self.twoColHistValues, self.twoColHistFreq, p0=[1000,self.planckTemp,100])
#self.twoColTemp = self.histFit[1]
#self.twoColErr = self.histFit[2]
#Planck function
def planck(self, wavelength, emiss, temp):
wavelength = wavelength * 1e-9
return emiss / np.power(wavelength, 5) * (2 * pi * h * np.power(c, 2)) / np.expm1((h * c)/(k * wavelength * temp))
#Wien function
def wien(self, wavelength, intens):
wavelength = wavelength * 1e-9
return self.wienBase(np.power(wavelength, 5) * intens / (2 * pi * h * np.power(c, 2)))
#Linear Wien function
def fWien(self, wavelength, emiss, temp):
# wavelength = wavelength * 1e-9
return self.wienBase(emiss) - (1/temp) * wavelength
#Wien support function (this is just recycling code)
def wienBase(self, exponent):
return k / (h * c) * np.log(exponent)
#Two colour function
def twoColour(self, wavelength, intens, delta):
#wavelength = wavelength * 1e-9
nPoints = len(wavelength)
nWindows = nPoints - delta
twoCol = []
#def twoColCalc(wavelength, intens):
# return np.log(intens * np.power(wavelength, 5) / (2 * pi * h * np.power(c, 2))) * (k / (h *c))
for i in range(nWindows):
f1 = 1 / (wavelength[i]* 1e-9)
f2 = 1/ (wavelength[i + delta]* 1e-9)
i1 = np.log(intens[i]/2/pi/h/c**2/f1**5)*k/h/c #twoColCalc(wavelength[i], intens[i])
i2 = np.log(intens[i+delta]/2/pi/h/c**2/f2**5)*k/h/c #twoColCalc(wavelength[i + delta], intens[i+delta])
twoCol.append(abs((f2 - f1) / (i2 - i1)))
for i in range(nWindows, nPoints):
twoCol.append(float('nan'))
return twoCol
#Gaussian for fit
def gaus(self, x, a, x0, sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
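# Editor's note (hedged clarification, not part of the original file): the fits above
# rest on the Wien approximation of the Planck law. Writing the normalised intensity as
#
#   I(lambda) ~= (eps / lambda^5) * 2*pi*h*c^2 * exp(-h*c / (k*lambda*T))
#
# and applying the wien()/wienBase() transform gives
#
#   (k/(h*c)) * ln(I * lambda^5 / (2*pi*h*c^2)) = (k/(h*c)) * ln(eps) - (1/T) * (1/lambda)
#
# i.e. a straight line in 1/lambda with slope -1/T, which is exactly the linear model
# fWien() fits, so its second fitted parameter is the Wien temperature. twoColour()
# applies the same relation to pairs of samples separated by `delta`, so each pair yields
# a local temperature estimate; the histogram of those estimates is what gaus() is fitted to.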
###
import matplotlib.pyplot as plt
class LuckyPlots(object):
def __init__(self, calcs, debug=False):
if debug:
return
self.debug = debug
self.luckyCalcs = calcs
self.fig = plt.figure(self.luckyCalcs.label)
self.fig.suptitle(self.luckyCalcs.label, fontsize="16", weight="bold", color = 'b')
self.ax1 = self.fig.add_subplot(3, 2, 1)#Raw+Calib
self.ax2 = self.fig.add_subplot(3, 2, 3)#Planck
self.ax3 = self.fig.add_subplot(3, 2, 4)#Wien
self.ax3.xaxis.get_major_formatter().set_powerlimits((0, 1))
self.ax4 = self.fig.add_subplot(3, 2, 5)#2Colour
self.ax5 = self.fig.add_subplot(3, 2, 6)#Histogram
self.ax5.xaxis.get_major_formatter().set_powerlimits((0, 1))
self.ax6 = self.ax3.twinx()
#Layout settings for the plots
plt.subplots_adjust(wspace=0.3, hspace=0.7)
#One-time configuration of plots
self.ax1.set_title('Raw (blue) & Calibration Data (green)', fontsize= 13, style='italic', weight="bold")
self.ax1.set_xlabel('Wavelength [nm]', fontsize= 13)
self.ax1.grid(True, linestyle='-')
self.ax2.set_title('Planck Function Data', fontsize='13', style='italic', weight="bold")
self.ax2.set_xlabel('Wavelength [nm]', fontsize= 13)
self.ax3.set_ylabel("Planck Function [a.u.]", fontsize= 13)
#self.ax2.set_yticks([])
self.ax2.set_yticks([0.1, 0.3, 0.5, 0.7, 0.9])
self.ax3.set_title('Wien Function Data', fontsize='13', style='italic', weight="bold")
self.ax3.set_xlabel(r'1/Wavelength [m$^{-1}$]', fontsize= 13)
self.ax3.set_ylabel("Wien Function", fontsize= 13)
self.ax3.set_yticks([])
self.ax4.set_title('Two-Colour Plot', fontsize='13', style='italic', weight="bold")
self.ax4.set_xlabel('Wavelength [nm]', fontsize= 13)
self.ax4.set_ylabel('Temperature [K]', fontsize= 13)
self.ax4.grid(True, linestyle='-')
self.ax5.set_title('Two-colour Histogram', fontsize='13', style='italic', weight="bold")
self.ax5.set_xlabel('Temperature [K]', fontsize= 13)
self.ax5.set_ylabel('Counts [a.u.]', fontsize= 13)
self.ax6.set_ylabel('Wien Residual', color='g', fontsize= 13)
self.updatePlots(redraw=False)
#ax1 = calibration and raw spectrum
#ax2 = planck spectrum
#ax3 = wien
#ax4 = 2-col
#ax5 =histogram
#ax6 = residuals in subplot (3,2,4)
if not self.debug:
#Draw the plots if we're not debugging
plt.ion()
plt.show()
#Needed to make plt appear!
# http://stackoverflow.com/questions/28269157/plotting-in-a-non-blocking-way-with-matplotlib
plt.pause(0.001)
def updatePlots(self, redraw=True):
#Raw and calibration data subgraph
self.ax1.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[1],
self.luckyCalcs.dataSet[0], self.luckyCalcs.calibSet[1],'green',self.luckyCalcs.wlIntegLim,self.luckyCalcs.RawIntegLim,'red')
self.ax1.set_ylim(0, self.getYMax(self.luckyCalcs.dataSet[1], self.luckyCalcs.calibSet[1]))
# self.ax1.set_ylim(0,50000) #TODO Get max fn.
#Planck data subgraph
#self.ax2.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[2],
# self.luckyCalcs.wlIntegLim, self.luckyCalcs.planckFitData, 'red')
#self.ax2.set_xlim(*self.luckyCalcs.planckPlotRange)
#Planck data subgraph
self.ax2.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[2] / max(self.luckyCalcs.dataSet[2]),
self.luckyCalcs.wlIntegLim, self.luckyCalcs.planckFitData / max(self.luckyCalcs.dataSet[2]), 'red')
self.ax2.set_xlim(*self.luckyCalcs.planckPlotRange)
self.ax2.set_ylim([0, 1])
#Wien data subgraph
self.ax3.plot(self.luckyCalcs.invWL, self.luckyCalcs.wienData,
self.luckyCalcs.invWLIntegLim, self.luckyCalcs.fWien(self.luckyCalcs.invWLIntegLim,*self.luckyCalcs.wienFit), 'red')
self.ax3.set_xlim(*self.luckyCalcs.wienPlotRange)
#Two Colour data subgraph
self.ax4.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.twoColData, 'b:',
self.luckyCalcs.wlIntegLim, self.luckyCalcs.twoColDataLim, 'r:')
self.ax4.set_xlim(*self.luckyCalcs.planckPlotRange)
#self.ax4.set_ylim([np.amin(calcs.TwoColDataLim),np.amax(calcs.TwoColDataLim)])
#self.ax4.set_ylim(*calcs.twoColDataLim)
# new modification: centre the two-colour axis on the fitted temperature
self.ax4.set_ylim(self.luckyCalcs.twoColTemp - 500, self.luckyCalcs.twoColTemp + 500)
#Histogram subgraph
#old
#self.ax5.plot(self.luckyCalcs.twoColHistValues, self.luckyCalcs.twoColHistFreq,
# self.luckyCalcs.twoColHistValues, self.luckyCalcs.gaus(self.luckyCalcs.twoColHistValues, *self.luckyCalcs.histFit), 'red')
#modifica
self.ax5.hist(self.luckyCalcs.twoColDataLim, self.luckyCalcs.binning)
self.ax5.plot(self.luckyCalcs.twoColHistValues, self.luckyCalcs.gaus(self.luckyCalcs.twoColHistValues, *self.luckyCalcs.histFit), 'red')
#
self.ax5.set_xlim([self.luckyCalcs.twoColTemp - 400, self.luckyCalcs.twoColTemp + 400])
#self.ax5.set_xlim(1800,4000)
#Residual subgraph of the Wien
ordin = len(self.luckyCalcs.invWL)*[0]
self.ax6.plot(self.luckyCalcs.invWLIntegLim, self.luckyCalcs.wienResidual,'green',self.luckyCalcs.invWL,ordin,'black')
#Create text label for calculated T values -OLD-
#textLabel = OrderedDict([("T"+r"$_{Planck}$","{0:10.2f}".format(self.luckyCalcs.planckTemp)),
# ("T"+r"$_{Wien}$","{0:10.2f}".format(self.luckyCalcs.wienTemp)),
# ("T"+r"$_{Two Colour}$","{0:10.2f}".format(self.luckyCalcs.twoColTemp))])
#Create text label for calculated T values -modified-
textLabel = OrderedDict([("T"+r"$_{Planck}$" + "[K]","{0:9d}".format(int(self.luckyCalcs.planckTemp))),
("T"+r"$_{Wien}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.wienTemp))),
("T"+r"$_{2col}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])
self.errWienPlanck = (abs(self.luckyCalcs.planckTemp - self.luckyCalcs.wienTemp)/ (self.luckyCalcs.planckTemp))*100
self.std2col = self.luckyCalcs.twoColErr
textLabel1 = OrderedDict([
("ERR"+"$_{2col}$"+ "[K]","{0:9d}".format(int(self.std2col))),
("ERR"+"$_{W-P}$","{0:9.2f}".format(self.errWienPlanck))
])
# {"T"+r"$_{Planck}$" : "{0:10.2f}".format(self.luckyCalcs.planckTemp),
# "T"+r"$_{Wien}$" : "{0:10.2f}".format(self.luckyCalcs.wienTemp),
# "T"+r"$_{Two Colour}$":"{0:10.2f}".format(self.luckyCalcs.twoColTemp)}
labelPosition = (0.54, 0.85)
rowNr = 0
for label,tVal in textLabel.iteritems( ):
plt.figtext(labelPosition[0], labelPosition[1]-(0.05*rowNr), label, fontdict = None, size = 'large')
plt.figtext(labelPosition[0]+0.080, labelPosition[1]-(0.05*rowNr), tVal, fontdict = None, size = 'large')
rowNr += 1
labelPosition1 = (0.78, 0.85)
rowNr = 0
for label,tVal in textLabel1.iteritems( ):
if self.errWienPlanck < 1 or rowNr == 0 :
plt.figtext(labelPosition1[0], labelPosition1[1]-(0.05*rowNr), label, fontdict = None, size = 'large')
plt.figtext(labelPosition1[0]+0.080, labelPosition1[1]-(0.05*rowNr), tVal, fontdict = None, size = 'large')
else:
plt.figtext(labelPosition1[0], labelPosition1[1]-(0.05*rowNr), label, fontdict = None, size = 'large')
plt.figtext(labelPosition1[0]+0.080, labelPosition1[1]-(0.05*rowNr), tVal, fontdict = None, size = 'large', color = 'r')
rowNr += 1
if redraw and not self.debug:
plt.draw()
#Needed to make plt appear!
# http://stackoverflow.com/questions/28269157/plotting-in-a-non-blocking-way-with-matplotlib
plt.pause(0.001)
#Draws text label on plot
# txt=plt.text(4500,33,TP)
# txt1=plt.text(4200,33,'T=')
# txt2=plt.text(2000,17,TW)
# txt3=plt.text(1800,17,'T=')
# txt.set_size(15)
# txt1.set_size(15)
# txt2.set_size(15)
# txt3.set_size(15)
# fig.canvas.draw()
def getYMax(self, *data):
maxes = []
for dat in data:
maxes.append(np.amax(dat))
return max(maxes)*1.1
def dispose(self):
plt.close(self.luckyCalcs.label)
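# Editor's note: a minimal, hedged sketch of driving LuckyCalculations/LuckyPlots
# directly, without the CalculationService (not part of the original file; file names,
# the integration window and the bulb temperature are illustrative assumptions):
#
#   data = np.loadtxt('ds_spectrum.txt', unpack=True)       # wavelength, intensity, ...
#   calib = np.loadtxt('ds_calibration.txt', unpack=True)
#   integConf = [200, 800, 150]   # [start index, end index, two-colour delta]
#   calcs = LuckyCalculations(data, calib, integConf, 2436, "Downstream Measurement")
#   calcs.runCalculations()
#   plots = LuckyPlots(calcs)     # opens the interactive figure
#   print(calcs.planckTemp, calcs.wienTemp, calcs.twoColTemp)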
|
mtwharmby/lucky
|
Lucky/src_Mike_GUI_Total/Lucky/Calculations.py
|
Python
|
apache-2.0
| 20,095
|
[
"Gaussian"
] |
dab88bda50e83c5f74bd456d13ac943888cfc5e502b8046d549230eaa836ee6b
|
#!/home/elsa/Ureka/variants/common/bin/python
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from matplotlib.mlab import griddata
import matplotlib.pyplot as plt
import numpy as np
##By Elsa Johnson
##Heat filter algorithm by Sam Gerber (originally written for MATLAB)
##
##following MATLAB functions did not exist and had to be written in python:
##MATLAB function ---> Python function
##fspecial --> fspecialgauss2D
## --from stackoverflow http://stackoverflow.com/questions/17190649/how-to-obtain-a-gaussian-filter-in-python
##fspecial --> fspecialdisk
##Note: this routine has many parts designed to be cut and pasted into an ipython environment.
## Also start ipython with the --pylab flag; it just makes things easier.
##This version takes the output from readpolyphot, then calculates br and puts
## it into the br steps, plots various stages, saves to file and then shows the 3d version!
# args:
# file - input file
# res - size of each grid box/data point in units of pixels
# cut - color cut-off: anything smaller than the cutoff is considered blue and plotted as blue, and anything
# bigger than the cutoff is plotted red. I try to set this to Sun colors.
# Blim and rlim are the faintest surface brightnesses of each grid and anything fainter in either band will be 'green'
# overlap is in reference to the grids. If yes, that means the grids from polyphot overlap by 1/2 a grid
# outfile is outfile
##Example usage
#Z,Xn,Yn,color=subgrids('m81medreadpoly.txt',res=25,cut=.6, blim=23,rlim=22,overlap='No', outfile = 'unfilteredM81.txt')
#outarr = heatfilter(Z,val=2,outfile='m81filtered.txt')
#Then if you need further filtering you can use outarr in the heatfilter function again
def subgrids(file, res=25, cut=1, blim=23, rlim=22,overlap = 'No',outfile='out.txt'):
# galfile='m101output24nol.txt'
#res=24.
if (overlap == 'Yes'):
area = 4*res*res
elif (overlap == 'No'):
area = res*res
sbfactor = 2.5*np.log10(area)
xx,yy,bm,rm,ber,rer = np.genfromtxt(file, dtype=float, skip_header=1, unpack=True)
Xn=np.divide(np.subtract(xx,min(xx)),res) +1.
Yn=np.divide(np.subtract(yy,min(yy)),res) +1
mX=max(Xn)
mY=max(Yn)
xlen = len(Xn)
ylen = len(Yn)
br=[]
color = []
Z = []
b = bm+sbfactor
r = rm+sbfactor
br=np.subtract(b,r)
#Thinking about the cut off. If red is really red b/c blue is really faint
# Need to change this as per resolution:
# for 30 res (15, this was 23.1 and 22 resp)
for i in range(len(br)):
if br[i]<=cut:
if b[i]>=blim:
color.append('g')
Z.append(0.)
elif b[i]<blim:
color.append('b')
Z.append(abs(round(b[i],1)-blim))
elif br[i]>cut:
if r[i]>=rlim:
color.append('g')
Z.append(0.)
elif r[i]<rlim:
color.append('r')
Z.append(abs(round(r[i],1)-rlim))
#if you want to save to a text file at this point:
np.savetxt(outfile,np.column_stack((Xn,Yn,Z)),fmt=('%5.2f','%5.2f','%10.5f'))
Z = np.array(Z).reshape(mX,mY)
plt.figure()
#Below is for color
#imshow(Z)
#This is for grayscale
plt.imshow(Z,cmap = cm.Greys_r)
return Z,Xn,Yn,color
##Heat filter to get rid of stars
##First all of the special subroutines
import math
import scipy.ndimage as nd
from skimage.morphology import disk
def fspecialgauss2D(shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
## The statement below determines the machine
## epsilon - if gaussian is smaller than that
## set to 0.
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
## This notation states that it takes the input
## h and then it divides by the sum and returns h
if sumh != 0:
h /= sumh
return h
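# Editor's note (hedged): one way to sanity-check fspecialgauss2D against an existing
# library kernel, assuming scipy is available. Filtering a unit impulse with
# scipy.ndimage.gaussian_filter reproduces the same 3x3 weights up to the truncation of
# the (wider) kernel support scipy uses:
#
#   impulse = np.zeros((3, 3)); impulse[1, 1] = 1.0
#   approx = nd.gaussian_filter(impulse, sigma=0.7, mode='constant')
#   exact = fspecialgauss2D((3, 3), 0.7)
#   # `exact` sums to 1 over the 3x3 window; `approx` differs by a few percent because
#   # part of scipy's kernel mass falls outside the window.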
def fspecialdisk(r=3.0):
"""
This is matlab code translated into python code for fspecialdisk:
"""
cr = math.ceil(r-0.5)
x,y = np.mgrid[-cr:cr+1,-cr:cr+1]
mxxy = np.maximum(abs(x),abs(y))
mnxy = np.minimum(abs(x),abs(y))
m1 = np.zeros((2*r+1,2*r+1))
m1 = (r**2.< (mxxy+.5)**2. +(mnxy-.5)**2.)*(mnxy-0.5)+ \
np.nan_to_num((r**2.>=(mxxy+.5)**2. + \
(mnxy-.5)**2.)*np.sqrt(r**2.-(mxxy+0.5)**2.))
m2 = (r**2.>(mxxy-.5)**2. +(mnxy+.5)**2.)*(mnxy+0.5)+ \
(r**2.<=(mxxy-.5)**2. +(mnxy+.5)**2.)*np.sqrt(r**2.-(mxxy-0.5)**2.)
sgrid = ((r**2.)*(0.5*(np.arcsin(m2/r)-np.arcsin(m1/r))+ \
0.25*(np.sin(2*np.arcsin(m2/r))-np.sin(2*np.arcsin(m1/r))))- \
(mxxy-0.5)*(m2-m1)+(m1-mnxy+0.5))\
*(np.logical_or(np.logical_and((r**2.<(mxxy+.5)**2.+(mnxy+0.5)**2.), \
(r**2.>(mxxy-0.5)**2.+ (mnxy-0.5)**2.)),\
(((mnxy==0)&(mxxy-0.5<r)&(mxxy+.5>=r))))).astype(float)
sgrid =sgrid+((mxxy+.5)**2.+(mnxy+0.5)**2.<r**2.)
sgrid[cr,cr] = min(np.pi*r**2,np.pi/2)
if cr>0 and r>cr-0.5 and r**2.< (cr-0.5)**2.+0.25:
m1 =np.sqrt(r**2-(cr-.5)**2.)
m1n=m1/r
sg0=2*(r**2.*(0.5*np.arcsin(m1n)+0.25*np.sin(2*np.arcsin(m1n)))-m1*(cr-0.5))
sgrid[2*cr,cr]=sg0
sgrid[cr,2*cr]=sg0
sgrid[cr,0]=sg0
sgrid[0,cr]=sg0
sgrid[2*cr-1,cr]=sgrid[2*cr-1,cr]-sg0
sgrid[cr,2*cr-1]=sgrid[cr,2*cr-1]-sg0
sgrid[cr,1]=sgrid[cr,1]-sg0
sgrid[1,cr]=sgrid[1,cr]-sg0
sgrid[cr,cr]=min(sgrid[cr,cr],1)
h=sgrid/sgrid.sum()
return h
def heatfilter(Z,val=.6,outfile='filterout.txt' ):
## Here, image is Z data
im=Z
G=fspecialgauss2D((3,3),0.7)
##Apply gaussian filter
imB = nd.correlate(im,G,mode='reflect')
##Operate laplacian (del2 in matlab)
L=nd.filters.laplace(imB)
##Set all negative 2nd derivatives to 0
#This was commented out for some reason
#L(L<0)=0
##Use new gaussian filter
G=fspecialgauss2D((3,3),1)
##Apply this filter
L=nd.correlate(abs(L),G,mode='reflect')
##Call it a mask
mask=L
#mask[mask<15]=0
mask[mask<val]=0
mask[mask>0] = 1;
#Create a structuring element of a disk;
sel = disk(2)
#Apply structuring element to mask with imdilate
mask = nd.morphology.binary_dilation(mask,sel).astype(mask.dtype)
X=im
X[mask>0]=0
#X=(X/X.max())*255.##for movie
xd,yd = X.shape
#movie=zeros((xd,yd,5001))
G = fspecialdisk(1)
iter = 0
delta =1.
while iter<5001:
Xg = nd.correlate(X,G,mode='reflect')
Xtmp = X
Xtmp[mask>0]=Xg[mask>0]
# Xtmp = (Xtmp/X.max())*255.
delta = np.sum((X[mask>0]-Xtmp[mask>0])**2.)
# movie[:,:,iter]=X
iter=iter+1
X=Xtmp
#Uncomment this and all other movie references if you want to see your results as a movie
#Takes a lot of memory, so I don't use it.
#movie[:,:,iter:]= []
#movie=movie/255
#Note fspecialgauss2D can be changed: make the arguments larger for bigger grids.
G=fspecialgauss2D((3,3),0.5)
X = nd.correlate(X,G,mode='reflect')
plt.figure()
#for default color image uncomment below
#imshow(X)
# for gray scale
plt.imshow(X,cmap = cm.Greys_r)
#Okay now put this back into a file. This file will need to be translated into 3d printerfile
crap = X.ravel()
# Save to a basic txt file of image with data values:
np.savetxt(outfile,np.column_stack((Xn,Yn,crap)),fmt=('%5.2f','%5.2f','%10.5f'))
of = open('threedgal.txt','w')
for i in range(len(crap)):
of.write(str(Xn[i])+" "+str(Yn[i])+" "+str(color[i])+" "+str(round(crap[i],1))+"\n")
fig = plt.figure()
ax = fig.gca(projection='3d')
nx = max(Xn)-min(Xn) +1.
ny = max(Yn) - min(Yn) +1.
xi = np.linspace(min(Xn),max(Xn),nx)
yi = np.linspace(min(Yn),max(Yn),ny)
XX,YY = np.meshgrid(xi,yi)
ZZ = griddata(Xn,Yn,crap,xi,yi)
colors = np.array(color).reshape(max(Yn),max(Xn))
surf = ax.plot_surface(XX, YY, ZZ, rstride=1, cstride=1, facecolors=colors,linewidth=0, antialiased=True)
#This is to view from the top #change as necessary
ax.view_init(azim = 270, elev = 90)
ax.set_zlim3d(0,max(crap))
ax.w_zaxis.set_major_locator(LinearLocator(6))
plt.show()
return X
|
ElsaMJohnson/pythonprograms
|
ThreeDGalaxy/proc_3dgal.py
|
Python
|
mit
| 8,283
|
[
"Gaussian"
] |
be3564772b43a3cf8850d334bb46984102a4db17fbda44e0ccd7507d38675281
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for the modular DevTools build.
"""
from os import path
import os
try:
import simplejson as json
except ImportError:
import json
def read_file(filename):
with open(path.normpath(filename), 'rt') as input:
return input.read()
def write_file(filename, content):
if path.exists(filename):
os.remove(filename)
directory = path.dirname(filename)
if not path.exists(directory):
os.makedirs(directory)
with open(filename, 'wt') as output:
output.write(content)
def bail_error(message):
raise Exception(message)
def load_and_parse_json(filename):
try:
return json.loads(read_file(filename))
except:
print 'ERROR: Failed to parse %s' % filename
raise
def concatenate_scripts(file_names, module_dir, output_dir, output):
for file_name in file_names:
output.write('/* %s */\n' % file_name)
file_path = path.join(module_dir, file_name)
if not path.isfile(file_path):
file_path = path.join(output_dir, path.basename(module_dir), file_name)
output.write(read_file(file_path))
output.write(';')
class Descriptors:
def __init__(self, application_dir, application_descriptor, module_descriptors, has_html):
self.application_dir = application_dir
self.application = application_descriptor
self.modules = module_descriptors
self._cached_sorted_modules = None
self.has_html = has_html
def application_json(self):
result = dict()
result['modules'] = self.application.values()
result['has_html'] = self.has_html
return json.dumps(result)
def all_compiled_files(self):
files = {}
for name in self.modules:
module = self.modules[name]
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files[path.normpath(path.join(self.application_dir, name, script))] = True
return files.keys()
def module_compiled_files(self, name):
files = []
module = self.modules.get(name)
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files.append(script)
return files
def module_resources(self, name):
return [name + '/' + resource for resource in self.modules[name].get('resources', [])]
def sorted_modules(self):
if self._cached_sorted_modules:
return self._cached_sorted_modules
result = []
unvisited_modules = set(self.modules)
temp_modules = set()
def visit(parent, name):
if name not in unvisited_modules:
return None
if name not in self.modules:
return (parent, name)
if name in temp_modules:
bail_error('Dependency cycle found at module "%s"' % name)
temp_modules.add(name)
deps = self.modules[name].get('dependencies')
if deps:
for dep_name in deps:
bad_dep = visit(name, dep_name)
if bad_dep:
return bad_dep
unvisited_modules.remove(name)
temp_modules.remove(name)
result.append(name)
return None
while len(unvisited_modules):
for next in unvisited_modules:
break
failure = visit(None, next)
if failure:
# failure[0] can never be None
bail_error('Unknown module "%s" encountered in dependencies of "%s"' % (failure[1], failure[0]))
self._cached_sorted_modules = result
return result
def sorted_dependencies_closure(self, module_name):
visited = set()
def sorted_deps_for_module(name):
result = []
desc = self.modules[name]
deps = desc.get('dependencies', [])
for dep in deps:
result += sorted_deps_for_module(dep)
if name not in visited:
result.append(name)
visited.add(name)
return result
return sorted_deps_for_module(module_name)
class DescriptorLoader:
def __init__(self, application_dir):
self.application_dir = application_dir
def load_application(self, application_descriptor_name):
return self.load_applications([application_descriptor_name])
def load_applications(self, application_descriptor_names):
merged_application_descriptor = {}
all_module_descriptors = {}
has_html = False
for application_descriptor_name in application_descriptor_names:
module_descriptors = {}
application_descriptor_filename = path.join(self.application_dir, application_descriptor_name)
descriptor_json = load_and_parse_json(application_descriptor_filename)
application_descriptor = {desc['name']: desc for desc in descriptor_json['modules']}
has_html = descriptor_json['has_html']
for name in application_descriptor:
merged_application_descriptor[name] = application_descriptor[name]
for (module_name, module) in application_descriptor.items():
if module_descriptors.get(module_name):
bail_error('Duplicate definition of module "%s" in %s' % (module_name, application_descriptor_filename))
if not all_module_descriptors.get(module_name):
module_descriptors[module_name] = self._read_module_descriptor(module_name, application_descriptor_filename)
all_module_descriptors[module_name] = module_descriptors[module_name]
for module in module_descriptors.values():
deps = module.get('dependencies', [])
for dep in deps:
if dep not in application_descriptor:
bail_error('Module "%s" (dependency of "%s") not listed in application descriptor %s' % (dep, module['name'], application_descriptor_filename))
return Descriptors(self.application_dir, merged_application_descriptor, all_module_descriptors, has_html)
def _read_module_descriptor(self, module_name, application_descriptor_filename):
json_filename = path.join(self.application_dir, module_name, 'module.json')
if not path.exists(json_filename):
bail_error('Module descriptor %s referenced in %s is missing' % (json_filename, application_descriptor_filename))
module_json = load_and_parse_json(json_filename)
module_json['name'] = module_name
return module_json
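# Editor's note: a minimal, hedged sketch of how these helpers fit together (not part of
# the original script; the application directory and descriptor name are illustrative).
# DescriptorLoader reads the application descriptor plus each module's module.json, and
# Descriptors.sorted_modules() returns a dependency-respecting build order:
#
#   loader = DescriptorLoader('front_end')
#   descriptors = loader.load_application('inspector.json')
#   for module_name in descriptors.sorted_modules():
#       print 'would compile:', descriptors.module_compiled_files(module_name)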
|
danakj/chromium
|
third_party/WebKit/Source/devtools/scripts/modular_build.py
|
Python
|
bsd-3-clause
| 7,037
|
[
"VisIt"
] |
0fe78828d015498d55675992fd43ef0cd05f53e74e933c951f11e086903bca15
|
########################################################################
#
# (C) 2015, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
self._metadata = None
self._install_info = None
self._validate_certs = not galaxy.options.ignore_certs
display.debug('Validate TLS certificates: %s' % self._validate_certs)
self.options = galaxy.options
self.galaxy = galaxy
self.name = name
self.version = version
self.src = src or name
self.scm = scm
if path is not None:
if self.name not in path:
path = os.path.join(path, self.name)
self.path = path
else:
# use the first path by default
self.path = os.path.join(galaxy.roles_paths[0], self.name)
# create list of possible paths
self.paths = [x for x in galaxy.roles_paths]
self.paths = [os.path.join(x, self.name) for x in self.paths]
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
def __eq__(self, other):
return self.name == other.name
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
meta_path = os.path.join(self.path, self.META_MAIN)
if os.path.isfile(meta_path):
try:
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.utcnow().strftime("%c"),
)
if not os.path.exists(os.path.join(self.path, 'meta')):
os.makedirs(os.path.join(self.path, 'meta'))
info_path = os.path.join(self.path, self.META_INSTALL)
with open(info_path, 'w+') as f:
try:
self._install_info = yaml.safe_dump(info, f)
except:
return False
return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except:
pass
return False
def fetch(self, role_data):
"""
Downloads the archived role from github to a temp location
"""
if role_data:
# first grab the file and save it to a temp location
if "github_user" in role_data and "github_repo" in role_data:
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
display.display("- downloading role from %s" % archive_url)
try:
url_file = open_url(archive_url, validate_certs=self._validate_certs)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
temp_file.write(data)
data = url_file.read()
temp_file.close()
return temp_file.name
except Exception as e:
display.error("failed to download the file: %s" % str(e))
return False
def install(self):
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=self.options.keep_scm_meta, **self.spec)
elif self.src:
if os.path.isfile(self.src):
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role, and should only be installed using Ansible "
"Container" % self.name)
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
try:
loose_versions.sort()
except TypeError:
raise AnsibleError(
'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
'install.' % ', '.join([v.vstring for v in loose_versions])
)
self.version = str(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
self.name,
role_versions))
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the file downloaded was not a tar.gz")
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, "r:gz")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
if self.META_MAIN in member.name:
# Look for parent of meta/main.yml
# Due to possibility of sub roles each containing meta/main.yml
# look for shortest length parent
meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
if not meta_file:
archive_parent_dir = meta_parent_dir
meta_file = member
else:
if len(meta_parent_dir) < len(archive_parent_dir):
archive_parent_dir = meta_parent_dir
meta_file = member
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
# we strip off any higher-level directories for all of the files contained within
# the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other
# hand, do not have a parent directory at all.
installed = False
while not installed:
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not getattr(self.options, "force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
"want to put the role here." % self.path)
else:
os.makedirs(self.path)
# now we do the actual extraction to the path
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop any containing directory, as mentioned above
if member.isreg() or member.issym():
parts = member.name.replace(archive_parent_dir, "", 1).split(os.sep)
final_parts = []
for part in parts:
if part != '..' and '~' not in part and '$' not in part:
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
# write out the install info file for later use
self._write_galaxy_install_info()
installed = True
except OSError as e:
error = True
if e.errno == errno.EACCES and len(self.paths) > 1:
current = self.paths.index(self.path)
if len(self.paths) > current + 1:  # only fall back if there is another roles path left to try
self.path = self.paths[current + 1]
error = False
if error:
raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
# install succeeded; report it and clean up the downloaded archive
display.display("- %s was installed successfully" % str(self))
if not (self.src and os.path.isfile(self.src)):
try:
os.unlink(tmp_file)
except (OSError, IOError) as e:
display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
return True
return False
@property
def spec(self):
"""
Returns role spec info
{
'scm': 'git',
'src': 'http://git.example.com/repos/repo.git',
'version': 'v1.0',
'name': 'repo'
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
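# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module; guarded so importing
# the module is unaffected): how the LooseVersion sorting used in install()
# above picks the most recent role version. The version names are hypothetical.
if __name__ == '__main__':
    from distutils.version import LooseVersion
    names = ['v1.0.0', 'v1.2.0', 'v1.10.0']
    latest = str(sorted(LooseVersion(n) for n in names)[-1])
    # plain string sorting would have picked 'v1.2.0'; LooseVersion picks 'v1.10.0'
    print(latest)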
|
hkariti/ansible
|
lib/ansible/galaxy/role.py
|
Python
|
gpl-3.0
| 15,068
|
[
"Brian",
"Galaxy"
] |
4521e8243ecaacefb820eccb0a007288f16a41a57bd00cafc5b2dd20c706065d
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
import scipy.optimize
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/lattice_boltzmann/lattice_boltzmann_part2.py")
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
system = tutorial.system
def test_ballistic_regime(self):
for (tau_p, tau, msd) in zip(tutorial.tau_p_values,
tutorial.tau_results,
tutorial.msd_results):
popt, _ = scipy.optimize.curve_fit(
tutorial.quadratic, tau[:tau_p], msd[:tau_p])
residuals = msd[:tau_p] - tutorial.quadratic(tau[:tau_p], *popt)
np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-3)
def test_diffusion_coefficient(self):
D_val = tutorial.diffusion_results
D_ref = tutorial.KT / np.array(tutorial.gammas)
np.testing.assert_allclose(D_val, D_ref, rtol=0, atol=0.1)
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/scripts/tutorials/test_lattice_boltzmann_part2.py
|
Python
|
gpl-3.0
| 1,748
|
[
"ESPResSo"
] |
4afeff5540f88d8964e32ce0ca81344ceb1bafe647709258943c1ec021ecb993
|
#!/usr/bin/env python
# coding: utf-8
"""Tools for 3D displays with VTK."""
# Copyright (C) 2017-2019 Matthieu Ancellin
# See LICENSE file at <https://github.com/mancellin/capytaine>
from typing import Union
from capytaine.meshes.meshes import Mesh
from capytaine.meshes.collections import CollectionOfMeshes
from capytaine.tools.optional_imports import import_optional_dependency
vtk = import_optional_dependency("vtk")
def compute_vtk_polydata(mesh: Union[Mesh, CollectionOfMeshes]):
"""Transform a mesh into vtkPolydata."""
# Create a vtkPoints object and store the points in it
points = vtk.vtkPoints()
for point in mesh.vertices:
points.InsertNextPoint(point)
# Create a vtkCellArray to store faces
faces = vtk.vtkCellArray()
for face_ids in mesh.faces:
if face_ids[0] == face_ids[-1]:
# Triangle
curface = face_ids[:3]
vtk_face = vtk.vtkTriangle()
else:
# Quadrangle
curface = face_ids[:4]
vtk_face = vtk.vtkQuad()
for idx, id in enumerate(curface):
vtk_face.GetPointIds().SetId(idx, id)
faces.InsertNextCell(vtk_face)
vtk_polydata = vtk.vtkPolyData()
vtk_polydata.SetPoints(points)
vtk_polydata.SetPolys(faces)
return vtk_polydata
def compute_node_data(mesh: Union[Mesh, CollectionOfMeshes],
face_data):
"""Transform data defined at the center of the faces to data defined at the nodes of the mesh
by a simple averaging of the values of the neighboring faces.
Parameters
----------
mesh: Mesh or CollectionOfMeshes
the mesh on which the face face_data are defined
face_data: numpy array of shape (mesh.nb_faces, ...)
the data defined on the center of the faces of the mesh
Returns
-------
node_data: numpy array of shape (mesh.nb_vertices, ...)
the same data averaged on the nodes
"""
import numpy as np
mesh = mesh.merged()
assert face_data.shape[0] == mesh.nb_faces
# Initialize output array
node_data_shape = (mesh.vertices.shape[0], ) + face_data.shape[1:]
node_data = np.zeros(node_data_shape, dtype=complex)
# Keep track of the number of faces near each vertex
faces_near_nodes_shape = (mesh.vertices.shape[0], ) + (1, ) * len(face_data.shape[1:])
nb_faces_near_nodes = np.zeros(faces_near_nodes_shape, dtype=np.int8)
for i, vertices in enumerate(mesh.faces):
for vertex in vertices:
nb_faces_near_nodes[vertex] += 1
node_data[vertex, ...] += face_data[i, ...]
node_data /= nb_faces_near_nodes
return node_data
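# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; guarded so that
# importing the module is unaffected). It assumes the vtk package is installed
# and uses a minimal one-quad mesh built with the Mesh class imported above.
if __name__ == "__main__":
    import numpy as np
    vertices = np.array([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [1.0, 1.0, 0.0],
                         [0.0, 1.0, 0.0]])
    faces = np.array([[0, 1, 2, 3]])  # a single quadrangle
    demo_mesh = Mesh(vertices=vertices, faces=faces, name="demo_quad")
    polydata = compute_vtk_polydata(demo_mesh)
    face_pressure = np.array([1.0 + 0.0j])  # one (complex) value per face
    node_pressure = compute_node_data(demo_mesh, face_pressure)
    # every corner node touches the single face, so each node gets that face's value
    print(polydata.GetNumberOfPoints(), node_pressure.shape)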
|
mancellin/capytaine
|
capytaine/ui/vtk/helpers.py
|
Python
|
gpl-3.0
| 2,679
|
[
"VTK"
] |
3630282932026581d7d103f650a6819fc9db0f3eed590947668be5a8daac992a
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.eagle.plotgreybodyfit Plot a modified blackbody fit to
# the dust continuum emission for an EAGLE SKIRT-run.
#
# The facilities in this module serve to plot a modified blackbody fit to
# the dust continuum emission for a particular EAGLE SKIRT-run.
# ----------------------------------------------------------------------
# use a non-interactive back-end to generate high-quality vector graphics
import matplotlib
if matplotlib.get_backend().lower() != "pdf": matplotlib.use("pdf")
import matplotlib.pyplot as plt
# import standard modules
import os.path
import numpy as np
# import pts modules
from ..core.tools import archive as arch
from ..core.basics.filter import Filter
from ..core.basics.greybody import GreyBody, kappa350_Cortese
# ----------------------------------------------------------------------
## This function creates a PDF plot of a modified blackbody fit to
# the dust continuum emission for a particular EAGLE SKIRT-run,
# also listing the corresponding temperature.
# The output plot is placed in the SKIRT-run's visualization directory.
def plotgreybodyfit(skirtrun):
simulation = skirtrun.simulation()
# setup the figure
figure = plt.figure(figsize=(10,6))
plt.xscale('log')
plt.yscale('log')
# load and plot the total SED
filepath = simulation.seddatpaths()[0]
lambdav, fluxv = np.loadtxt(arch.opentext(filepath), usecols=(0,1), unpack=True)
lambdav = simulation.convert(lambdav, to_unit='micron', quantity='wavelength')
fluxv = simulation.convert(fluxv, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
plot = (lambdav>=10) & (lambdav<=1000)
plt.plot(lambdav[plot], fluxv[plot], color='b', label="SKIRT galaxy SED")
# load and plot the contributions from HII particles (stellar emission) and gas particles (dust emission)
# --> we do this inside a try block because these columns are not always available
try:
fstrdirv, fstrscav, ftotdusv = np.loadtxt(arch.opentext(filepath), usecols=(2,3,4), unpack=True)
fstrdirv = simulation.convert(fstrdirv, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
fstrscav = simulation.convert(fstrscav, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
ftotdusv = simulation.convert(ftotdusv, to_unit='Jy', quantity='fluxdensity', wavelength=lambdav)
plt.plot(lambdav[plot], fstrdirv[plot]+fstrscav[plot], color='c', ls="dashed", label=" contribution from HII regions")
plt.plot(lambdav[plot], ftotdusv[plot], color='y', ls="dashed", label=" contribution from other dust")
except:
pass
# load and plot the Herschel continuum data points (160, 250, 350, 500 micron)
info = { }
infofile = arch.listdir(skirtrun.vispath(), "_info.txt")[0]
for line in arch.opentext(os.path.join(skirtrun.vispath(),infofile)):
if not line.startswith("#"):
key,dummy,value = line.split(None, 2)
info[key] = float(value)
waves = np.array( [ Filter(fs).pivotwavelength() for fs in ("Pacs.red","SPIRE.PSW","SPIRE.PMW","SPIRE.PLW")] )
fluxes = np.array(( info['instr_xy_fluxdensity_pacs_red_continuum'],
info['instr_xy_fluxdensity_spire_psw_continuum'],
info['instr_xy_fluxdensity_spire_pmw_continuum'],
info['instr_xy_fluxdensity_spire_plw_continuum'] ))
sigmas = np.array(( 3,1,1,3 )) # pacs is less sensitive; longer wavelength fluxes are harder to measure
plt.scatter(waves, fluxes, color='r', marker='*', label="Mock PACS/SPIRE fluxes")
# fit a grey body to the Herschel fluxes and plot the result
greybody = GreyBody(simulation.instrumentdistance(), 2, kappa350_Cortese)
T,M = greybody.fit(waves, fluxes, sigmas)
plt.plot(lambdav[plot], greybody(lambdav[plot], T, M), color='m',
label=r"Grey body fit $T={:.2f},\,M_\mathrm{{dust}}={:.2e}\,M_\odot$".format(T,M))
# add axis labels, legend and title
plt.grid('on')
plt.xlabel(r"$\lambda\,(\mu \mathrm{m})$", fontsize='medium')
plt.ylabel(simulation.fluxlabel(), fontsize='medium')
plt.xlim(10, 1000)
ymax = fluxv[plot].max()
plt.ylim(ymax*1.1e-3, ymax*1.1)
plt.legend(loc='upper left', prop={'size':'small'})
plt.title("runid {} -- {}".format(skirtrun.runid(), skirtrun.prefix()), fontsize='medium')
# save the figure
plotpath = os.path.join(skirtrun.vispath(), skirtrun.prefix()+"_dust_body_fit.pdf")
plt.savefig(plotpath, bbox_inches='tight', pad_inches=0.25)
plt.close()
print "Created PDF plot file " + plotpath
# ----------------------------------------------------------------------
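# Illustrative usage sketch (not part of the original module). It assumes a
# SkirtRun-like object providing the methods used above (simulation(),
# vispath(), runid() and prefix()); the module path and run id are assumptions.
#
#   from pts.eagle.skirtrun import SkirtRun
#   skirtrun = SkirtRun(12345)
#   plotgreybodyfit(skirtrun)   # writes <prefix>_dust_body_fit.pdf to the run's visualization directory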
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/eagle/plotgreybodyfit.py
|
Python
|
mit
| 4,909
|
[
"Galaxy"
] |
4f560531b6b2b988ec67e1d3d176a758e8a4240b2f87ead0300f1d79316fc60c
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
from HTMLParser import HTMLParseError
from time import time
from urlparse import urlparse
import requests
from bs4 import BeautifulSoup
from app import logger
from http_cache import http_get
from http_cache import is_response_too_large
from oa_local import find_normalized_license
from open_location import OpenLocation
from util import NoDoiException
from util import elapsed
from util import get_link_target
from util import get_tree
from util import is_same_publisher
DEBUG_SCRAPING = os.getenv('DEBUG_SCRAPING', False)
# it matters that this only inspects the headers, because we call it even when the
# content is too large. if we ever start looking at the content, this will need to be split apart.
def is_pdf_from_header(response):
looks_good = False
for k, v in response.headers.iteritems():
if v:
key = k.lower()
val = v.lower()
if key == "content-type" and "application/pdf" in val:
looks_good = True
if key == 'content-disposition' and "pdf" in val:
looks_good = True
try:
if key == 'content-length' and int(val) < 128:
looks_good = False
break
except ValueError:
logger.error(u'got a nonnumeric content-length header: {}'.format(val))
looks_good = False
break
return looks_good
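# Illustrative sketch (not part of the original module): is_pdf_from_header()
# only needs an object exposing a requests-style ``headers`` mapping, so a
# minimal fake response is enough to exercise it, e.g.:
#
#   class _FakeResponse(object):
#       headers = {'Content-Type': 'application/pdf', 'Content-Length': '52433'}
#
#   is_pdf_from_header(_FakeResponse())   # -> True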
def is_a_pdf_page(response, page_publisher):
if is_pdf_from_header(response):
if DEBUG_SCRAPING:
logger.info(u"http header says this is a PDF {}".format(
response.request.url)
)
return True
# everything below here needs to look at the content
# so bail here if the page is too big
if is_response_too_large(response):
if DEBUG_SCRAPING:
logger.info(u"response is too big for more checks in is_a_pdf_page")
return False
content = response.content_big()
# PDFs start with this character
if re.match(u"%PDF", content):
return True
if page_publisher:
says_free_publisher_patterns = [
("Wiley-Blackwell", u'<span class="freeAccess" title="You have free access to this content">'),
("Wiley-Blackwell", u'<iframe id="pdfDocument"'),
("JSTOR", ur'<li class="download-pdf-button">.*Download PDF.*</li>'),
("Institute of Electrical and Electronics Engineers (IEEE)",
ur'<frame src="http://ieeexplore.ieee.org/.*?pdf.*?</frameset>'),
("IOP Publishing", ur'Full Refereed Journal Article')
]
for (publisher, pattern) in says_free_publisher_patterns:
matches = re.findall(pattern, content, re.IGNORECASE | re.DOTALL)
if is_same_publisher(page_publisher, publisher) and matches:
return True
return False
def is_a_word_doc_from_header(response):
looks_good = False
for k, v in response.headers.iteritems():
if v:
key = k.lower()
val = v.lower()
if key == "content-type" and (
"application/msword" in val or
"application/doc" in val or
"application/vnd.openxmlformats-officedocument.wordprocessingml.document" in val
):
looks_good = True
try:
if key == 'content-length' and int(val) < 512:
looks_good = False
break
except ValueError:
logger.error(u'got a nonnumeric content-length header: {}'.format(val))
looks_good = False
break
return looks_good
def is_a_word_doc(response):
if is_a_word_doc_from_header(response):
if DEBUG_SCRAPING:
logger.info(u"http header says this is a word doc {}".format(response.request.url))
return True
# everything below here needs to look at the content
# so bail here if the page is too big
if is_response_too_large(response):
if DEBUG_SCRAPING:
logger.info(u"response is too big for more checks in is_a_word_doc")
return False
content = response.content_big()
# docx: a zip container, whose end-of-central-directory record ("PK"...) sits 22 bytes from the end
if content[-22:].startswith('PK'):
return True
# doc: legacy OLE compound-file magic number
if content.startswith('\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'):
return True
return False
class Webpage(object):
def __init__(self, **kwargs):
self.url = None
self.scraped_pdf_url = None
self.scraped_open_metadata_url = None
self.scraped_license = None
self.error = ""
self.related_pub_doi = None
self.related_pub_publisher = None
self.match_type = None
self.session_id = None
self.endpoint_id = None
self.base_id = None
self.base_doc = None
self.resolved_url = None
self.r = None
for (k, v) in kwargs.iteritems():
self.__setattr__(k, v)
if not self.url:
self.url = u"http://doi.org/{}".format(self.doi)
# from https://stackoverflow.com/a/865272/596939
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
@property
def doi(self):
return self.related_pub_doi
# sometimes overriden, for publisherwebpage
@property
def ask_slowly(self):
return False
@property
def publisher(self):
return self.related_pub_publisher
def is_same_publisher(self, publisher):
return is_same_publisher(self.related_pub_publisher, publisher)
@property
def fulltext_url(self):
if self.scraped_pdf_url:
return self.scraped_pdf_url
if self.scraped_open_metadata_url:
return self.scraped_open_metadata_url
if self.is_open:
return self.url
return None
@property
def has_fulltext_url(self):
if self.scraped_pdf_url or self.scraped_open_metadata_url:
return True
return False
@property
def is_open(self):
# just having the license isn't good enough
if self.scraped_pdf_url or self.scraped_open_metadata_url:
return True
return False
def mint_open_location(self):
my_location = OpenLocation()
my_location.pdf_url = self.scraped_pdf_url
my_location.metadata_url = self.scraped_open_metadata_url
my_location.license = self.scraped_license
my_location.doi = self.related_pub_doi
my_location.evidence = self.open_version_source_string
my_location.match_type = self.match_type
my_location.pmh_id = self.base_id
my_location.endpoint_id = self.endpoint_id
my_location.base_doc = self.base_doc
my_location.error = ""
if self.is_open and not my_location.best_url:
my_location.metadata_url = self.url
return my_location
def set_r_for_pdf(self):
self.r = None
try:
self.r = http_get(url=self.scraped_pdf_url, stream=False, publisher=self.publisher, session_id=self.session_id, ask_slowly=self.ask_slowly)
except requests.exceptions.ConnectionError as e:
self.error += u"ERROR: connection error on {} in set_r_for_pdf: {}".format(self.scraped_pdf_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.Timeout as e:
self.error += u"ERROR: timeout error on {} in set_r_for_pdf: {}".format(self.scraped_pdf_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.exceptions.InvalidSchema as e:
self.error += u"ERROR: InvalidSchema error on {} in set_r_for_pdf: {}".format(self.scraped_pdf_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.exceptions.RequestException as e:
self.error += u"ERROR: RequestException in set_r_for_pdf"
logger.info(self.error)
except requests.exceptions.ChunkedEncodingError as e:
self.error += u"ERROR: ChunkedEncodingError error on {} in set_r_for_pdf: {}".format(self.scraped_pdf_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except NoDoiException as e:
self.error += u"ERROR: NoDoiException error on {} in set_r_for_pdf: {}".format(self.scraped_pdf_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except Exception as e:
self.error += u"ERROR: Exception error in set_r_for_pdf"
logger.exception(self.error)
def is_a_pdf_page(self):
return is_a_pdf_page(self.r, self.publisher)
def gets_a_pdf(self, link, base_url):
if is_purchase_link(link):
return False
absolute_url = get_link_target(link.href, base_url)
if DEBUG_SCRAPING:
logger.info(u"checking to see if {} is a pdf".format(absolute_url))
start = time()
try:
self.r = http_get(absolute_url, stream=True, publisher=self.publisher, session_id=self.session_id, ask_slowly=self.ask_slowly)
if self.r.status_code != 200:
if self.r.status_code in [401]:
# is unauthorized, so not open
pass
else:
self.error += u"ERROR: status_code={} on {} in gets_a_pdf".format(self.r.status_code, absolute_url)
return False
if self.is_a_pdf_page():
return True
except requests.exceptions.ConnectionError as e:
self.error += u"ERROR: connection error in gets_a_pdf for {}: {}".format(absolute_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.Timeout as e:
self.error += u"ERROR: timeout error in gets_a_pdf for {}: {}".format(absolute_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.exceptions.InvalidSchema as e:
self.error += u"ERROR: InvalidSchema error in gets_a_pdf for {}: {}".format(absolute_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.exceptions.RequestException as e:
self.error += u"ERROR: RequestException error in gets_a_pdf"
logger.info(self.error)
except requests.exceptions.ChunkedEncodingError as e:
self.error += u"ERROR: ChunkedEncodingError error in gets_a_pdf for {}: {}".format(absolute_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except NoDoiException as e:
self.error += u"ERROR: NoDoiException error in gets_a_pdf for {}: {}".format(absolute_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except Exception as e:
self.error += u"ERROR: Exception error in gets_a_pdf"
logger.exception(self.error)
if DEBUG_SCRAPING:
logger.info(u"we've decided this ain't a PDF. took {} seconds [{}]".format(
elapsed(start), absolute_url))
return False
def gets_a_word_doc(self, link, base_url):
if is_purchase_link(link):
return False
absolute_url = get_link_target(link.href, base_url)
if DEBUG_SCRAPING:
logger.info(u"checking to see if {} is a word doc".format(absolute_url))
start = time()
try:
r = http_get(absolute_url, stream=True, publisher=self.publisher, session_id=self.session_id, ask_slowly=self.ask_slowly)
if r.status_code != 200:
return False
if is_a_word_doc(r):
return True
except Exception as e:
logger.exception(u'error in gets_a_word_doc: {}'.format(e))
return False
def is_known_bad_link(self, link):
if re.search(ur'^https?://repositorio\.uchile\.cl/handle', self.url):
# these are abstracts
return re.search(ur'item_\d+\.pdf', link.href or u'')
if re.search(ur'^https?://dial\.uclouvain\.be', self.r.url):
# disclaimer parameter is an unstable key
return re.search(ur'downloader\.php\?.*disclaimer=', link.href or u'')
if re.search(ur'^https?://(?:www)?\.goodfellowpublishers\.com', self.r.url):
return re.search(ur'free_files/', link.href or u'', re.IGNORECASE)
if re.search(ur'^https?://(?:www)?\.intellectbooks\.com', self.r.url):
return re.search(ur'_nfc', link.href or u'', re.IGNORECASE)
if re.search(ur'^https?://philpapers.org/rec/FISBAI', self.r.url):
return link.href and link.href.endswith(u'FISBAI.pdf')
bad_meta_pdf_links = [
ur'^https?://cora\.ucc\.ie/bitstream/', # https://cora.ucc.ie/handle/10468/3838
ur'^https?://zefq-journal\.com/', # https://zefq-journal.com/article/S1865-9217(09)00200-1/pdf
ur'^https?://www\.nowpublishers\.com/', # https://www.nowpublishers.com/article/Details/ENT-062
]
if link.anchor == '<meta citation_pdf_url>':
for url_pattern in bad_meta_pdf_links:
if re.search(url_pattern, link.href or u''):
return True
bad_meta_pdf_sites = [
# https://researchonline.federation.edu.au/vital/access/manager/Repository/vital:11142
ur'^https?://researchonline\.federation\.edu\.au/vital/access/manager/Repository/',
ur'^https?://www.dora.lib4ri.ch/[^/]*/islandora/object/',
ur'^https?://ifs\.org\.uk/publications/', # https://ifs.org.uk/publications/14795
]
if link.anchor == '<meta citation_pdf_url>':
for url_pattern in bad_meta_pdf_sites:
if re.search(url_pattern, self.r.url or u''):
return True
return False
def filter_link(self, link):
return None if not link or self.is_known_bad_link(link) else link
def find_pdf_link(self, page):
if DEBUG_SCRAPING:
logger.info(u"in find_pdf_link in {}".format(self.url))
# before looking in links, look in meta for the pdf link
# = open journal http://onlinelibrary.wiley.com/doi/10.1111/j.1461-0248.2011.01645.x/abstract
# = open journal http://doi.org/10.1002/meet.2011.14504801327
# = open repo http://hdl.handle.net/10088/17542
# = open http://handle.unsw.edu.au/1959.4/unsworks_38708 cc-by
# logger.info(page)
links = [get_pdf_in_meta(page)] + [get_pdf_from_javascript(page)] + get_useful_links(page)
for link in [x for x in links if x is not None]:
if DEBUG_SCRAPING:
logger.info(u"trying {}, {} in find_pdf_link".format(link.href, link.anchor))
if self.is_known_bad_link(link):
continue
# there are some links that are SURELY NOT the pdf for this article
if has_bad_anchor_word(link.anchor):
continue
# there are some links that are SURELY NOT the pdf for this article
if has_bad_href_word(link.href):
continue
# don't include links with newlines
if link.href and u"\n" in link.href:
continue
if link.href.startswith(u'#'):
continue
# download link ANCHOR text is something like "manuscript.pdf" or like "PDF (1 MB)"
# = open repo http://hdl.handle.net/1893/372
# = open repo https://research-repository.st-andrews.ac.uk/handle/10023/7421
# = open repo http://dro.dur.ac.uk/1241/
if link.anchor and "pdf" in link.anchor.lower():
return link
# button says download
# = open repo https://works.bepress.com/ethan_white/45/
# = open repo http://ro.uow.edu.au/aiimpapers/269/
# = open repo http://eprints.whiterose.ac.uk/77866/
if "download" in link.anchor:
if "citation" in link.anchor:
pass
else:
return link
# want it to match for this one https://doi.org/10.2298/SGS0603181L
# but not this one: 10.1097/00003643-201406001-00238
if self.publisher and not self.is_same_publisher("Ovid Technologies (Wolters Kluwer Health)"):
if link.anchor and "full text" in link.anchor.lower():
return link
# download link is identified with an image
for img in link.findall(".//img"):
try:
if "pdf" in img.attrib["src"].lower() or "pdf" in img.attrib["class"].lower():
return link
except KeyError:
pass
try:
if "pdf" in link.attrib["title"].lower():
return link
if "download/pdf" in link.href:
return link
except KeyError:
pass
anchor = link.anchor or ''
href = link.href or ''
version_labels = ['submitted version', 'accepted version', 'published version']
if anchor.lower() in version_labels and href.lower().endswith('.pdf'):
return link
return None
def __repr__(self):
return u"<{} ({}) {}>".format(self.__class__.__name__, self.url, self.is_open)
class PublisherWebpage(Webpage):
open_version_source_string = u"publisher landing page"
@property
def ask_slowly(self):
return True
@staticmethod
def use_resolved_landing_url(resolved_url):
resolved_hostname = urlparse(resolved_url).hostname
return resolved_hostname and resolved_hostname.endswith('journals.lww.com')
def is_known_bad_link(self, link):
if super(PublisherWebpage, self).is_known_bad_link(link):
return True
if re.search(ur'^https?://www.reabic.net/journals/bir/', self.r.url):
# doi.org urls go to issue page with links for all articles, e.g. https://doi.org/10.3391/bir.2019.8.1.08
return True
if re.search(ur'^https?://nnw.cz', self.r.url):
# doi.org urls go to issue page with links for all articles, e.g. http://nnw.cz/obsahy15.html#25.033
return True
return False
def _trust_pdf_landing_pages(self):
if is_same_publisher(self.publisher, 'Oxford University Press (OUP)'):
return False
return True
def scrape_for_fulltext_link(self, find_pdf_link=True):
landing_url = self.url
if DEBUG_SCRAPING:
logger.info(u"checking to see if {} says it is open".format(landing_url))
start = time()
try:
self.r = http_get(landing_url, stream=True, publisher=self.publisher, session_id=self.session_id, ask_slowly=self.ask_slowly)
self.resolved_url = self.r.url
resolved_host = urlparse(self.resolved_url).hostname or u''
metadata_url = self.resolved_url if self.use_resolved_landing_url(self.resolved_url) else landing_url
if self.r.status_code != 200:
if self.r.status_code in [401]:
# is unauthorized, so not open
pass
else:
self.error += u"ERROR: status_code={} on {} in scrape_for_fulltext_link, skipping.".format(self.r.status_code, self.r.url)
logger.info(u"DIDN'T GET THE PAGE: {}".format(self.error))
# logger.debug(self.r.request.headers)
return
# example 10.1007/978-3-642-01445-1
if u"crossref.org/_deleted-doi/" in self.resolved_url:
logger.info(u"this is a deleted doi")
return
# if our landing_url redirects to a pdf, we're done.
# = open repo http://hdl.handle.net/2060/20140010374
if self.is_a_pdf_page():
if self._trust_pdf_landing_pages():
if DEBUG_SCRAPING:
logger.info(u"this is a PDF. success! [{}]".format(landing_url))
self.scraped_pdf_url = landing_url
self.open_version_source_string = "open (via free pdf)"
elif DEBUG_SCRAPING:
logger.info(u"landing page is an untrustworthy PDF {}".format(landing_url))
# don't bother looking for open access lingo because it is a PDF (or PDF wannabe)
return
else:
if DEBUG_SCRAPING:
logger.info(u"landing page is not a PDF for {}. continuing more checks".format(landing_url))
# get the HTML tree
page = self.r.content_small()
# get IEEE PDF from script. we might need it later.
ieee_pdf = resolved_host.endswith(u'ieeexplore.ieee.org') and re.search(ur'"pdfPath":\s*"(/ielx?7/[\d/]*\.pdf)"', page)
try:
soup = BeautifulSoup(page, 'html.parser')
[script.extract() for script in soup('script')]
[div.extract() for div in soup.find_all("div", {'class': 'table-of-content'})]
if self.is_same_publisher('Wiley'):
[div.extract() for div in soup.find_all('div', {'class': 'hubpage-menu'})]
page = str(soup)
except HTMLParseError as e:
logger.error(u'error parsing html, skipped script removal: {}'.format(e))
# Look for a pdf link. If we find one, look for a license.
pdf_download_link = self.find_pdf_link(page) if find_pdf_link else None
# if we haven't found a pdf yet, try known patterns
if pdf_download_link is None:
if ieee_pdf:
pdf_download_link = DuckLink(ieee_pdf.group(1).replace('iel7', 'ielx7'), 'download')
if pdf_download_link is not None:
pdf_url = get_link_target(pdf_download_link.href, self.r.url)
if self.gets_a_pdf(pdf_download_link, self.r.url):
self.scraped_pdf_url = pdf_url
self.scraped_open_metadata_url = metadata_url
self.open_version_source_string = "open (via free pdf)"
# set the license if we can find one
scraped_license = _trust_publisher_license(self.resolved_url) and find_normalized_license(page)
if scraped_license:
self.scraped_license = scraped_license
# Look for patterns that indicate availability but not necessarily openness and make this a bronze location.
bronze_url_snippet_patterns = [
('sciencedirect.com/', u'<div class="OpenAccessLabel">open archive</div>'),
('onlinelibrary.wiley.com', u'<div[^>]*class="doi-access"[^>]*>Free Access</div>'),
('openedition.org', ur'<span[^>]*id="img-freemium"[^>]*></span>'),
('openedition.org', ur'<span[^>]*id="img-openaccess"[^>]*></span>'),
# landing page html is invalid: <span class="accesstext"></span>Free</span>
('microbiologyresearch.org', ur'<span class="accesstext">(?:</span>)?Free'),
('journals.lww.com', ur'<li[^>]*id="[^"]*-article-indicators-free"[^>]*>'),
('ashpublications.org', ur'<i[^>]*class="[^"]*icon-availability_free'),
]
for (url_snippet, pattern) in bronze_url_snippet_patterns:
if url_snippet in self.resolved_url.lower() and re.findall(pattern, page, re.IGNORECASE | re.DOTALL):
self.scraped_open_metadata_url = metadata_url
self.open_version_source_string = "open (via free article)"
bronze_publisher_patterns = [
("New England Journal of Medicine (NEJM/MMS)", u'<meta content="yes" name="evt-free"'),
("Massachusetts Medical Society", u'<meta content="yes" name="evt-free"'),
]
for (publisher, pattern) in bronze_publisher_patterns:
if self.is_same_publisher(publisher) and re.findall(pattern, page, re.IGNORECASE | re.DOTALL):
self.scraped_open_metadata_url = metadata_url
self.open_version_source_string = "open (via free article)"
bronze_citation_pdf_patterns = [
r'^https?://www\.sciencedirect\.com/science/article/pii/S[0-9X]+/pdf(?:ft)?\?md5=[0-9a-f]+.*[0-9x]+-main.pdf$'
]
citation_pdf_link = get_pdf_in_meta(page)
if citation_pdf_link and citation_pdf_link.href:
for pattern in bronze_citation_pdf_patterns:
if re.findall(pattern, citation_pdf_link.href, re.IGNORECASE | re.DOTALL):
logger.info(u'found bronzish citation_pdf_url {}'.format(citation_pdf_link.href))
self.scraped_open_metadata_url = metadata_url
self.open_version_source_string = "open (via free article)"
# Look for some license-like patterns that make this a hybrid location.
hybrid_url_snippet_patterns = [
('projecteuclid.org/', u'<strong>Full-text: Open access</strong>'),
('sciencedirect.com/', u'<div class="OpenAccessLabel">open access</div>'),
('journals.ametsoc.org/', ur'src="/templates/jsp/_style2/_ams/images/access_free\.gif"'),
('apsjournals.apsnet.org', ur'src="/products/aps/releasedAssets/images/open-access-icon\.png"'),
('psychiatriapolska.pl', u'is an Open Access journal:'),
('journals.lww.com', u'<span class="[^>]*ejp-indicator--free'),
('journals.lww.com', ur'<img[^>]*src="[^"]*/icon-access-open\.gif"[^>]*>'),
('iospress.com', ur'<img[^>]*src="[^"]*/img/openaccess_icon.png[^"]*"[^>]*>'),
('rti.org/', ur'</svg>[^<]*Open Access[^<]*</span>'),
]
for (url_snippet, pattern) in hybrid_url_snippet_patterns:
if url_snippet in self.resolved_url.lower() and re.findall(pattern, page, re.IGNORECASE | re.DOTALL):
self.scraped_open_metadata_url = metadata_url
self.open_version_source_string = "open (via page says Open Access)"
self.scraped_license = "implied-oa"
hybrid_publisher_patterns = [
("Informa UK Limited", u"/accessOA.png"),
("Oxford University Press (OUP)", u"<i class='icon-availability_open'"),
("Institute of Electrical and Electronics Engineers (IEEE)", ur'"isOpenAccess":true'),
("Institute of Electrical and Electronics Engineers (IEEE)", ur'"openAccessFlag":"yes"'),
("Informa UK Limited", u"/accessOA.png"),
("Royal Society of Chemistry (RSC)", u"/open_access_blue.png"),
("Cambridge University Press (CUP)", u'<span class="icon access open-access cursorDefault">'),
("Wiley", ur'<div[^>]*class="doi-access"[^>]*>Open Access</div>'),
]
for (publisher, pattern) in hybrid_publisher_patterns:
if self.is_same_publisher(publisher) and re.findall(pattern, page, re.IGNORECASE | re.DOTALL):
self.scraped_open_metadata_url = metadata_url
self.open_version_source_string = "open (via page says Open Access)"
self.scraped_license = "implied-oa"
# Look for more license-like patterns that make this a hybrid location.
# Extract the specific license if present.
license_patterns = [
ur"(creativecommons.org/licenses/[a-z\-]+)",
u"distributed under the terms (.*) which permits",
u"This is an open access article under the terms (.*) which permits",
u"This is an open-access article distributed under the terms (.*), where it is permissible",
u"This is an open access article published under (.*) which permits",
u'<div class="openAccess-articleHeaderContainer(.*?)</div>',
ur'this article is published under the creative commons (.*) licence',
]
if _trust_publisher_license(self.resolved_url):
for pattern in license_patterns:
matches = re.findall(pattern, page, re.IGNORECASE)
if matches:
self.scraped_open_metadata_url = metadata_url
normalized_license = find_normalized_license(matches[0])
self.scraped_license = normalized_license or 'implied-oa'
if normalized_license:
self.open_version_source_string = 'open (via page says license)'
else:
self.open_version_source_string = 'open (via page says Open Access)'
if self.is_open:
if DEBUG_SCRAPING:
logger.info(u"we've decided this is open! took {} seconds [{}]".format(
elapsed(start), landing_url))
return True
else:
if DEBUG_SCRAPING:
logger.info(u"we've decided this doesn't say open. took {} seconds [{}]".format(
elapsed(start), landing_url))
return False
except requests.exceptions.ConnectionError as e:
self.error += u"ERROR: connection error in scrape_for_fulltext_link on {}: {}".format(landing_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return False
except requests.Timeout as e:
self.error += u"ERROR: timeout error in scrape_for_fulltext_link on {}: {}".format(landing_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return False
except requests.exceptions.InvalidSchema as e:
self.error += u"ERROR: InvalidSchema error in scrape_for_fulltext_link on {}: {}".format(landing_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return False
except requests.exceptions.RequestException as e:
self.error += u"ERROR: RequestException error in scrape_for_fulltext_link"
logger.info(self.error)
return False
except requests.exceptions.ChunkedEncodingError as e:
self.error += u"ERROR: ChunkedEncodingError error in scrape_for_fulltext_link on {}: {}".format(landing_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return False
except NoDoiException as e:
self.error += u"ERROR: NoDoiException error in scrape_for_fulltext_link on {}: {}".format(landing_url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return False
except Exception as e:
self.error += u"ERROR: Exception error in scrape_for_fulltext_link"
logger.exception(self.error)
return False
def _trust_repo_license(resolved_url):
hostname = urlparse(resolved_url).hostname
if not hostname:
return False
trusted_hosts = ['babel.hathitrust.org']
for host in trusted_hosts:
if hostname.endswith(host):
return True
return False
def _try_pdf_link_as_doc(resolved_url):
hostname = urlparse(resolved_url).hostname
if not hostname:
return False
doc_hosts = ['paleorxiv.org']
for host in doc_hosts:
if hostname.endswith(host):
return True
return False
def _trust_publisher_license(resolved_url):
hostname = urlparse(resolved_url).hostname
if not hostname:
return True
untrusted_hosts = [
'indianjournalofmarketing.com',
'rupress.org',
'rnajournal.cshlp.org',
'press.umich.edu',
'genome.cshlp.org',
]
for host in untrusted_hosts:
if hostname.endswith(host):
logger.info(u'not trusting license from {}'.format(host))
return False
return True
# abstract. inherited by PmhRepoWebpage
class RepoWebpage(Webpage):
@property
def open_version_source_string(self):
return self.base_open_version_source_string
def scrape_for_fulltext_link(self, find_pdf_link=True):
url = self.url
dont_scrape_list = [
u"ncbi.nlm.nih.gov",
u"europepmc.org",
u"/europepmc/",
u"pubmed",
u"elar.rsvpu.ru", #these ones based on complaint in email
u"elib.uraic.ru",
u"elar.usfeu.ru",
u"elar.urfu.ru",
u"elar.uspu.ru"]
for url_fragment in dont_scrape_list:
if url_fragment in url:
logger.info(u"not scraping {} because is on our do not scrape list.".format(url))
return
try:
self.r = http_get(url, stream=True, publisher=self.publisher, session_id=self.session_id, ask_slowly=self.ask_slowly)
self.resolved_url = self.r.url
if self.r.status_code != 200:
if self.r.status_code in [401]:
# not authorized, so not open
pass
else:
self.error += u"ERROR: status_code={} on {} in scrape_for_fulltext_link".format(self.r.status_code, url)
return
# if our url redirects to a pdf, we're done.
# = open repo http://hdl.handle.net/2060/20140010374
if self.is_a_pdf_page():
if accept_direct_pdf_links(self.resolved_url):
if DEBUG_SCRAPING:
logger.info(u"this is a PDF. success! [{}]".format(self.resolved_url))
self.scraped_pdf_url = url
else:
if DEBUG_SCRAPING:
logger.info(u"ignoring direct pdf link".format(self.resolved_url))
return
else:
if DEBUG_SCRAPING:
logger.info(u"is not a PDF for {}. continuing more checks".format(url))
if is_a_word_doc(self.r):
if DEBUG_SCRAPING:
logger.info(u"this is a word doc. success! [{}]".format(url))
self.scraped_open_metadata_url = url
return
# now, before reading the content, bail out if it is too large
if is_response_too_large(self.r):
logger.info(u"landing page is too large, skipping")
return
# get the HTML tree
page = self.r.content_small()
# remove script tags
try:
soup = BeautifulSoup(page, 'html.parser')
[script.extract() for script in soup('script')]
page = str(soup)
except HTMLParseError as e:
logger.error(u'error parsing html, skipped script removal: {}'.format(e))
# set the license if we can find one
scraped_license = find_normalized_license(page)
if scraped_license:
self.scraped_license = scraped_license
pdf_download_link = None
# special exception for citeseer: we want the pdf link that points to the copy
# on the third-party repository, not the cached copy, if we can get it
if url and u"citeseerx.ist.psu.edu/" in url:
matches = re.findall(u'<h3>Download Links</h3>.*?href="(.*?)"', page, re.DOTALL)
if matches:
pdf_download_link = DuckLink(unicode(matches[0], "utf-8"), "download")
# osf doesn't include its download link in its pages,
# so look at the page contents to see if it is osf-hosted
# and, if so, compute the url. example: http://osf.io/tyhqm
elif page and u"osf-cookie" in unicode(page, "utf-8", errors='replace'):
pdf_download_link = DuckLink(u"{}/download".format(url), "download")
# otherwise look for it the normal way
else:
pdf_download_link = self.find_pdf_link(page)
if pdf_download_link is None:
if re.search(ur'https?://cdm21054\.contentdm\.oclc\.org/digital/collection/IR/id/(\d+)', self.resolved_url):
pdf_download_link = DuckLink(
'/digital/api/collection/IR/id/{}/download'.format(
re.search(
ur'https?://cdm21054\.contentdm\.oclc\.org/digital/collection/IR/id/(\d+)',
self.resolved_url
).group(1)
),
'download'
)
if pdf_download_link is not None:
if DEBUG_SCRAPING:
logger.info(u"found a PDF download link: {} {} [{}]".format(
pdf_download_link.href, pdf_download_link.anchor, url))
pdf_url = get_link_target(pdf_download_link.href, self.r.url)
# if they are linking to a PDF, we need to follow the link to make sure it's legit
if DEBUG_SCRAPING:
logger.info(u"checking to see the PDF link actually gets a PDF [{}]".format(url))
if (pdf_download_link.anchor == u'<meta citation_pdf_url>' and
re.match(r'https?://(www\.)?osti\.gov/servlets/purl/[0-9]+', pdf_url)):
# try the pdf URL with cookies
osti_pdf_response = http_get(
pdf_url, stream=True, publisher=self.publisher,
session_id=self.session_id, ask_slowly=self.ask_slowly, cookies=self.r.cookies
)
if is_a_pdf_page(osti_pdf_response, self.publisher):
self.scraped_open_metadata_url = url
direct_pdf_url = osti_pdf_response.url
# make sure the resolved PDF URL works without cookies before saving it
direct_pdf_response = http_get(
direct_pdf_url, stream=True, publisher=self.publisher,
session_id=self.session_id, ask_slowly=self.ask_slowly
)
if is_a_pdf_page(direct_pdf_response, self.publisher):
self.scraped_pdf_url = osti_pdf_response.url
self.r = direct_pdf_response
return
if self.gets_a_pdf(pdf_download_link, self.r.url):
self.scraped_open_metadata_url = url
if not _discard_pdf_url(pdf_url):
self.scraped_pdf_url = pdf_url
return
# try this later because we would rather find a pdf
# if they are linking to a .docx or similar, this is open.
doc_link = find_doc_download_link(page)
if doc_link is None and _try_pdf_link_as_doc(self.resolved_url):
doc_link = pdf_download_link
if doc_link is not None:
absolute_doc_url = get_link_target(doc_link.href, self.resolved_url)
if DEBUG_SCRAPING:
logger.info(u"found a possible .doc download link [{}]".format(absolute_doc_url))
if self.gets_a_word_doc(doc_link, self.r.url):
if DEBUG_SCRAPING:
logger.info(u"we've decided this is a word doc. [{}]".format(absolute_doc_url))
self.scraped_open_metadata_url = url
return
else:
if DEBUG_SCRAPING:
logger.info(u"we've decided this ain't a word doc. [{}]".format(absolute_doc_url))
bhl_link = find_bhl_view_link(self.resolved_url, page)
if bhl_link is not None:
logger.info('found a BHL document link: {}'.format(get_link_target(bhl_link.href, self.resolved_url)))
self.scraped_open_metadata_url = url
return
if _trust_repo_license(self.resolved_url) and self.scraped_license:
logger.info(u'trusting license {}'.format(self.scraped_license))
self.scraped_open_metadata_url = self.url
except requests.exceptions.ConnectionError as e:
self.error += u"ERROR: connection error on {} in scrape_for_fulltext_link: {}".format(url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return
except requests.Timeout as e:
self.error += u"ERROR: timeout error on {} in scrape_for_fulltext_link: {}".format(url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return
except requests.exceptions.InvalidSchema as e:
self.error += u"ERROR: InvalidSchema error on {} in scrape_for_fulltext_link: {}".format(url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return
except requests.exceptions.RequestException as e:
self.error += u"ERROR: RequestException in scrape_for_fulltext_link"
logger.info(self.error)
return
except requests.exceptions.ChunkedEncodingError as e:
self.error += u"ERROR: ChunkedEncodingError error on {} in scrape_for_fulltext_link: {}".format(url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return
except NoDoiException as e:
self.error += u"ERROR: NoDoiException error on {} in scrape_for_fulltext_link: {}".format(url, unicode(e.message).encode("utf-8"))
logger.info(self.error)
return
except Exception as e:
self.error += u"ERROR: Exception error on in scrape_for_fulltext_link"
logger.exception(self.error)
return
if DEBUG_SCRAPING:
logger.info(u"found no PDF download link. end of the line. [{}]".format(url))
return self
def accept_direct_pdf_links(url):
if re.match(ur'^https?://pure\.mpg\.de', url):
# direct pdf links go to supplementary materials
return False
return True
class PmhRepoWebpage(RepoWebpage):
@property
def base_open_version_source_string(self):
if self.match_type:
return u"oa repository (via OAI-PMH {} match)".format(self.match_type)
return u"oa repository (via OAI-PMH)"
def find_doc_download_link(page):
for link in get_useful_links(page):
# there are some links that are FOR SURE not the download for this article
if has_bad_href_word(link.href):
continue
if has_bad_anchor_word(link.anchor):
continue
# = open repo https://lirias.kuleuven.be/handle/123456789/372010
if ".doc" in link.href or ".doc" in link.anchor:
if DEBUG_SCRAPING:
logger.info(u"link details: {} {}".format(link.href, link.anchor))
return link
return None
def find_bhl_view_link(url, page_content):
hostname = urlparse(url).hostname
if not (hostname and hostname.endswith(u'biodiversitylibrary.org')):
return None
view_links = [link for link in get_useful_links(page_content) if link.anchor == 'view article']
return view_links[0] if view_links else None
class DuckLink(object):
def __init__(self, href, anchor):
self.href = href
self.anchor = anchor
def get_useful_links(page):
links = []
tree = get_tree(page)
if tree is None:
return []
# remove related content sections
bad_section_finders = [
# references and related content sections
"//div[@class=\'relatedItem\']", #http://www.tandfonline.com/doi/abs/10.4161/auto.19496
"//div[@class=\'citedBySection\']", #10.3171/jns.1966.25.4.0458
"//div[@class=\'references\']", #https://www.emeraldinsight.com/doi/full/10.1108/IJCCSM-04-2017-0089
"//div[@class=\'moduletable\']", # http://vestnik.mrsu.ru/index.php/en/articles2-en/80-19-1/671-10-15507-0236-2910-029-201901-1
"//div[contains(@class, 'ref-list')]", #https://www.jpmph.org/journal/view.php?doi=10.3961/jpmph.16.069
"//div[@id=\'supplementary-material\']", #https://www.jpmph.org/journal/view.php?doi=10.3961/jpmph.16.069
"//div[@id=\'toc\']", # https://www.elgaronline.com/view/edcoll/9781781004326/9781781004326.xml
"//div[contains(@class, 'cta-guide-authors')]", # https://www.journals.elsevier.com/physics-of-the-dark-universe/
"//div[contains(@class, 'footer-publication')]", # https://www.journals.elsevier.com/physics-of-the-dark-universe/
"//d-appendix", # https://distill.pub/2017/aia/
"//dt-appendix", # https://distill.pub/2016/handwriting/
"//div[starts-with(@id, 'dt-cite')]", # https://distill.pub/2017/momentum/
"//ol[contains(@class, 'ref-item')]", # http://www.cjcrcn.org/article/html_9778.html
"//div[contains(@class, 'NLM_back')]", # https://pubs.acs.org/doi/10.1021/acs.est.7b05624
"//div[contains(@class, 'NLM_citation')]", # https://pubs.acs.org/doi/10.1021/acs.est.7b05624
"//div[@id=\'relatedcontent\']", # https://pubs.acs.org/doi/10.1021/acs.est.7b05624
"//div[@id=\'author-infos\']", # https://www.tandfonline.com/doi/full/10.1080/01639374.2019.1670767
"//ul[@id=\'book-metrics\']", # https://link.springer.com/book/10.1007%2F978-3-319-63811-9
"//section[@id=\'article_references\']", # https://www.nejm.org/doi/10.1056/NEJMms1702111
"//section[@id=\'SupplementaryMaterial\']", # https://link.springer.com/article/10.1057%2Fs41267-018-0191-3
"//div[@id=\'attach_additional_files\']", # https://digitalcommons.georgiasouthern.edu/ij-sotl/vol5/iss2/14/
"//span[contains(@class, 'fa-lock')]", # https://www.dora.lib4ri.ch/eawag/islandora/object/eawag%3A15303
"//ul[@id=\'reflist\']", # https://elibrary.steiner-verlag.de/article/10.25162/sprib-2019-0002
"//div[@class=\'listbibl\']", # http://sk.sagepub.com/reference/the-sage-handbook-of-television-studies
"//div[contains(@class, 'summation-section')]", # https://www.tandfonline.com/eprint/EHX2T4QAGTIYVPK7MJBF/full?target=10.1080/20507828.2019.1614768
"//ul[contains(@class, 'references')]", # https://www.tandfonline.com/eprint/EHX2T4QAGTIYVPK7MJBF/full?target=10.1080/20507828.2019.1614768
"//p[text()='References']/following-sibling::p", # http://researcherslinks.com/current-issues/Effect-of-Different-Temperatures-on-Colony/20/1/2208/html
"//span[contains(@class, 'ref-lnk')]", # https://www.tandfonline.com/doi/full/10.1080/19386389.2017.1285143
"//div[@id=\'referenceContainer\']", # https://www.jbe-platform.com/content/journals/10.1075/ld.00050.kra
"//div[contains(@class, 'table-of-content')]", # https://onlinelibrary.wiley.com/doi/book/10.1002/9781118897126
"//img[contains(@src, 'supplementary_material')]/following-sibling::p", # https://pure.mpg.de/pubman/faces/ViewItemOverviewPage.jsp?itemId=item_2171702
# can't tell what chapter/section goes with what doi
"//div[@id=\'booktoc\']", # https://link.springer.com/book/10.1007%2F978-3-319-63811-9
"//div[@id=\'tocWrapper\']", # https://www.elgaronline.com/view/edcoll/9781786431417/9781786431417.xml
]
for section_finder in bad_section_finders:
for bad_section in tree.xpath(section_finder):
bad_section.clear()
# now get the links
link_elements = tree.xpath("//a")
for link in link_elements:
link_text = link.text_content().strip().lower()
if link_text:
link.anchor = link_text
if "href" in link.attrib:
link.href = link.attrib["href"]
elif u'title' in link.attrib and u'download fulltext' in link.attrib[u'title'].lower():
link.anchor = u'title: {}'.format(link.attrib[u'title'])
if u'href' in link.attrib:
link.href = link.attrib[u'href']
else:
# also a useful link if it has a solo image in it, and that image includes "pdf" in its filename
link_content_elements = [l for l in link]
if len(link_content_elements)==1:
link_insides = link_content_elements[0]
if link_insides.tag=="img":
if "src" in link_insides.attrib and "pdf" in link_insides.attrib["src"]:
link.anchor = u"image: {}".format(link_insides.attrib["src"])
if "href" in link.attrib:
link.href = link.attrib["href"]
if hasattr(link, "anchor") and hasattr(link, "href"):
links.append(link)
return links
def is_purchase_link(link):
# = closed journal http://www.sciencedirect.com/science/article/pii/S0147651300920050
if "purchase" in link.anchor:
logger.info(u"found a purchase link! {} {}".format(link.anchor, link.href))
return True
return False
def has_bad_href_word(href):
href_blacklist = [
# = closed 10.1021/acs.jafc.6b02480
# editorial and advisory board
"/eab/",
# = closed 10.1021/acs.jafc.6b02480
"/suppl_file/",
# https://lirias.kuleuven.be/handle/123456789/372010
"supplementary+file",
# http://www.jstor.org/action/showSubscriptions
"showsubscriptions",
# 10.7763/ijiet.2014.v4.396
"/faq",
# 10.1515/fabl.1988.29.1.21
"{{",
# 10.2174/1389450116666150126111055
"cdt-flyer",
# 10.1111/fpa.12048
"figures",
# https://www.crossref.org/iPage?doi=10.3138%2Fecf.22.1.1
"price-lists",
# https://aaltodoc.aalto.fi/handle/123456789/30772
"aaltodoc_pdf_a.pdf",
# prescribing information, see http://www.nejm.org/doi/ref/10.1056/NEJMoa1509388#t=references
"janssenmd.com",
# prescribing information, see http://www.nejm.org/doi/ref/10.1056/NEJMoa1509388#t=references
"community-register",
# prescribing information, see http://www.nejm.org/doi/ref/10.1056/NEJMoa1509388#t=references
"quickreference",
# 10.4158/ep.14.4.458
"libraryrequestform",
# http://www.nature.com/nutd/journal/v6/n7/full/nutd201620a.html
"iporeport",
#https://ora.ox.ac.uk/objects/uuid:06829078-f55c-4b8e-8a34-f60489041e2a
"no_local_copy",
".zip",
# https://zenodo.org/record/1238858
".gz",
# https://zenodo.org/record/1238858
".tar.",
# http://www.bioone.org/doi/full/10.1642/AUK-18-8.1
"/doi/full/10.1642",
# dating site :( 10.1137/S0036142902418680 http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.144.7627
"hyke.org",
# is a citation http://orbit.dtu.dk/en/publications/autonomous-multisensor-microsystem-for-measurement-of-ocean-water-salinity(1dea807b-c309-40fd-a623-b6c28999f74f).html
"&rendering=",
".fmatter",
"/samples/",
# http://ira.lib.polyu.edu.hk/handle/10397/78907
"letter_to_publisher",
# https://www.sciencedirect.com/science/article/abs/pii/S1428226796700911?via%3Dihub
'first-page',
# https://www.mitpressjournals.org/doi/abs/10.1162/evco_a_00219
'lib_rec_form',
# http://www.eurekaselect.com/107875/chapter/climate-change-and-snow-cover-in-the-european-alp
'ebook-flyer',
# http://digital.csic.es/handle/10261/134122
'accesoRestringido',
# https://www.springer.com/statistics/journal/11222
'/productFlyer/',
# https://touroscholar.touro.edu/nymc_fac_pubs/622/
'/author_agreement',
# http://orca.cf.ac.uk/115888/
'supinfo.pdf',
# http://orca.cf.ac.uk/619/
'/Appendix',
# https://digitalcommons.fairfield.edu/business-facultypubs/31/
'content_policy.pdf',
# http://cds.cern.ch/record/1338672
'BookTOC.pdf',
'BookBackMatter.pdf',
# https://www.goodfellowpublishers.com/academic-publishing.php?content=doi&doi=10.23912/9781911396512-3599
'publishers-catalogue',
# https://orbi.uliege.be/handle/2268/212705
"_toc_",
# https://pubs.usgs.gov/of/2004/1004/
"adobe.com/products/acrobat",
# https://physics.aps.org/articles/v13/31
"featured-article-pdf",
# http://www.jstor.org.libezproxy.open.ac.uk/stable/1446650
"modern-slavery-act-statement.pdf",
# https://pearl.plymouth.ac.uk/handle/10026.1/15597
"Deposit_Agreement",
# https://www.e-elgar.com/shop/gbp/the-elgar-companion-to-social-economics-second-edition-9781783478538.html
'/product_flyer/',
# https://journals.lww.com/jbjsjournal/FullText/2020/05200/Better_Late_Than_Never,_but_Is_Early_Best__.15.aspx
'links.lww.com/JBJS/F791',
]
href_whitelist = [
# https://zenodo.org/record/3831263
'190317_MainText_Figures_JNNP.pdf',
]
for good_word in href_whitelist:
if good_word.lower() in href.lower():
return False
for bad_word in href_blacklist:
if bad_word.lower() in href.lower():
return True
return False
def has_bad_anchor_word(anchor_text):
anchor_blacklist = [
# = closed repo https://works.bepress.com/ethan_white/27/
"user",
"guide",
# = closed 10.1038/ncb3399
"checklist",
# wrong link
"abstracts",
# http://orbit.dtu.dk/en/publications/autonomous-multisensor-microsystem-for-measurement-of-ocean-water-salinity(1dea807b-c309-40fd-a623-b6c28999f74f).html
"downloaded publications",
# https://hal.archives-ouvertes.fr/hal-00085700
"metadata from the pdf file",
u"récupérer les métadonnées à partir d'un fichier pdf",
# = closed http://europepmc.org/abstract/med/18998885
"bulk downloads",
# http://www.utpjournals.press/doi/pdf/10.3138/utq.35.1.47
"license agreement",
# = closed 10.1021/acs.jafc.6b02480
"masthead",
# closed http://eprints.soton.ac.uk/342694/
"download statistics",
# no examples for these yet
"supplement",
"figure",
"faq",
# https://www.biodiversitylibrary.org/bibliography/829
"download MODS",
"BibTeX citations",
"RIS citations",
'ACS ActiveView PDF',
# https://doi.org/10.11607/jomi.4336
'Submission Form',
# https://doi.org/10.1117/3.651915
'Sample Pages',
# https://babel.hathitrust.org/cgi/pt?id=uc1.e0000431916&view=1up&seq=24
'Download this page',
'Download left page',
'Download right page',
# https://touroscholar.touro.edu/nymc_fac_pubs/622/
'author agreement',
# https://www.longwoods.com/content/25849
'map to our office',
# https://www.e-elgar.com/shop/the-art-of-mooting
'download flyer',
# https://www.nowpublishers.com/article/Details/ENT-062
'download extract',
# https://utpjournals.press/doi/full/10.3138/jsp.48.3.137
'Call for Papers',
# https://brill.com/view/title/14711
'View PDF Flyer',
]
for bad_word in anchor_blacklist:
if bad_word.lower() in anchor_text.lower():
return True
return False
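# Illustrative behaviour of the anchor-text filter above (a minimal sketch, not
# exhaustive): matching is a case-insensitive substring test, so for example
#   has_bad_anchor_word("View PDF Flyer")                  -> True  (matches 'View PDF Flyer')
#   has_bad_anchor_word("Download statistics for item")    -> True  (matches "download statistics")
#   has_bad_anchor_word("Download PDF")                    -> False (no blacklisted phrase)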
def get_pdf_in_meta(page):
if "citation_pdf_url" in page:
if DEBUG_SCRAPING:
logger.info(u"citation_pdf_url in page")
tree = get_tree(page)
if tree is not None:
metas = tree.xpath("//meta")
for meta in metas:
if "name" in meta.attrib:
if meta.attrib["name"] == "citation_pdf_url":
if "content" in meta.attrib:
link = DuckLink(href=meta.attrib["content"], anchor="<meta citation_pdf_url>")
return _transform_meta_pdf(link, page)
else:
# backup if tree fails
regex = r'<meta name="citation_pdf_url" content="(.*?)">'
matches = re.findall(regex, page)
if matches:
link = DuckLink(href=matches[0], anchor="<meta citation_pdf_url>")
return _transform_meta_pdf(link, page)
return None
def _transform_meta_pdf(link, page):
if link and link.href:
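        # Normalize known publisher URL quirks before returning the link
        # (explanatory note, based on how these hosts appear to behave):
        # - Wiley: /doi/pdf/ tends to serve an HTML viewer page, while
        #   /doi/pdfdirect/ serves the PDF itself, so rewrite to pdfdirect.
        # - Dagstuhl DROPS: drop a stray trailing slash after ".pdf".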
link.href = re.sub('(https?://[\w\.]*onlinelibrary.wiley.com/doi/)pdf(/.+)', r'\1pdfdirect\2', link.href)
link.href = re.sub('(^https?://drops\.dagstuhl\.de/.*\.pdf)/$', r'\1', link.href)
# preview PDF
nature_pdf = re.match(ur'^https?://www\.nature\.com(/articles/[a-z0-9-]*.pdf)', link.href)
if nature_pdf:
reference_pdf = re.sub(ur'\.pdf$', '_reference.pdf', nature_pdf.group(1))
if reference_pdf in page:
link.href = reference_pdf
return link
def get_pdf_from_javascript(page):
matches = re.findall('"pdfUrl":"(.*?)"', page)
if matches:
link = DuckLink(href=matches[0], anchor="pdfUrl")
return link
return None
def _discard_pdf_url(url):
# count the landing page as an OA location but don't use the PDF URL
parsed_url = urlparse(url)
# PDF URLs work but aren't stable
if parsed_url.hostname and parsed_url.hostname.endswith('exlibrisgroup.com') \
and parsed_url.query and 'Expires=' in parsed_url.query:
return True
return False
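# Illustrative example for _discard_pdf_url (a sketch; the URL below is made up):
# a time-limited, signed delivery URL such as
#   https://na01.alma.exlibrisgroup.com/delivery/paper.pdf?Expires=1700000000&Signature=abc
# returns True: the landing page still counts as an OA location, but the signed
# PDF link is not recorded because it stops working once it expires.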
|
Impactstory/oadoi
|
webpage.py
|
Python
|
mit
| 58,365
|
[
"ORCA"
] |
1acd6c7a4debd6af6ad1a91936ce46aabd33f1c37024a703617ceece25d3f2ad
|
#!/usr/bin/env python
import os
import sys
import json
import pychemia
filename = None
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
print('Enter the JSON filename to store the data')
exit(1)
dirs = [x for x in os.listdir('.') if os.path.isdir(x)]
ret = []
for idir in dirs:
if os.path.isfile(idir + '/POSCAR'):
try:
st = pychemia.code.vasp.read_poscar(idir + '/POSCAR')
except ValueError:
print('Bad POSCAR\n\n' + open(idir + '/POSCAR').read())
continue
# shutil.copy2(idir+'/POSCAR',idir+'_POSCAR')
print(idir, st.natom)
else:
st = pychemia.Structure.load_json(idir + '/structure.json')
# shutil.copy2(idir+'/structure.json',idir+'_structure.json')
print('ERROR:', idir, st.natom)
continue
if os.path.isfile(idir + '/OUTCAR'):
try:
vo = pychemia.code.vasp.VaspOutput(idir + '/OUTCAR')
except ValueError:
print('Error reading Vasp Output @ ' + idir + '/OUTCAR')
continue
if not vo.has_forces_stress_energy():
print('Error extracting forces @ ' + idir + '/OUTCAR')
continue
else:
print('No OUTCAR found @ ' + idir)
continue
spacegroup = pychemia.crystal.CrystalSymmetry(st).number()
energy_pa = vo.final_data['energy']['free_energy'] / st.natom
data = {'id': idir, 'energy_pa': energy_pa, 'natom': st.natom, 'spacegroup': spacegroup,
'forces': vo.relaxation_info()['avg_force'],
'stress': max(vo.relaxation_info()['avg_stress_diag'], vo.relaxation_info()['avg_stress_non_diag'])}
ret.append(data)
with open(filename, 'w') as wf:
    json.dump(ret, wf, sort_keys=True, indent=4, separators=(',', ': '))
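# The resulting JSON is a list with one record per run directory; with
# sort_keys=True the keys come out alphabetically, e.g. (values here are
# illustrative only):
# [
#     {
#         "energy_pa": -5.1234,
#         "forces": 0.01,
#         "id": "run_001",
#         "natom": 8,
#         "spacegroup": 225,
#         "stress": 0.05
#     }
# ]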
|
MaterialsDiscovery/PyChemia
|
scripts/relax_to_json.py
|
Python
|
mit
| 1,785
|
[
"CRYSTAL",
"VASP"
] |
d3d6d4a6f625e2b5f063d724c2a3e25e47e8f83d1f1ebf2ba6cecc3d71673621
|
from simphony.api import CUBA
def get_box(particle_data_containers,
command_format=False,
change_existing=False):
""" Get simulation box commands
Using CUBA.VECTOR and CUBA.ORIGIN, return the
string used by LAMMPS to define the simulation box in
the LAMMPS data file or as a command.
Currently the box vectors (and origin) have to be
the same for each particle container.
Parameters:
-----------
particle_data_containers: collection of DataContainer
list of containers of data containers from particles
command_format: boolean
if command format is true, then box command suitable
for lammps-command is returned. Otherwise, the
string returned is suitable for LAMMPS data file.
change_existing: boolean
if true, the lammps-command suitable for changing the
simulation box is returned
"""
origin = None
vectors = None
for dc in particle_data_containers:
# find box vectors (and origin) and ensure
# that they are the same for each particle container
if CUBA.VECTOR in dc:
if (vectors and
vectors != dc[CUBA.VECTOR]):
raise RuntimeError(
"Box vectors of each Particles need to match")
vectors = dc[CUBA.VECTOR]
else:
raise RuntimeError("CUBA.VECTOR was not set")
if CUBA.ORIGIN in dc:
if origin and origin != dc[CUBA.ORIGIN]:
raise RuntimeError(
"Box origin of each Particles need to match")
origin = dc[CUBA.ORIGIN]
# origin is optional
if not origin:
origin = (0.0, 0.0, 0.0)
# Note: For LAMMPS we can define a orthogonal simulation
# or non-orthogonal simulation box. For the non-orthogonal
# simulation box, the lammps doc states the following:
# "a must lie on the positive x axis. b must lie in
# the xy plane, with strictly positive y component. c may
# have any orientation with strictly positive z component.
# The requirement that a, b, and c have strictly positive
# x, y, and z components, respectively, ensures that a, b,
# and c form a complete right-handed basis."
if not vectors:
raise RuntimeError("CUBA.VECTOR was not set")
else:
_check_vectors(vectors)
box_string = ""
if command_format:
if change_existing:
box_string = _get_change_region_box_string()
else:
box_string = _get_command_region_box_string()
else:
if change_existing:
RuntimeError("change existing is not supported for data file")
box_string = _get_data_file_box_string()
return box_string.format(origin[0], vectors[0][0]+origin[0],
origin[1], vectors[1][1]+origin[1],
origin[2], vectors[2][2]+origin[2])
def _check_vectors(vectors):
# TODO: currently only handling orthogonal simulation box
# (where a must lie on positive x axis..) so only something
# like the following is allowed: (x, 0, 0), (0, y, 0)
# and (0, 0, z).
for i, v in enumerate(vectors):
for j, x in enumerate(v):
if i != j and float(x) != 0.0:
msg = ("Box vectors must have the form "
"(x, 0, 0), (0, y, 0) and (0, 0, z)")
raise RuntimeError(msg)
def _get_data_file_box_string():
box = "{:.16e} {:.16e} xlo xhi\n"
box += "{:.16e} {:.16e} ylo yhi\n"
box += "{:.16e} {:.16e} zlo zhi\n"
return box
def _get_command_region_box_string():
box = "region box block {:.16e} {:.16e} "
box += "{:.16e} {:.16e} "
box += "{:.16e} {:.16e}\n"
return box
def _get_change_region_box_string():
box = "change_box all x final {:.16e} {:.16e} "
box += "y final {:.16e} {:.16e} "
box += "z final {:.16e} {:.16e}\n"
return box
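# Minimal usage sketch (an illustration, assuming a plain dict as a stand-in for
# a simphony DataContainer, since both behave as mappings):
if __name__ == "__main__":
    dc = {CUBA.VECTOR: ((10.0, 0.0, 0.0),
                        (0.0, 10.0, 0.0),
                        (0.0, 0.0, 10.0)),
          CUBA.ORIGIN: (0.0, 0.0, 0.0)}
    # data-file block: "<lo> <hi>" pairs followed by xlo xhi / ylo yhi / zlo zhi
    print(get_box([dc]))
    # lammps command form: "region box block <xlo> <xhi> <ylo> <yhi> <zlo> <zhi>"
    print(get_box([dc], command_format=True))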
|
simphony/simphony-lammps-md
|
simlammps/config/domain.py
|
Python
|
bsd-2-clause
| 3,957
|
[
"LAMMPS"
] |
59e27bbf79098b82cfb2a8ff6d893a60ed6532a19f9a13aacfc955a41268346d
|
# Copyright (C) 2013-2021 The Debsources developers
# <[email protected]>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/AUTHORS
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/COPYING
import datetime
import json
import os
import unittest
from pathlib import Path
from nose.plugins.attrib import attr
from debsources.app.app_factory import AppWrapper
from debsources.tests.db_testing import DbTestFixture
from debsources.tests.testdata import TEST_DB_NAME
@attr("basewebapp")
class DebsourcesBaseWebTests(DbTestFixture):
@classmethod
def setUpClass(cls):
# We use the class method here. setUpClass is called at the class
# creation, and tearDownClass at the class destruction (instead of
# setUp and tearDown before and after each test). This is doable here
# because the app never modifies the db (so it's useless to
        # create/destroy it many times), and this is a big gain of time.
cls.db_setup_cls()
# creates an app object, which is used to run queries
from debsources.app import app_wrapper
        # override a few configuration parameters needed for testing:
uri = "postgresql:///" + TEST_DB_NAME
app_wrapper.app.config["DB_URI"] = uri
app_wrapper.app.config["LIST_OFFSET"] = 5
app_wrapper.app.testing = True
app_wrapper.go()
cls.app = app_wrapper.app.test_client()
cls.app_wrapper = app_wrapper
@classmethod
def tearDownClass(cls):
cls.app_wrapper.engine.dispose()
cls.db_teardown_cls()
@attr("webapp")
class DebsourcesTestCase(DebsourcesBaseWebTests, unittest.TestCase):
def test_app_config(self):
# use existing config to initialize app wrapper
config = dict(domain="test.debian.test")
app_wrapper = AppWrapper(config=config)
self.assertEqual(app_wrapper.app.config["domain"], "test.debian.test")
def test_invalid_loglevel(self):
"""test with wrong supplied logging level"""
import logging
config = dict(LOG_LEVEL="invalid-test")
app_wrapper = AppWrapper(config=config)
app_wrapper.setup_logging()
logger = app_wrapper.app.logger
# no name, just know the index
# the second handler is our streamhandler.
self.assertEqual(logger.handlers[1].level, logging.INFO)
def test_api_ping(self):
rv = json.loads(self.app.get("/api/ping/").data)
self.assertEqual(rv["status"], "ok")
self.assertEqual(rv["http_status_code"], 200)
def test_api_package_search(self):
# test exact search result
rv = json.loads(self.app.get("/api/search/gnubg/").data)
self.assertEqual(rv["query"], "gnubg")
self.assertEqual(rv["results"]["other"], [])
self.assertEqual(rv["results"]["exact"], {"name": "gnubg"})
# with suite specified
rv = json.loads(self.app.get("/api/search/gnubg/?suite=squeeze").data)
self.assertEqual(rv["query"], "gnubg")
self.assertEqual(rv["results"]["other"], [])
self.assertEqual(rv["results"]["exact"], {"name": "gnubg"})
# with a non-existing suite name specified
rv = json.loads(self.app.get("/api/search/gnubg/?suite=nonexisting").data)
self.assertEqual(rv["query"], "gnubg")
self.assertEqual(rv["results"]["other"], [])
self.assertIsNone(rv["results"]["exact"])
# other results
rv = json.loads(self.app.get("/api/search/gnu/").data)
self.assertEqual(rv["query"], "gnu")
self.assertEqual(rv["results"]["other"], [{"name": "gnubg"}])
self.assertIsNone(rv["results"]["exact"])
def test_static_files(self):
static_files = [
"/static/css/base.css",
"/static/css/debian.css",
"/static/favicon.ico",
"/static/bootstrap/bootstrap.min.css",
"/static/bootstrap/bootstrap.min.js",
]
for path in static_files:
status = self.app.get(path).status_code
self.assertEqual(status, 200)
def test_package_search(self):
# test exact search result
rv = self.app.get("/search/gnubg/")
self.assertIn(b"/src/gnubg/", rv.data)
# with suite specified
rv = self.app.get("/search/gnubg/?suite=squeeze")
self.assertIn(b"/src/gnubg/", rv.data)
# with a non-existing suite name specified
rv = self.app.get("/search/gnubg/?suite=nonexisting")
self.assertNotIn(b"/src/gnubg/", rv.data)
# other results
rv = self.app.get("/search/gnu/")
self.assertIn(b"/src/gnubg/", rv.data)
def test_api_case_insensitive_package_search(self):
# exact search (lower case)
rv = json.loads(self.app.get("/api/search/gnubg/").data)
self.assertEqual(rv["query"], "gnubg")
self.assertEqual(rv["results"]["exact"], {"name": "gnubg"})
# other results (mixed case)
rv = json.loads(self.app.get("/api/search/GnUbG/").data)
self.assertEqual(rv["query"], "GnUbG")
self.assertEqual(rv["results"]["other"], [{"name": "gnubg"}])
# suite specified (mixed case)
rv = json.loads(self.app.get("/api/search/gnubg/?suite=SQueeZe").data)
self.assertEqual(rv["query"], "gnubg")
self.assertEqual(rv["results"]["exact"], {"name": "gnubg"})
def test_case_insensitive_package_search(self):
# exact search (mixed case)
rv = self.app.get("/search/gNuBg/")
self.assertIn(b"/src/gnubg/", rv.data)
# with suite specified (mixed case)
rv = self.app.get("/search/gnubg/?suite=sQuEeZe")
self.assertIn(b"/src/gnubg/", rv.data)
# other results (mixed case)
rv = self.app.get("/search/gNu/")
self.assertIn(b"/src/gnubg/", rv.data)
def test_static_pages(self):
rv = self.app.get("/")
self.assertIn(b"Debsources", rv.data)
rv = self.app.get("/advancedsearch/")
self.assertIn(b"Package search", rv.data)
self.assertIn(b"File search", rv.data)
self.assertIn(b"Code search", rv.data)
rv = self.app.get("/doc/overview/")
self.assertIn(b"Debsources provides Web access", rv.data)
rv = self.app.get("/doc/api/")
self.assertIn(b"API documentation", rv.data)
rv = self.app.get("/doc/url/")
self.assertIn(b"URL scheme", rv.data)
rv = self.app.get("/doc/about/")
self.assertIn(b"source code", rv.data)
self.assertIn(b"is available", rv.data)
def test_api_packages_list(self):
rv = json.loads(self.app.get("/api/list/").data)
self.assertIn({"name": "libcaca"}, rv["packages"])
self.assertEqual(len(rv["packages"]), 19)
def test_api_by_prefix(self):
rv = json.loads(self.app.get("/api/prefix/libc/").data)
self.assertIn({"name": "libcaca"}, rv["packages"])
# suite specified
rv = json.loads(self.app.get("/api/prefix/libc/?suite=squeeze").data)
self.assertIn({"name": "libcaca"}, rv["packages"])
# a non-existing suite specified
rv = json.loads(self.app.get("/api/prefix/libc/?suite=non-existing").data)
self.assertEqual([], rv["packages"])
# special suite name "all" is specified
rv = json.loads(self.app.get("/api/prefix/libc/?suite=all").data)
self.assertIn({"name": "libcaca"}, rv["packages"])
# lib* must not be in 'l'
rv = json.loads(self.app.get("/api/prefix/l/").data)
self.assertNotIn({"name": "libcaca"}, rv["packages"])
def test_by_prefix(self):
rv = self.app.get("/prefix/libc/")
self.assertIn(b"/src/libcaca", rv.data)
# suite specified
rv = self.app.get("/prefix/libc/?suite=squeeze")
self.assertIn(b"/src/libcaca", rv.data)
# a non-existing suite specified
rv = self.app.get("/prefix/libc/?suite=non-existing")
self.assertNotIn(b"/src/libcaca", rv.data)
# special suite name "all" is specified
rv = self.app.get("/prefix/libc/?suite=all")
self.assertIn(b"/src/libcaca", rv.data)
def test_api_case_insensitive_prefix(self):
rv_lower_case = json.loads(self.app.get("/api/prefix/g/").data)
rv_upper_case = json.loads(self.app.get("/api/prefix/G/").data)
self.assertEqual(rv_lower_case["packages"], rv_upper_case["packages"])
# suite specified
rv_lower_case = json.loads(self.app.get("/api/prefix/g/?suite=squeeze").data)
rv_upper_case = json.loads(self.app.get("/api/prefix/G/?suite=SQUEEZE").data)
self.assertEqual(rv_lower_case["packages"], rv_upper_case["packages"])
def test_case_insensitive_prefix(self):
rv_lower_case = self.app.get("/api/prefix/g/").data
rv_upper_case = self.app.get("/api/prefix/G/").data
self.assertEqual(rv_lower_case, rv_upper_case)
# suite specified
rv_lower_case = self.app.get("/api/prefix/g/?suite=squeeze").data
rv_upper_case = self.app.get("/api/prefix/G/?suite=SQUEEZE").data
self.assertEqual(rv_lower_case, rv_upper_case)
def test_api_package(self):
rv = json.loads(self.app.get("/api/src/ledit/").data)
self.assertEqual(rv["path"], "ledit")
self.assertEqual(len(rv["versions"]), 3)
self.assertEqual(rv["type"], "package")
# list index/order may be changed
_v = [v for v in rv["versions"] if v["version"] == "2.01-6"][0]
self.assertIn("squeeze", _v["suites"])
# with suite specified
rv = json.loads(self.app.get("/api/src/ledit/?suite=squeeze").data)
self.assertEqual(rv["path"], "ledit")
self.assertEqual(len(rv["versions"]), 1)
self.assertEqual(rv["type"], "package")
_v = [v for v in rv["versions"] if v["version"] == "2.01-6"][0]
self.assertIn("squeeze", _v["suites"])
# with a non-existing suite
rv = json.loads(self.app.get("/api/src/ledit/?suite=non-existing").data)
self.assertEqual(rv["path"], "ledit")
self.assertEqual([], rv["versions"])
self.assertEqual(rv["type"], "package")
def test_package(self):
rv = self.app.get("/src/ledit/")
self.assertIn(b"/src/ledit/2.01-6/", rv.data)
self.assertIn(b"/src/ledit/2.03-1/", rv.data)
self.assertIn(b"/src/ledit/2.03-2/", rv.data)
self.assertIn(b"[jessie, sid]", rv.data)
# with suite specified
rv = self.app.get("/src/ledit/?suite=squeeze")
self.assertIn(b"/src/ledit/2.01-6/", rv.data)
self.assertNotIn(b"/src/ledit/2.03-1/", rv.data)
self.assertNotIn(b"/src/ledit/2.03-2/", rv.data)
self.assertNotIn(b"[jessie, sid]", rv.data)
# with a non-existing suite
rv = self.app.get("/src/ledit/?suite=non-existing")
self.assertNotIn(b"/src/ledit/2.01-6/", rv.data)
self.assertNotIn(b"/src/ledit/2.03-1/", rv.data)
self.assertNotIn(b"/src/ledit/2.03-2/", rv.data)
self.assertNotIn(b"[jessie, sid]", rv.data)
def test_api_folder(self):
rv = json.loads(self.app.get("/api/src/ledit/2.01-6/").data)
self.assertEqual(rv["type"], "directory")
self.assertEqual(rv["path"], "ledit/2.01-6")
self.assertEqual(rv["package"], "ledit")
self.assertEqual(rv["directory"], "2.01-6")
self.assertIn(
{
"type": "file",
"name": "ledit.ml",
"percent_encoded_name": "ledit.ml",
"hidden": False,
"stat": {
"perms": "rw-r--r--",
"size": 45858,
"type": "-",
"symlink_dest": None,
},
},
rv["content"],
)
def test_api_hidden_files_folder(self):
rv = json.loads(self.app.get("/api/src/nvidia-xconfig/319.72-1/").data)
hidden_element = {}
shown_element = {}
for el in rv["content"]:
if el["name"] == ".pc":
hidden_element = el
elif el["name"] == "lscf.c":
shown_element = el
self.assertTrue(hidden_element["hidden"])
self.assertFalse(shown_element["hidden"])
def test_api_symlink_dest(self):
rv = json.loads(self.app.get("/api/src/beignet/1.0.0-1/").data)
self.assertIn(
{
"type": "file",
"name": "README.md",
"percent_encoded_name": "README.md",
"hidden": False,
"stat": {
"perms": "rwxrwxrwx",
"size": 17,
"type": "l",
"symlink_dest": "docs/Beignet.mdwn",
},
},
rv["content"],
)
def test_symlink(self):
rv = self.app.get("/src/beignet/1.0.0-1/README.md/")
# safe symlink
self.assertIn(b"/src/beignet/1.0.0-1/docs/Beignet.mdwn/", rv.data)
# unsafe symlinks (relatives and absolutes)
sources_dir = self.app_wrapper.app.config["SOURCES_DIR"]
s_relative = (
sources_dir / "main" / "b" / "beignet" / "1.0.0-1" / "relative-link"
)
s_absolute = (
sources_dir / "main" / "b" / "beignet" / "1.0.0-1" / "absolute-link"
)
try:
# create symlinks
if not os.path.lexists(s_relative):
os.symlink(
"../../../../non-free/b/bsdgames-nonfree/"
+ "2.17-3/debian/control",
s_relative,
)
if not os.path.lexists(s_absolute):
os.symlink("/etc/passwd", s_absolute)
# try to access them via Debsources
rv = self.app.get("/src/beignet/1.0.0-1/relative-link/")
self.assertEqual(403, rv.status_code)
rv = self.app.get("/src/beignet/1.0.0-1/absolute-link/")
self.assertEqual(403, rv.status_code)
finally: # clean up
if os.path.lexists(s_relative):
os.remove(s_relative)
if os.path.lexists(s_absolute):
os.remove(s_absolute)
def test_source_file(self):
rv = self.app.get("/src/ledit/2.01-6/ledit.ml/")
# source code detection
self.assertIn(b'<code id="sourcecode" class="ocaml">', rv.data)
# highlight.js present?
self.assertIn(b"hljs.highlightBlock", rv.data)
self.assertIn(
b'<script src="/static/javascript-lib/highlight/highlight.min.js">'
b"</script>",
rv.data,
)
# content of the file
self.assertIn(b"Institut National de Recherche en Informatique", rv.data)
# correct number of lines
self.assertIn(b"1506 lines", rv.data)
# stat of the file
self.assertIn(b"stat: -rw-r--r-- 45,858 bytes", rv.data)
# raw file link
self.assertIn(
b'<a id="link_download"'
+ b' href="/data/main/l/ledit/2.01-6/ledit.ml">'
+ b"download</a>",
rv.data,
)
# parent folder link
self.assertIn(
b'<a id="link_parent_folder" href="/src/ledit/2.01-6/">'
+ b"parent folder</a>",
rv.data,
)
def test_source_file_text(self):
rv = self.app.get("/src/ledit/2.01-6/README/")
self.assertIn(b'<code id="sourcecode" class="no-highlight">', rv.data)
def test_popup(self):
# One popup
rv = self.app.get(
"src/ledit/2.01-6/go.ml/?msg=22:Cowsay:See? \
%20Cowsay%20variables%20are%20declared%20here."
)
self.assertIn(b'<pre class="messages" data-position="22">', rv.data)
# two popups
rv = self.app.get(
"src/ledit/2.01-6/go.ml/?msg=22:Cowsay:See? \
%20Cowsay%20variables%20are%20declared%20here. \
&msg=10:Cowsay:See? \
%20Cowsay%20variables%20are%20declared%20here"
)
self.assertIn(b'<pre class="messages" data-position="22">', rv.data)
self.assertIn(b'<pre class="messages" data-position="10">', rv.data)
def test_source_file_embedded(self):
rv = self.app.get("/embed/file/ledit/2.01-6/ledit.ml/")
self.assertIn(b'<code id="sourcecode" class="ocaml">', rv.data)
self.assertIn(b"Institut National de Recherche en Informatique", rv.data)
self.assertNotIn(b'<div id="logo">', rv.data)
def test_source_file_lang(self):
# note we must have a trailing slash here.
rv = self.app.get("/src/make-doc-non-dfsg/4.0-2/doc/make.info-1/")
# redirection to the raw file.
self.assertEqual(302, rv.status_code)
# no redirection. no highlight
rv = self.app.get("/src/make-doc-non-dfsg/4.0-2/doc/" "make.info-1/?lang=none")
self.assertIn(b'<code id="sourcecode" class="no-highlight">', rv.data)
# no redirection. highlight with cpp syntax
rv = self.app.get("/src/make-doc-non-dfsg/4.0-2/doc/" "make.info-1/?lang=cpp")
self.assertIn(b'<code id="sourcecode" class="cpp">', rv.data)
def test_api_errors(self):
rv = json.loads(self.app.get("/api/src/blablabla/").data)
self.assertEqual(rv["error"], 404)
def test_api_latest(self):
rv = json.loads(
self.app.get("/api/src/ledit/latest/", follow_redirects=True).data
)
self.assertIn("2.03-2", rv["path"])
def test_suite_folder(self):
rv = json.loads(self.app.get("/api/src/ledit/sid/", follow_redirects=True).data)
self.assertIn("2.03-2", rv["path"])
def test_source_file_text_suite(self):
rv = self.app.get("/src/ledit/unstable/README", follow_redirects=True)
self.assertIn(b'<code id="sourcecode" class="no-highlight">', rv.data)
rv = json.loads(
self.app.get("/api/src/ledit/unstable/README/", follow_redirects=True).data
)
self.assertIn("2.03-2", rv["path"])
def test_suite_folder_alias(self):
rv = json.loads(
self.app.get("/api/src/ledit/unstable/", follow_redirects=True).data
)
self.assertIn("2.03-2", rv["path"])
def test_source_file_text_suite_alias(self):
rv = self.app.get("/src/ledit/sid/README", follow_redirects=True)
self.assertIn(b'<code id="sourcecode" class="no-highlight">', rv.data)
rv = json.loads(
self.app.get("/api/src/ledit/sid/README/", follow_redirects=True).data
)
self.assertIn("2.03-2", rv["path"])
def test_multiple_versions_in_suite(self):
rv = json.loads(self.app.get("/api/src/patch/sid/", follow_redirects=True).data)
self.assertIn("2.7.5-1", rv["path"])
def test_multiple_versions_in_suite_alias(self):
rv = json.loads(
self.app.get("/api/src/patch/unstable/", follow_redirects=True).data
)
self.assertIn("2.7.5-1", rv["path"])
def test_codesearch_box(self):
rv = self.app.get("/src/ledit/2.03-2/ledit.ml/")
self.assertIn(b'value="package:ledit "', rv.data)
def test_pagination(self):
rv = self.app.get("/list/2/")
self.assertIn(b'<a href="/list/1/">« Previous</a>', rv.data)
self.assertIn(b'<a href="/list/3/">Next »</a>', rv.data)
self.assertIn(b"<strong>2</strong>", rv.data)
def test_api_file_duplicates(self):
rv = json.loads(
self.app.get("/api/src/bsdgames-nonfree/" "2.17-3/COPYING/").data
)
self.assertEqual(rv["number_of_duplicates"], 3)
self.assertEqual(
rv["checksum"],
("be43f81c20961702327c10e9bd5f5a9a2b1cc" "eea850402ea562a9a76abcfa4bf"),
)
def test_checksum_search(self):
rv = self.app.get(
"/sha256/?checksum="
"be43f81c20961702327c10e9bd5f5a9a2b1cceea850402ea562a9a76abcfa4bf"
"&page=1"
)
self.assertIn(b"3 results", rv.data)
self.assertIn(
b"Checksum: "
b"be43f81c20961702327c10e9bd5f5a9a2b1cceea850402ea562a9a76abcfa4bf",
rv.data,
)
def test_api_checksum_search(self):
rv = json.loads(
self.app.get(
"/api/sha256/?checksum=be43f81c20961702327"
"c10e9bd5f5a9a2b1cceea850402ea562a9a76abcf"
"a4bf&page=1"
).data
)
self.assertEqual(rv["count"], 3)
self.assertEqual(len(rv["results"]), 3)
def test_checksum_search_within_package(self):
rv = self.app.get(
"/sha256/?checksum="
"4f721b8e5b0add185d6af7a93e577638d25eaa5c341297d95b4a27b7635b4d3f"
"&package=susv2"
)
self.assertIn(b"1 result", rv.data)
self.assertIn(
b"Checksum: "
b"4f721b8e5b0add185d6af7a93e577638d25eaa5c341297d95b4a27b7635b4d3f",
rv.data,
)
def test_api_checksum_search_within_package(self):
rv = json.loads(
self.app.get(
"/api/sha256/?checksum=4f721b8e5b0add185d6"
"af7a93e577638d25eaa5c341297d95b4a27b7635b"
"4d3f&package=susv2"
).data
)
self.assertEqual(rv["count"], 1)
def test_api_search_ctag(self):
rv = json.loads(self.app.get("/api/ctag/?ctag=name").data)
self.assertEqual(rv["count"], 195)
self.assertEqual(len(rv["results"]), 195)
def test_api_search_ctag_within_package(self):
rv = json.loads(self.app.get("/api/ctag/?ctag=name&package=ledger").data)
self.assertEqual(rv["count"], 14)
self.assertEqual(len(rv["results"]), 14)
def test_api_pkg_infobox(self):
rv = json.loads(self.app.get("/api/src/libcaca/0.99.beta17-1/").data)
self.assertEqual(rv["pkg_infos"]["suites"], ["squeeze"])
self.assertEqual(rv["pkg_infos"]["area"], "main")
self.assertEqual(rv["pkg_infos"]["sloc"][0], ["ansic", 22607])
self.assertEqual(rv["pkg_infos"]["metric"]["size"], 6584)
p = "http://svn.debian.org/wsvn/sam-hocevar/pkg-misc/unstable/libcaca/"
self.assertEqual(rv["pkg_infos"]["vcs_browser"], p)
self.assertEqual(rv["pkg_infos"]["vcs_type"], "svn")
self.assertEqual(
rv["pkg_infos"]["pts_link"], "https://tracker.debian.org/pkg/libcaca"
)
self.assertEqual(rv["pkg_infos"]["ctags_count"], 3145)
self.assertEqual(
rv["pkg_infos"]["license"], "/copyright/license/libcaca/0.99.beta17-1/"
)
def test_pkg_infobox_embed(self):
rv = self.app.get("/embed/pkginfo/libcaca/0.99.beta17-1/")
self.assertIn(b'<div id="pkginfobox" class="pkginfobox_large">', rv.data)
self.assertNotIn(b"<footer", rv.data) # it's an infobox-only page
def test_info_version(self):
rv = self.app.get("/info/package/libcaca/0.99.beta17-1/")
self.assertIn(b'<div id="pkginfobox" class="pkginfobox_large">', rv.data)
def test_api_stats_suite(self):
rv = json.loads(self.app.get("/api/stats/jessie/").data)
self.assertEqual(rv["suite"], "jessie")
self.assertEqual(rv["results"]["debian_jessie.ctags"], 23816)
self.assertEqual(rv["results"]["debian_jessie.disk_usage"], 51428)
self.assertEqual(rv["results"]["debian_jessie.source_files"], 2059)
self.assertEqual(rv["results"]["debian_jessie.sloccount.python"], 2916)
def test_api_released_suite(self):
rv = json.loads(self.app.get("/api/stats/wheezy/").data)
self.assertEqual(rv["suite"], "wheezy")
self.assertEqual(rv["results"]["debian_wheezy.sloccount.cpp"], 37375)
self.assertEqual(rv["results"]["debian_wheezy.source_packages"], 12)
wheezy_rel = datetime.datetime.strptime("04052013", "%d%m%Y").date()
self.assertEqual(rv["rel_date"], str(wheezy_rel))
self.assertEqual(rv["rel_version"], "7")
def test_api_stats_all(self):
rv = json.loads(self.app.get("/api/stats/").data)
self.assertEqual(
sorted(rv["all_suites"]),
[
"debian_etch",
"debian_experimental",
"debian_jessie",
"debian_sid",
"debian_squeeze",
"debian_wheezy",
],
)
self.assertIn("ansic", rv["languages"])
self.assertEqual(rv["results"]["debian_sid.sloccount.ansic"], 208800)
def test_suggestions_when_404(self):
rv = self.app.get("/src/libcaca/0.NOPE.beta17-1/src/cacaview.c/")
self.assertIn(b"other versions of this package are available", rv.data)
link2 = b'<a href="/src/libcaca/0.99.beta17-1/src/cacaview.c/'
self.assertIn(link2, rv.data)
def test_bp_copyright_setup(self):
if self.app_wrapper.app.config.get("BLUEPRINT_COPYRIGHT"):
rv = self.app.get("/copyright/")
self.assertEqual(200, rv.status_code)
def test_news(self):
# news_routes = { news_filename: associated_route }
news_routes = {
"sources_news.html": "/",
"copyright_news.html": "/copyright/",
"patches_news.html": "/patches/",
}
local_dir = Path(self.app_wrapper.app.config["LOCAL_DIR"])
if not local_dir.is_dir():
if local_dir.exists():
# for some reason local_dir is a file, raise an IOError
raise IOError(f"{local_dir} should be a directory.")
else:
local_dir.mkdir()
# Go through each news_route and ensure it contains the data we expect
# which is the data in local/news.html file.
# If data doesn't exist, create dummy data to test.
for news_file in news_routes.keys():
fullpath = local_dir / news_file
news_string = ""
if fullpath.is_file():
with open(fullpath, "r") as f:
news_string = f.read()
else:
news_string = (
"<ul><li>This item was created in a test for "
+ news_file
+ "</li></ul>"
)
with open(fullpath, "w") as f:
f.write(news_string)
rv = self.app.get(news_routes[news_file])
self.assertIn(news_string, rv.data.decode())
def test_non_utf8_filename(self):
# List folder containing a non-utf8 filename.
rv = self.app.get("/src/aspell-is/0.51-0-4/")
self.assertEqual(200, rv.status_code)
self.assertIn(
(
b'<a href="/src/aspell-is/0.51-0-4/%25EDslenska.alias/">'
b"%EDslenska.alias</a>"
),
rv.data,
)
# Visit that file.
rv = self.app.get("/src/aspell-is/0.51-0-4/%25EDslenska.alias/")
self.assertEqual(200, rv.status_code)
self.assertIn(b"<h2>File: %EDslenska.alias</h2>", rv.data)
if __name__ == "__main__":
unittest.main(exit=False)
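# A note on running these tests (a sketch of the usual invocations): executing
# this module directly runs the whole suite via unittest.main() above, while
# nose's attrib plugin can select subsets through the @attr markers, e.g.
# `nosetests -a webapp` or `nosetests -a basewebapp`.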
|
Debian/debsources
|
lib/debsources/tests/test_webapp.py
|
Python
|
agpl-3.0
| 27,718
|
[
"VisIt"
] |
5d0491ed988e05c75718b04c2106e62ca5e020313e25bccb7f8f8b10ed5f06e0
|
# Copyright (c) 2001 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
"""
Sends and receives buffers on TCP connections.
"""
__revision__ = "$Id: TCPConnection.py,v 1.12 2003/03/09 18:54:57 zooko Exp $"
# Python Standard Library modules
import asyncore, socket, struct, sys, threading, time, traceback, types
# pyutil modules
from pyutil import Asyncore, DoQ
from pyutil.config import DEBUG_MODE
from pyutil.debugprint import debugprint, debugstream
from pyutil.humanreadable import hr
# EGTP modules
from egtp import CommsError, idlib
True = 1 == 1
False = 0 == 1
# This is the maximum lowlevel EGTP message size; attempting to
# receive a message longer than this will cause the EGTP connection
# to be aborted. Attempting to send a message longer than this
# will cause a fast fail.
MAXIMUM_MSG_SIZE = 4*(2**20) # 4 megabytes
class TCPConnection(asyncore.dispatcher):
"""
Sends and receives buffers on TCP connections. Prepends lengths for each message.
"""
def __init__(self, inmsg_handler_func, key, close_handler_func=None, host=None, port=None, sock=None, commstratobj=None, throttlerin=None, throttlerout=None, cid_for_debugging=None, timeout=600):
"""
@param key: a key for identifying this connection; (Hint: if you know the counterparty id use that, else use `idlib.make_new_random_id(thingtype='TCPConnection')')
@param close_handler_func: a function that gets called when the TCPConnection closes
@param timeout: inactivity timeout for our TCP connections in
seconds; We never time-out a connection if we want it because we
are either expecting a reply, or maintaining a connection with a
frequently-used counterparty.
@precondition: `key' must be a binary id.: idlib.is_binary_id(key): "key: %s :: %s" % tuple(map(hr, (key, type(key),)))
"""
assert idlib.is_binary_id(key), "precondition: `key' must be a binary id." + " -- " + "key: %s :: %s" % tuple(map(hr, (key, type(key),)))
# `_cid_for_debugging' is for debugging.
self._cid_for_debugging = cid_for_debugging
self._timeout = timeout
# `_key' is what we use to index this object in the TCPConnCache. If we know the
# counterparty id, then `_key' gets set to the counterparty id by the higher-level code
# (by direct access to the `_key' member), else `_key' is set to a random unique id by
# the higher-level code (in the constructor).
self._key = key
# XXX Hey -- could this be causing unexpected behaviour at some point if there were two different comm strats using the same key because they were with the same counterparty? It shouldn't, because currently we supposedly always forget an old comm strat for a given counterparty before trying a new one. (Although I don't know off the top of my head if this is accomplished with an if: condition or just by replacing the entry in the _conncache.) But in the future we might keep multiple comm strats for one counterparty, so perhaps we shouldn't use the counterparty id as the key... --Zooko 2001-05-07
# for passing through to the fast fail handler
self._commstratobj = commstratobj
self._upward_inmsg_handler = inmsg_handler_func
self._close_handler_func = close_handler_func
# XXX multi-threading issues re: throttler
self._throttlerread = throttlerin
self._throttlerwrite = throttlerout
self._timeout = timeout
self._readthrottled = False
self._writethrottled = False
self._inbufq = [] # this is a list of strings
        self._inbuflen = 0 # the current aggregate unconsumed bytes in inbufq (there can be leading bytes in inbufq[0] which have already been consumed and are not counted by inbuflen)
self._nextinmsglen = None # the length of the next incoming msg or `None' if unknown
self._offset = 0 # the index of the beginning of the length-prefix of the next message (when there is no next message, `_offset' is 0)
self._outmsgq = [] # contains (msg, fast_fail_handler_func) # for all subsequent outgoing messages that haven't begun sending yet
self._outbuf = '' # the not-yet-sent part of the current outgoing message
self._current_fast_fail_handler = None # the ffh for the current outgoing message
if sock:
self._everconnected = True # we're already connected
else:
self._everconnected = False # `handle_connect()' sets this to true
self._closing = False
self._startedclosingonpyutilasync = False # to prevent multiple _finish_closing_on_pyutilasync calls, thus making `close()' idempotent
self._closed = False # This gets set to true in `close()'.
self._last_io_time = time.time() # the last time an IO event happened on this connection
self._inmsgs = 0 # The total number of incoming messages that have come through this connection.
self._nummsgs = 0 # The sum of outgoing and incoming messages that have come through this connection.
self._outbytes = 0L # The total bytes ever sent over this connection.
self._inbytes = 0L # The total bytes ever received over this connection.
self._writable = False
self._readable = True
if self._throttlerread:
self._throttlerread.register(self._throttle_read, self._unthrottle_read)
if self._throttlerwrite:
self._throttlerwrite.register(self._throttle_write, self._unthrottle_write)
asyncore.dispatcher.__init__(self, sock=sock)
if (not sock) and (host):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.connect((host, port,))
except socket.error, le:
# This is not guaranteed to detect a failed connection, since connect() is a non-blocking call. But if we _do_ get a socket error here then we have definitely failed.
# debugprint("%s: couldn't connect to (%s, %s): %s\n", args=(self, host, port, le), v=1, vs="commstrats")
self.close(reason=("socket.error on `connect()'", le,))
raise CommsError.CannotSendError, ("socket.error on connect", le)
DoQ.doq.add_task(self._fail_if_not_connected, delay=self._timeout)
debugprint("%s created\n", args=(self,), v=5, vs="debug")
def __repr__(self):
try:
if self._closed:
state="closed"
elif self._closing:
state="closing"
elif not self._everconnected:
state="connecting"
elif self._readthrottled:
if self._writethrottled:
state="throttled in/out"
else:
state="throttled in"
elif self._writethrottled:
state="throttled out"
else:
state="connected"
try:
pn = self.getpeername()
return '<%s %s to %s at %s:%s, %x>' % (self.__class__.__name__, state, hr(self._cid_for_debugging), pn[0], pn[1], id(self))
except:
# `getpeername()' raises an exception sometimes, for example if the socket isn't connected yet. That's a pretty silly interface, isn't it? --Zooko 2001-06-17
return '<%s %s to %s, %x>' % (self.__class__.__name__, state, hr(self._cid_for_debugging), id(self))
except:
debugprint("exception in TCPConnection.__repr__():\n")
traceback.print_exc(file=debugstream)
raise
def _fail_if_not_connected(self):
"""
@precondition: This method must be called on the DoQ.: DoQ.doq.is_currently_doq()
"""
assert DoQ.doq.is_currently_doq(), "precondition: This method must be called on the DoQ."
if self._everconnected:
return
# this causes it to be cleaned up and the fast fail handlers to be called appropriately
self._fast_fail_reason = "connect() timeout"
self.close(reason="not connected after timeout")
def _set_closing(self):
self._writable = False
self._readable = False
self._closing = True
def _throttle_read(self):
"""
No more data will be read in from the network until `unthrottle_read()' is called.
@precondition: This method must be called on the Asyncore thread.: Asyncore.selector.is_currently_asyncore_thread()
"""
assert Asyncore.selector.is_currently_asyncore_thread(), "precondition: This method must be called on the Asyncore thread."
self._readthrottled = True
self._readable = False
def _unthrottle_read(self):
"""
@precondition: This method must be called on the Asyncore thread.: Asyncore.selector.is_currently_asyncore_thread()
"""
assert Asyncore.selector.is_currently_asyncore_thread(), "precondition: This method must be called on the Asyncore thread."
self._readthrottled = False
# Now if we are not closing then we are now ready to read.
if not self._closing and not self._readable:
self._readable = True
def _throttle_write(self):
"""
No more data will be written out to the network until `unthrottle_write()' is called.
@precondition: This method must be called on the Asyncore thread.: Asyncore.selector.is_currently_asyncore_thread()
"""
assert Asyncore.selector.is_currently_asyncore_thread(), "precondition: This method must be called on the Asyncore thread."
self._writethrottled = True
self._writable = False
def _unthrottle_write(self):
"""
@precondition: This method must be called on the Asyncore thread.: Asyncore.selector.is_currently_asyncore_thread()
"""
assert Asyncore.selector.is_currently_asyncore_thread(), "precondition: This method must be called on the Asyncore thread."
self._writethrottled = False
# Now if we are not closing, and if there is data waiting to be sent, then we are ready to write.
if not self._closing and (self._outbuf or self._outmsgq) and not self._writable:
self._writable = True
def send(self, msg, fast_fail_handler=None, pack=struct.pack):
"""
@precondition: This method must be called on the DoQ.: DoQ.doq.is_currently_doq()
"""
assert DoQ.doq.is_currently_doq(), "precondition: This method must be called on the DoQ."
if self._closing:
debugprint("%s.send(%s): fast failed due to closing\n", args=(self, msg,), v=3, vs="debug")
if fast_fail_handler:
DoQ.doq.add_task(fast_fail_handler, kwargs={'failure_reason': "closing", 'bad_commstrat': self._commstratobj})
return
lenmsg = len(msg)
if lenmsg > MAXIMUM_MSG_SIZE:
if fast_fail_handler:
DoQ.doq.add_task(fast_fail_handler, kwargs={'failure_reason': "message too long: %s" % hr(lenmsg)})
return
# TODO this is gross, it does a full data copy of the message just to prepend 4 bytes
# (send should ideally accept buffer chains instead of a single string messages)
# (this is not such a big deal as we're already spending many more cycles to encrypt every message)
str = pack('>L', lenmsg) + msg
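        # Wire format note: each message goes out as a 4-byte big-endian length
        # prefix followed by the payload, e.g. a 5-byte message "hello" is sent
        # as '\x00\x00\x00\x05hello'; _chunkify() strips the prefix on receive.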
self._outmsgq.append((str, fast_fail_handler,))
# Now if we are not closing, and not write-throttled, then we are now ready to write.
# (Note that it is possible for us to be closing now even though we tested just a few lines up because we are operating on the DoQ thread here and the asyncore thread can cause us to become closing.)
if not self._closing and not self._writethrottled and not self._writable:
self._writable = True
Asyncore.selector.wake_select()
self._nummsgs = self._nummsgs + 1
def is_idle(self, idletimeout=30):
"""
        @return: true if and only if no I/O events have occurred on this socket in >= idletimeout seconds, or if it is closed
"""
if self._closing:
return True
return (time.time() - self._last_io_time) >= idletimeout
def is_talking(self):
"""
@return: true if and only if there is a message actually half-sent or half-received
"""
return (len(self._outbuf) > 0) or (self._inbuflen > 0) or (len(self._outmsgq) > 0)
def is_busy(self, idletimeout):
"""
@return: true if and only if (there is a message actually (half-sent or half-received)
and not `is_idle(idletimeout)')
"""
return self.is_talking() and not self.is_idle(idletimeout)
#### methods below here are for internal use
# The `handle_spam()' methods are the "bottom" interface, to be called by the asyncore thread. There can be (one) thread touching the bottom interface and (one) thread touching the top interface at the same time. Also `_chunkify()', which get called from `handle_spam()' methods.
def close(self, reason=None):
"""
The sequence of functions that get called to close a TCPConnection instance are:
[*] close() -> [a] _finish_closing_on_pyutilasync -> [D] _finish_closing_on_doq
`[*]' means that the function can be invoked from any thread, `[D]' means that the function must be
invoked on the DoQ thread and `[a]' means that the function must be invoked on the Asyncore thread.
You should only ever call `close()', never any of the others.
"""
debugprint("%s.close(reason: %s)\n", args=(self, reason,), v=5, vs="TCPConnection")
self._set_closing()
if Asyncore.selector.is_currently_asyncore_thread():
self._finish_closing_on_pyutilasync()
else:
assert DoQ.doq.is_currently_doq()
Asyncore.selector.add_task(self._finish_closing_on_pyutilasync)
Asyncore.selector.wake_select()
def _finish_closing_on_pyutilasync(self):
"""
It calls `asyncore.dispatcher.close(self)' to clean up the socket object.
It then puts a task on the DoQ to do any cleaning-up that interacts with the DoQ world.
(See `_finish_closing_on_doq'.)
@precondition: This method must be called on the Asyncore thread.: Asyncore.selector.is_currently_asyncore_thread()
"""
assert Asyncore.selector.is_currently_asyncore_thread(), "precondition: This method must be called on the Asyncore thread."
debugprint("%s._finish_closing_on_pyutilasync()\n", args=(self,), v=5, vs="TCPConnection")
if self._startedclosingonpyutilasync:
return
self._startedclosingonpyutilasync = True
# debugprint("%s.close(): about to asyncore.dispatcher.close()...\n", args=(self,))
asyncore.dispatcher.close(self)
# debugprint("%s.close(): done with asyncore.dispatcher.close()\n", args=(self,))
DoQ.doq.add_task(self._finish_closing_on_doq)
def _finish_closing_on_doq(self):
"""
Does a fast-fail on any messages that were queued to be sent but haven't been sent yet.
Unregisters from throttlers.
@precondition: This method must be called on the DoQ.: DoQ.doq.is_currently_doq()
"""
assert DoQ.doq.is_currently_doq(), "precondition: This method must be called on the DoQ."
assert self._closing
assert self._startedclosingonpyutilasync
debugprint("%s._finish_closing_on_doq()\n", args=(self,), v=5, vs="TCPConnection")
if self._throttlerread:
self._throttlerread.unregister(self._throttle_read, self._unthrottle_read)
if self._throttlerwrite:
self._throttlerwrite.unregister(self._throttle_write, self._unthrottle_write)
if (not self._everconnected) or (self._outbytes == 0 and self._inbytes == 0):
# debugprint("%s: connection refused: commstrat=%s, _everconnected=%s, _outbytes=%s, _inbytes=%s\n", args=(self, self._commstratobj, self._everconnected, self._outbytes, self._inbytes), v=6, vs="TCPConnection")
connection_refused = 1
else:
connection_refused = None
# Fail any partially sent message:
# debugprint("%s.close(): about to Fail any partially sent message...\n", args=(self,))
if (len(self._outbuf) > 0) and (self._current_fast_fail_handler):
if hasattr(self, '_fast_fail_reason'):
self._current_fast_fail_handler(failure_reason="TCPConnection: "+self._fast_fail_reason, bad_commstrat=self._commstratobj)
elif connection_refused:
self._current_fast_fail_handler(failure_reason="TCPConnection: connection refused", bad_commstrat=self._commstratobj)
else:
self._current_fast_fail_handler(failure_reason="TCPConnection: closed before message was sent")
# debugprint("%s.close(): done with Fail any partially sent message\n", args=(self,))
# debugprint("%s.close(): about to Fail any queued messages...\n", args=(self,))
# Now fail any queued messages.
while len(self._outmsgq) > 0:
(str, ffh) = self._outmsgq.pop(0)
if ffh:
if connection_refused:
ffh(failure_reason="TCPConnection: connection refused", bad_commstrat=self._commstratobj)
else:
ffh(failure_reason="TCPConnection: cannot send message")
# send the event out to the TCPCommsHandler
self._close_handler_func(self)
# break the circular reference (hopefully our only remaining reference) from CommStrat.TCP object so that we disappear
# (Note: all the rest of the stuff in this function shouldn't be necessary with good garbage collection but currently (Python >= 2.0 and at least
# up to Python 2.2) the garbage collector won't collect garbage that has both a reference cycle and a __del__ method...) --Zooko 2001-10-07
if self._commstratobj and (self is self._commstratobj.asyncsock):
self._commstratobj.asyncsock = None
self._commstratobj = None
# remove no longer needed function references
self._upward_inmsg_handler = None
self._current_fast_fail_handler = None
self._close_handler_func = None
# remove no longer needed object references
self._commstratobj = None
self._throttlerread = None
self._throttlerwrite = None
# empty our buffers
self._outbuf = ''
self._inbufq = []
self._inbuflen = 0
self._nextinmsglen = None
self._closed = True
def handle_write(self):
if self._closing:
return
self._last_io_time = time.time()
# load up the next message if any.
if (len(self._outbuf) == 0) and (len(self._outmsgq) > 0):
(str, ffh) = self._outmsgq.pop(0)
self._current_fast_fail_handler = ffh
self._outbuf = str
if len(self._outbuf) > 0:
try:
num_sent = asyncore.dispatcher.send(self, self._outbuf)
# debugprint("%s.handle_write(): sent [%s] bytes\n", args=(self, num_sent), v=9, vs="commstrats") ### for faster operation, comment this line out. --Zooko 2000-12-11
except socket.error, le:
# debugprint("%s.handle_write(): got exception: %s\n", args=(self, le,), v=6, vs="commstrats")
self.close(reason=("socket.error on `send()'", le,))
return
self._outbytes = self._outbytes + num_sent
self._outbuf = self._outbuf[num_sent:]
if len(self._outbuf) == 0:
self._current_fast_fail_handler = None # remove the no longer needed function reference!
# Now if there are no more messages waiting to be sent, then we are no longer ready to write.
if not self._outmsgq:
self._writable = False
if self._throttlerwrite:
self._throttlerwrite.used(num_sent) # notify throttler we just used up some bandwidth
def writable(self):
return self._writable
def readable(self):
return self._readable
def _chunkify(self, nextstream, unpack=struct.unpack):
"""
@precondition: `self._upward_inmsg_handler' must be callable.: callable(self._upward_inmsg_handler): "self._upward_inmsg_handler: %s :: %s" % tuple(map(hr, (self._upward_inmsg_handler, type(self._upward_inmsg_handler),)))
"""
assert callable(self._upward_inmsg_handler), "precondition: `self._upward_inmsg_handler' must be callable." + " -- " + "self._upward_inmsg_handler: %s :: %s" % tuple(map(hr, (self._upward_inmsg_handler, type(self._upward_inmsg_handler),)))
assert (self._inbuflen == 0) or (len(self._inbufq) > 0), "self._inbuflen: %s, self._inbufq: %s" % tuple(map(hr, (self._inbuflen, self._inbufq,)))
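        # Reassembly overview: `inbufq' holds the raw chunks received so far and
        # `offset' points at the first unconsumed byte of inbufq[0].  Each frame
        # is a 4-byte big-endian length followed by that many payload bytes (see
        # send()); complete payloads are handed to self._upward_inmsg_handler on
        # the DoQ, and any trailing partial frame is kept for the next call.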
lennextstream = len(nextstream)
if lennextstream == 0:
debugprint("warning %s._chunkify(%s): length 0\n", args=(self, nextstream,), v=0, vs="debug")
return
# Using local variables is faster inside the coming loop, but on the other hand there is a cost to creating the local variables. I think it's probably still a win though, as these can get accessed multiple times during a single call to `_chunkify()'. --Zooko 2001-09-21
nextinmsglen = self._nextinmsglen
inbufq = self._inbufq
inbuflen = self._inbuflen
offset = self._offset
inbuflen = inbuflen + lennextstream
inbufq.append(nextstream)
# debugprint("%s._chunkify() called, nextinmsglen: %s, inbuflen: %s, offset: %s, inbufq: %s\n", args=(self, nextinmsglen, inbuflen, offset, inbufq,), v=0, vs="debug")
assert (inbuflen == 0) or (len(inbufq) > 0), "inbuflen: %s, inbufq: %s" % tuple(map(hr, (inbuflen, inbufq,)))
if (nextinmsglen is None) and (inbuflen >= 4):
            # collect the four bytes. (Note that 99% of the time we will execute the while loop body zero times and the remaining 1% of the time we will execute it one time, unless there is something REALLY funny going on -- that is, unless `_chunkify()' was called with a `nextstream' of size 1.)
assert len(inbufq) > 0, "inbufq: %s" % hr(inbufq)
while len(inbufq[0]) < (offset + 4):
assert len(inbufq) > 1, "inbufq: %s" % hr(inbufq)
inbufq[0] = inbufq[0] + inbufq[1]
del inbufq[1]
assert len(inbufq[0]) >= (offset + 4), "inbufq: %s, offset: %s" % tuple(map(hr, (inbufq, offset,)))
nextinmsglen = unpack('>L', inbufq[0][offset:(offset + 4)])[0]
assert type(nextinmsglen) is types.LongType
# debugprint("%s._chunkify(): nextinmsglen: %s\n", args=(self, nextinmsglen,), v=6, vs="debug")
if nextinmsglen > MAXIMUM_MSG_SIZE:
# Too big.
debugprint("%s._chunkify(): killing due to overlarge msg size. nextinmsglen: %s, inbuflen:%s, offset: %s, inbufq: %s\n", args=(self, nextinmsglen, inbuflen, offset, inbufq,), v=0, vs="debug")
self.close(reason=("overlarge msg size", nextinmsglen,))
return
nextinmsglen = int(nextinmsglen)
if DEBUG_MODE and nextinmsglen > (260 * 2**10):
debugprint("%s._chunkify(): suspiciously large msg size. nextinmsglen: %s, inbuflen:%s, offset: %s, inbufq: %s\n", args=(self, nextinmsglen, inbuflen, offset, inbufq,), v=0, vs="debug")
# Now this is the loop to extract and upsend each message. Note that we replicate the "extract next msg len" code from above at the end of this loop. This is the common idiom of "compute a value; while it is big enough: do some stuff; compute the value again"
while (nextinmsglen is not None) and (inbuflen >= (nextinmsglen + 4)):
# debugprint("%s._chunkify(), in loop offset: %s, inbufq: %s\n", args=(self, offset, inbufq,), v=0, vs="debug")
assert (inbuflen == 0) or (len(inbufq) > 0), "inbuflen: %s, inbufq: %s" % tuple(map(hr, (inbuflen, inbufq)))
# debugprint("%s._chunkify(): collecting next message of length: %s\n", args=(self, nextinmsglen,), v=6, vs="debug")
leninbufq0 = len(inbufq[0])
nextchunki = nextinmsglen+offset+4
assert leninbufq0 >= (offset + 4)
if leninbufq0 > nextchunki:
msg = inbufq[0][(offset + 4):(nextinmsglen+offset+4)]
# Set offset to point to the beginning of the unconsumed part:
offset = nextinmsglen+offset+4
elif leninbufq0 == nextchunki:
msg = inbufq[0][(offset + 4):]
offset = 0
del inbufq[0]
else: # leninbufq0 < nextchunki
msg = inbufq[0][(offset + 4):]
remain = nextinmsglen - len(msg)
i = 1
offset = 0
while remain > 0:
leninbufqi = len(inbufq[i])
if leninbufqi > remain:
# Append the part of the buf that is the trailing part of this msg.
msg = msg + inbufq[i][:remain]
# Set offset to point to the beginning of the unconsumed part:
offset = remain
remain = 0
del inbufq[:i]
elif leninbufqi == remain:
msg = msg + inbufq[i]
offset = 0
remain = 0
del inbufq[:i+1]
else: # leninbufqi < remain
msg = msg + inbufq[i]
remain = remain - leninbufqi
i = i + 1
inbuflen = inbuflen - (nextinmsglen + 4)
assert (inbuflen == 0) or (len(inbufq) > 0), "inbuflen: %s, inbufq: %s" % tuple(map(hr, (inbuflen, inbufq,)))
self._inmsgs = self._inmsgs + 1
self._nummsgs = self._nummsgs + 1
# debugprint("%s._chunkify(): got message of length: %s, msg: %s\n", args=(self, nextinmsglen, msg,), v=6, vs="debug")
DoQ.doq.add_task(self._upward_inmsg_handler, args=(self, msg))
# Okay we're done with that message! Now recompute nextinmsglen.
if inbuflen < 4:
nextinmsglen = None
else:
                # collect the four bytes. (Note that 99% of the time we will execute the while loop body zero times and the remaining 1% of the time we will execute it one time, unless there is something REALLY funny going on -- that is, unless `_chunkify()' was called with a `nextstream' of size 1.)
assert len(inbufq) > 0, "inbufq: %s" % hr(inbufq)
while len(inbufq[0]) < (offset + 4):
assert len(inbufq) > 1, "inbufq: %s" % hr(inbufq)
inbufq[0] = inbufq[0] + inbufq[1]
del inbufq[1]
assert len(inbufq[0]) >= (offset + 4), "inbufq: %s, offset: %s" % tuple(map(hr, (inbufq, offset,)))
nextinmsglen = unpack('>L', inbufq[0][offset:(offset + 4)])[0]
assert type(nextinmsglen) is types.LongType
# debugprint("%s._chunkify(): nextinmsglen: %s\n", args=(self, nextinmsglen,), v=6, vs="debug")
if nextinmsglen > MAXIMUM_MSG_SIZE:
# Too big.
debugprint("%s._chunkify(): killing due to overlarge msg size. nextinmsglen: %s, inbuflen:%s, offset: %s, inbufq: %s\n", args=(self, nextinmsglen, inbuflen, offset, inbufq,), v=0, vs="debug")
self.close(reason=("overlarge msg size", nextinmsglen,))
return
nextinmsglen = int(nextinmsglen)
if DEBUG_MODE and nextinmsglen > (260 * 2**10):
debugprint("%s._chunkify(): suspiciously large msg size. nextinmsglen: %s, inbuflen:%s, offset: %s, inbufq: %s\n", args=(self, nextinmsglen, inbuflen, offset, inbufq,), v=0, vs="debug")
self._nextinmsglen = nextinmsglen
self._inbufq = inbufq
self._inbuflen = inbuflen
self._offset = offset
assert (self._inbuflen == 0) or (len(self._inbufq) > 0), "self._inbuflen: %s, self._inbufq: %s" % tuple(map(hr, (self._inbuflen, self._inbufq)))
def handle_read(self):
if self._closing:
return
self._last_io_time = time.time()
try:
data = self.recv(65536)
# debugprint("%s.handle_read(): received [%s] bytes\n", args=(self, len(data)), v=9, vs="commstrats") ### for faster operation, comment this line out. --Zooko 2000-12-11
except socket.error, le:
# This is the socket's way of telling us that we are _closed_.
# debugprint("%s: closed socket detected. le:%s\n", args=(self, le), v=6, vs="commstrats")
self.close(reason=("socket.error on `recv()'", le,))
return
except MemoryError, le:
debugprint("memory error in TCPConnection.read()\n")
self.close() # the best thing to do is close the connection, that frees some memory, makes our internal state consistent, and signals our peers that we're not so good right now
return
if len(data) == 0:
# This is the socket's way of telling us that we are _closed_.
# debugprint("%s: closed socket detected. (read of length 0)\n", args=(self,), v=6, vs="commstrats")
self.close()
return
self._inbytes = self._inbytes + len(data)
if self._throttlerread:
self._throttlerread.used(len(data)) # notify throttler we just used up some bandwidth
self._chunkify(data)
def handle_accept(self) :
debugprint("%s.handle_accept() checking to see if this gets called...\n", args=(self,), v=0, vs="debug")
def handle_connect(self):
self._last_io_time = time.time()
self._everconnected = True
# debugprint("%s.handle_connect()\n", args=(self,), v=6, vs="commstrats")
def handle_close(self):
# debugprint("%s.handle_close()\n", args=(self,), v=6, vs="commstrats")
self.close()
def log(self, message):
# for faster operation, comment this whole method out and replace it with "def log(): return". --Zooko 2000-12-11
return
## if message[-1:] == "\n":
## debugprint("%s: asyncore log: %s", args=(self, message,), v=15, vs="commstrats")
## else:
## debugprint("%s: asyncore log: %s\n", args=(self, message,), v=15, vs="commstrats")
|
zooko/egtp_new
|
egtp/TCPConnection.py
|
Python
|
lgpl-2.1
| 31,090
|
[
"VisIt"
] |
1ca7b569036a4c029ce1026ce2fafb9251fa397930044068686db17c8158b98c
|
#!/usr/bin/env python
##############################################################################################
#
#
# CMIP6_hybrid_regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You can find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <[email protected]>
# Modified by Marcus Koehler 2017-10-11 <[email protected]>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/OXBUDS/0.5x0.5/cmip6_hybrid/v2/CMIP6_hybrid_combined_iso-butane_1960-2020_v2_greg.nc'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='i-C4H10'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
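# Hedged, optional sanity check (not part of the original workflow): an
# area-weighted regrid should approximately conserve the globally integrated
# flux, so comparing the weighted global sums of 'nems' and 'ocube' is a quick
# way to spot problems. The helper below is only a sketch and is never called.
def check_global_total(cube):
    """Return the area-weighted global sum of a cube with bounded lat/lon coordinates."""
    import iris.analysis.cartography
    weights = iris.analysis.cartography.area_weights(cube)
    return cube.collapsed(['longitude', 'latitude'], iris.analysis.SUM,
                          weights=weights).data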
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='iso-butane surface emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_butane_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='CMIP6_hybrid_combined_iso-butane_1960-2020_v2_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of iso-butane from 1960 to 2020.'
ocube.attributes['File_version']='CMIP6_hybrid_v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Hoesly et al., Geosci. Mod. Dev., 2018; Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010; Helmig et al., Atmos. Environ., 2014.'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945 ])
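# Hedged aside (not in the original script): on the 360_day calendar these
# mid-month points are simply every 30 days starting at day 15, so the long
# literal above is equivalent to
#   numpy.arange(15, 21946, 30, dtype='float64')   # 732 monthly values, 1960-2020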
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945 ], dtype='float64')
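# (Hedged aside: this is the same mid-month sequence as the time points above,
# so it too could be written as numpy.arange(15, 21946, 30, dtype='float64').)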
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_iC4H10.nc'
# we don't want the time dimension to be unlimited (cattable), as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
|
acsis-project/emissions
|
emissions/python/CMIP6_hybrid/CMIP6_hybrid_regrid_iC4H10_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 17,251
|
[
"NetCDF"
] |
3467d7fa7af239e36ca3de7903c06e6f7f24aaa3c644d5465bec16deb7995444
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawPoints(myscreen, clpoints, ccpoints):
c=camvtk.PointCloud( pointlist=clpoints, collist=ccpoints)
c.SetPoints()
myscreen.addActor(c )
def drawFiber(myscreen, f, fibercolor):
inter = f.getInts()
print("fiber has ", len(inter) , " intervals")
for i in inter:
if not i.empty():
ip1 = f.point( i.lower )
ip2 = f.point( i.upper )
myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=camvtk.clColor( i.lower_cc) ) )
myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=camvtk.clColor( i.upper_cc) ) )
#cc1 = i.lower_cc
#cc2 = i.upper_cc
#myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.lgreen ) )
#myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.lgreen ) )
# cutter circle
#c1 = camvtk.Circle(center=(ip1.x,ip1.y,ip1.z), radius = 0.3/2, color=fibercolor)
#myscreen.addActor(c1)
#c2 = camvtk.Circle(center=(ip2.x,ip2.y,ip2.z), radius = 0.3/2, color=fibercolor)
#myscreen.addActor(c2)
def drawFiber_clpts(myscreen, f, clcolor):
inter = f.getInts()
#print "fiber has ", len(inter) , " intervals"
for i in inter:
if not i.empty():
ip1 = f.point( i.lower )
ip2 = f.point( i.upper )
#myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
sphcolor = camvtk.clColor( i.lower_cc)
myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=sphcolor ) )
sphcolor = camvtk.clColor( i.upper_cc)
myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=sphcolor ) )
#cc1 = i.lower_cc
#cc2 = i.upper_cc
#myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.pink ) )
#myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.pink ) )
def yfiber(yvals,t,zh,myscreen,cutter,color):
for y in yvals:
f1 = ocl.Point(-0.5,y,zh) # start point of fiber
f2 = ocl.Point(1.5,y,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
i = ocl.Interval()
#cutter.vertexPush(f,i,t)
#cutter.facetPush(f,i,t)
#cutter.edgePush(f,i,t)
cutter.pushCutter(f,i,t)
f.addInterval(i)
drawFiber_clpts(myscreen, f, color)
def xfiber(xvals,t,zh,myscreen,cutter,color):
for x in xvals:
f1 = ocl.Point(x,-0.5,zh) # start point of fiber
f2 = ocl.Point(x,1.5,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
i = ocl.Interval()
#cutter.vertexPush(f,i,t)
#cutter.facetPush(f,i,t)
#cutter.edgePush(f,i,t)
cutter.pushCutter(f,i,t)
f.addInterval(i)
drawFiber_clpts(myscreen, f, color)
def oneCutterWaterline(myscreen, cutter, zh, color):
fiber_range=2
Nmax = 100
yvals = [float(n-float(Nmax)/2)/Nmax*fiber_range for n in range(0,Nmax+1)]
xvals = [float(n-float(Nmax)/2)/Nmax*fiber_range for n in range(0,Nmax+1)]
yfiber(yvals,t,zh,myscreen,cutter, color)
xfiber(xvals,t,zh,myscreen,cutter, color)
if __name__ == "__main__":
print(ocl.version())
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(0.5, 3, 2)
myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
a = ocl.Point(0,1,0.2)
b = ocl.Point(1,0.5,0.0)
c = ocl.Point(0.1,0.1,0.0)
myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
t = ocl.Triangle(b,c,a)
angle = math.pi/4
diameter=0.3
length=5
cutter1 = ocl.BallCutter(diameter, length)
cutter2 = ocl.CylCutter(diameter, length)
cutter3 = ocl.BullCutter(diameter, diameter/4, length)
cutter4 = ocl.ConeCutter(diameter, angle, length)
#cutter = cutter.offsetCutter( 0.1 )
fiber_range=2
Nmax = 50
yvals = [float(n-float(Nmax)/2)/Nmax*fiber_range for n in range(0,Nmax+1)]
xvals = [float(n-float(Nmax)/2)/Nmax*fiber_range for n in range(0,Nmax+1)]
zmin = -0.1
zmax = 0.25
zNmax =5
dz = (zmax-zmin)/(zNmax-1)
zvals=[]
for n in range(0,zNmax):
zvals.append(zmin+n*dz)
for zh in zvals:
oneCutterWaterline(myscreen, cutter1, zh, camvtk.yellow)
oneCutterWaterline(myscreen, cutter2, zh, camvtk.cyan)
oneCutterWaterline(myscreen, cutter3, zh, camvtk.mag)
oneCutterWaterline(myscreen, cutter4, zh, camvtk.mag)
#yfiber(yvals,t,zh,myscreen)
#xfiber(xvals,t,zh,myscreen)
print("done.")
myscreen.render()
#w2if = vtk.vtkWindowToImageFilter()
#w2if.SetInput(myscreen.renWin)
#lwr = vtk.vtkPNGWriter()
#lwr.SetInput( w2if.GetOutput() )
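# Hedged completion of the commented-out screenshot snippet above (left as
# comments so behaviour is unchanged; the PNG file name is illustrative):
#w2if.Update()
#lwr.SetFileName("fiber_13_onetri_cutter_comparison.png")
#lwr.Write()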
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
aewallin/opencamlib
|
examples/python/fiber/fiber_13_onetri_cutter_comparison.py
|
Python
|
lgpl-2.1
| 5,644
|
[
"VTK"
] |
2b8d556303ae6564e1c79cfe460be88a5c22d5db263f66e9a408706b8cf73993
|
####
# This sample is published as part of the blog article at www.toptal.com/blog
# Visit www.toptal.com/blog and subscribe to our newsletter to read great posts
####
import asyncio
import logging
import os
from time import time
import aiohttp
from download import setup_download_dir, get_links
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
async def async_download_link(session, directory, link):
"""
Async version of the download_link method we've been using in the other examples.
:param session: aiohttp ClientSession
:param directory: directory to save downloads
:param link: the url of the link to download
:return:
"""
download_path = directory / os.path.basename(link)
async with session.get(link) as response:
with download_path.open('wb') as f:
while True:
# await pauses execution until up to 1024 bytes have been read from the stream
chunk = await response.content.read(1024)
if not chunk:
# We are done reading the file, break out of the while loop
break
f.write(chunk)
logger.info('Downloaded %s', link)
# Main is now a coroutine
async def main():
client_id = os.getenv('IMGUR_CLIENT_ID')
if not client_id:
raise Exception("Couldn't find IMGUR_CLIENT_ID environment variable!")
download_dir = setup_download_dir()
# We use a session to take advantage of tcp keep-alive
# Set a 3 second read and connect timeout. Default is 5 minutes
async with aiohttp.ClientSession(conn_timeout=3, read_timeout=3) as session:
tasks = [(async_download_link(session, download_dir, l)) for l in get_links(client_id)]
# gather aggregates all the tasks and schedules them in the event loop
await asyncio.gather(*tasks, return_exceptions=True)
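# Hedged aside (not part of the original sample): on Python 3.7+ the event-loop
# boilerplate below can be reduced to asyncio.run(main()), and newer aiohttp
# releases express these timeouts via aiohttp.ClientTimeout, for example:
#   timeout = aiohttp.ClientTimeout(sock_connect=3, sock_read=3)
#   async with aiohttp.ClientSession(timeout=timeout) as session: ...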
if __name__ == '__main__':
ts = time()
# Create the asyncio event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
# Shutdown the loop even if there is an exception
loop.close()
logger.info('Took %s seconds to complete', time() - ts)
|
volker48/python-concurrency
|
async_imgur.py
|
Python
|
mit
| 2,279
|
[
"VisIt"
] |
82a0c53a93a8d953555cd2ddabe6699e817c3702c1fd0f890e407208200e0617
|
"""
Obtains filepaths from examples for testing.
"""
import os
test_dir = os.path.dirname(os.path.abspath(__file__))
examples_dir = os.path.join(test_dir, "..", "examples")
def get_example_filename(*filename):
program = filename[0].lower()
# Make sure we have written these programs
if program not in ["amber", "lammps", "gromacs"]:
raise KeyError("Examples for program %s not found!" % program)
# Make sure file exists
fname = os.path.join(examples_dir, *filename)
if not os.path.exists(fname):
raise OSError("File %s not found!" % fname)
return fname
def get_scratch_directory(filename):
scr_dir = os.path.join(test_dir, "scratch")
if not os.path.exists(scr_dir):
os.mkdir(scr_dir)
return os.path.join(scr_dir, filename)
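# Hedged usage sketch (the example file names below are illustrative and are
# not guaranteed to exist in the examples directory):
#   prmtop = get_example_filename("amber", "some_system.prmtop")
#   scratch_file = get_scratch_directory("output.h5")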
|
dgasmith/EEX_scratch
|
tests/eex_find_files.py
|
Python
|
bsd-3-clause
| 798
|
[
"Amber",
"Gromacs",
"LAMMPS"
] |
e8f8f9d13b6c353c522507275e01a1811d97603761f7f88b1d1482b823d93365
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
class SequenceCollectionError(Exception):
"""General error for sequence collection validation failures."""
pass
class AlignmentError(SequenceCollectionError):
"""General error for alignment validation failures."""
pass
|
demis001/scikit-bio
|
skbio/alignment/_exception.py
|
Python
|
bsd-3-clause
| 658
|
[
"scikit-bio"
] |
404d7eb2314bc2f59345307a84ac7eb5ebd420095d2dddc8beb950ce9ec1d19d
|
#
# Copyright (c) 2013, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with semicontrol. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# std lib imports
import os.path
# third party imports
from PySide import QtGui, QtCore
import pyqtgraph as pg
import numpy as np
# local imports
from spectra_plot_item import SpectraPlotItem
from measured_spectrum import openMeasuredSpectrum
from spectra_control_widget import SpectraControlWidget
#TODO:
############################################################################
# File # Simulate # Fit #
############################################################################
# Min Value Max #
# | 0. || 1. || 1. | #
############################
# Constant X | #
# ------o------------ Lk | #
# ______________________ | #
# Gaussian X | #
# A ----------o------ Lk | #
# C ---------o------- Lk | #
# W ----o------------ Lk ^ #
# ______________________ U #
# Gaussian X U #
# A ----------o------ Lk V #
# C ---------o------- Lk | #
# W ----o------------ Lk | #
# ______________________ | #
# | Add Constant | | #
# ______________________ | #
# | Add Gaussian | | #
# ______________________ | #
# | Add Lorentzian | | #
# ______________________ | #
############################
# Simulate options:
## Add Constant
## Add Gaussian
## Add Lorentzian
# Fit options:
## (whatever the various fitting algorithms are)
# The left panel should be dockable.
# Even better would be to have icons instead of the A, C, and W labels
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.initUI()
self.spectrum = None
def initUI(self):
self.setWindowTitle('SimpleFit')
openAction = QtGui.QAction('&Open a spectrum', self)
openAction.setStatusTip('Open a spectrum')
openAction.setToolTip('Open a spectrum')
openAction.setShortcut('Ctrl+O')
openAction.triggered.connect(self.openFile)
saveAction = QtGui.QAction('&Save parameters', self)
saveAction.setStatusTip('Save parameters')
saveAction.setToolTip('Save parameters')
saveAction.setShortcut('Ctrl+S')
saveAction.triggered.connect(self.saveFile)
aboutAction = QtGui.QAction('&About', self)
aboutAction.triggered.connect(self.about)
autoFitAction = QtGui.QAction('Auto&fit', self)
autoFitAction.setStatusTip('Autofit the spectrum')
autoFitAction.setToolTip('Autofit the spectrum')
autoFitAction.setShortcut('Ctrl+F')
autoFitAction.triggered.connect(self.autoFit)
copyNumericIntegralAction = QtGui.QAction('Copy &numeric integral', self)
copyNumericIntegralAction.setStatusTip('Integrate numerically and copy the result')
copyNumericIntegralAction.setToolTip('Integrate numerically and copy the result')
copyNumericIntegralAction.setShortcut('Ctrl+N')
copyNumericIntegralAction.triggered.connect(self.copyNumericIntegral)
copyPeakIntegralAction = QtGui.QAction('Copy fit &integral', self)
copyPeakIntegralAction.setStatusTip('Integrate the fit peaks and copy the result')
copyPeakIntegralAction.setToolTip('Integrate the fit peaks and copy the result')
copyPeakIntegralAction.setShortcut('Ctrl+I')
copyPeakIntegralAction.triggered.connect(self.copyPeakIntegral)
copyFitChi2Action = QtGui.QAction('Copy fit chi^2', self)
copyFitChi2Action.setStatusTip('Copy the fitting chi^2 to the clipboard')
copyFitChi2Action.setToolTip('Copy the fitting chi^2 to the clipboard')
copyFitChi2Action.setShortcut('Ctrl+X')
copyFitChi2Action.triggered.connect(self.copyFitChi2)
copyFitValuesAndStddevsAction = QtGui.QAction('&Copy fit values and stddevs', self)
copyFitValuesAndStddevsAction.setStatusTip('Copy the fit parameter values and stddevs to the clipboard')
copyFitValuesAndStddevsAction.setToolTip('Copy the fit parameter values and stddevs to the clipboard')
copyFitValuesAndStddevsAction.setShortcut('Ctrl+C')
copyFitValuesAndStddevsAction.triggered.connect(self.copyFitValuesAndStddevs)
copyAllResultsAction = QtGui.QAction('Copy &all of the above', self)
copyAllResultsAction.setStatusTip('Copy all the above values to the clipboard')
copyAllResultsAction.setToolTip('Copy all the above values to the clipboard')
copyAllResultsAction.setShortcut('Ctrl+A')
copyAllResultsAction.triggered.connect(self.copyAllResults)
copyFitValuesAction = QtGui.QAction('&Copy fit values', self)
copyFitValuesAction.setStatusTip('Copy the fit parameter values to the clipboard')
copyFitValuesAction.setToolTip('Copy the fit parameter values to the clipboard')
copyFitValuesAction.triggered.connect(self.copyFitValues)
copyFitStddevsAction = QtGui.QAction('&Copy fit stddevs', self)
copyFitStddevsAction.setStatusTip('Copy the fit parameter stddevs to the clipboard')
copyFitStddevsAction.setToolTip('Copy the fit parameter stddevs to the clipboard')
copyFitStddevsAction.triggered.connect(self.copyFitStddevs)
pasteFitValuesAction = QtGui.QAction('&Paste fit values', self)
pasteFitValuesAction.setStatusTip('Paste the fit parameter values from the clipboard')
pasteFitValuesAction.setToolTip('Paste the fit parameter values from the clipboard')
pasteFitValuesAction.setShortcut('Ctrl+V')
pasteFitValuesAction.triggered.connect(self.pasteFitValues)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openAction)
fileMenu.addAction(saveAction)
toolsMenu = menubar.addMenu('Tools')
toolsMenu.addAction(autoFitAction)
toolsMenu.addAction(copyNumericIntegralAction)
toolsMenu.addAction(copyPeakIntegralAction)
toolsMenu.addAction(copyFitChi2Action)
toolsMenu.addAction(copyFitValuesAndStddevsAction)
toolsMenu.addAction(copyAllResultsAction)
toolsMenu.addAction(copyFitValuesAction)
toolsMenu.addAction(copyFitStddevsAction)
toolsMenu.addAction(pasteFitValuesAction)
aboutMenu = menubar.addMenu('&About')
aboutMenu.addAction(aboutAction)
view = pg.GraphicsLayoutWidget()
self.setCentralWidget(view)
self.plot = SpectraPlotItem(xaxis='energy')
view.addItem(self.plot, 0, 0)
self.setCentralWidget(view)
self.control = SpectraControlWidget()
self.plot.addSpectrum(self.control.summedSpectrum)
dw = QtGui.QDockWidget()
dw.setWidget(self.control)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dw)
self.control.sigSpectrumAdded.connect(self.plot.addSpectrum)
self.control.sigSpectrumRemoved.connect(self.plot.removeSpectrum)
self.setWindowTitle('SimpleFit')
self.resize(1280,800)
#self.moveCenter()
self.moveTopLeft()
def openFile(self):
filepath, filter = QtGui.QFileDialog.getOpenFileName(parent=self,
caption='Open a PL spectrum file')
if not filepath:
return
dirpath, filename = os.path.split(filepath)
self.setWindowTitle(u'SimpleFit - {}'.format(filename))
spectrum = openMeasuredSpectrum(filepath)
# Check whether the intensity with the system response removed is included.
# If not, ask the user to select a system response file.
print spectrum.intensity
if not len(spectrum.intensity):
sysres_filepath, filter = QtGui.QFileDialog.getOpenFileName(
parent=self, caption='Open a system response file')
if not sysres_filepath:
return
spectrum = openMeasuredSpectrum(filepath, sysres_filepath)
# remove the previous measured spectrum
if self.spectrum:
self.plot.removeSpectrum(self.spectrum)
# plot the measured spectrum
self.plot.addSpectrum(spectrum)
self.spectrum = spectrum
# update the simulated spectrum
self.control.setEnergy(spectrum.energy)
self.control.setIntensity(spectrum.intensity)
def saveFile(self):
filepath, filter = QtGui.QFileDialog.getSaveFileName(parent=self,
caption='Save fitting parameters to a file')
self.control.saveParameters(filepath)
def about(self):
title = 'About SimpleFit'
text = """
Copyright (c) 2013, Scott J Maddox
This file is part of SimplePL.
SimplePL is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
SimplePL is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with semicontrol. If not, see
<http://www.gnu.org/licenses/>.
"""
QtGui.QMessageBox.about(self, title, text)
def autoFit(self):
if self.spectrum is None:
return # do nothing if no measured spectrum
self.control.autoFit(self.spectrum)
def getNumericIntegral(self):
if self.spectrum is None:
return # do nothing if no measured spectrum
x = self.spectrum.energy[::-1]
y = self.spectrum.intensity[::-1]
from scipy.integrate import cumtrapz
return cumtrapz(y, x).max()
def copyNumericIntegral(self):
integral = self.getNumericIntegral()
print 'numeric integral = %E'%integral
QtGui.QApplication.clipboard().setText('%E'%integral)
def copyPeakIntegral(self):
integral = self.control.getPeakIntegral()
print 'peak integral = %E'%integral
QtGui.QApplication.clipboard().setText('%E'%integral)
def copyFitChi2(self):
chi2 = self.control.getFitChi2()
print 'fitting chi2 = %E'%chi2
QtGui.QApplication.clipboard().setText('%E'%chi2)
def copyFitValuesAndStddevs(self):
values = self.control.getFitValues()
stddevs = self.control.getFitStddevs()
s = []
for value, stddev in zip(values, stddevs):
s.append('%E'%value)
s.append('%E'%stddev)
result = '\t'.join(s)
print 'copying fit values and stddevs:', result
QtGui.QApplication.clipboard().setText(result)
def copyAllResults(self):
vals = []
vals.append(self.getNumericIntegral())
vals.append(self.control.getPeakIntegral())
vals.append(self.control.getFitChi2())
values = self.control.getFitValues()
stddevs = self.control.getFitStddevs()
for value, stddev in zip(values, stddevs):
vals.append(value)
vals.append(stddev)
s = []
for val in vals:
s.append('%E'%val)
result = '\t'.join(s)
print 'copying all results:', result
QtGui.QApplication.clipboard().setText(result)
def copyFitValues(self):
values = self.control.getFitValues()
s = []
for value in values:
s.append('%E'%value)
result = '\t'.join(s)
print 'copying fit values:', result
QtGui.QApplication.clipboard().setText(result)
def copyFitStddevs(self):
stddevs = self.control.getFitStddevs()
s = []
for stddev in stddevs:
s.append('%E'%stddev)
result = '\t'.join(s)
print 'copying fit stddevs:', result
QtGui.QApplication.clipboard().setText(result)
def pasteFitValues(self):
'''
Pastes the fitting parameter values from the clipboard.
If there are twice as many values as parameters, it is assumed that
the values are paired with stddev's (which will be ignored).
'''
s = QtGui.QApplication.clipboard().text()
vals = []
for sval in s.split('\t'):
if not sval:
vals.append(np.nan)
else:
vals.append(float(sval))
print 'setting parameters:', vals
self.control.setFitValues(vals)
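# Hedged illustration (not from the original code): a clipboard string such as
# '1.2E+00\t3.0E-02\t4.5E-01\t1.0E-02', as produced by copyFitValuesAndStddevs,
# parses here to four numbers; per the docstring, the downstream setFitValues
# is then expected to ignore every second entry (the stddevs).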
def moveCenter(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def moveTopLeft(self):
qr = self.frameGeometry()
p = QtGui.QDesktopWidget().availableGeometry().topLeft()
self.move(p)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Quit?',
'Are you sure you want to quit?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
return
else:
event.ignore()
return
#TODO do something if data is unsaved?
|
scott-maddox/simplepl
|
src/simplefit/main_window.py
|
Python
|
agpl-3.0
| 14,092
|
[
"Gaussian"
] |
9c4e7b5cd5982550d50dc65647dafc9958da049b675c7324dd116bf55e1fc60a
|
#!/usr/bin/env python
# encoding: utf-8
# === IMPORTANT ====
# NOTE: In order to support non-ASCII file names,
# your system's locale MUST be set to 'utf-8'
# CAVEAT: DOESN'T work with a proxy; the underlying reason is that
# the 'requests' package used for http communication doesn't seem
# to work properly with proxies (reason unclear).
# NOTE: It seems Baidu doesn't handle MD5 quite right after combining files,
# so it may return erroneous MD5s. Performing a rapidupload again may fix the problem.
# That's why I changed the default behavior to no-verification.
# NOTE: syncup / upload and syncdown / downdir are partial duplicates.
# The difference: syncup/syncdown compare first and then perform actions,
# while upload/downdir just proceed to upload / download (but still compare during actions),
# so they are roughly the same, except that sync can delete extra files.
#
# TODO: Dry run?
# TODO: Use batch functions for better performance
'''
bypy -- Python client for Baidu Yun
---
https://github.com/houtianze/bypy
---
bypy is a Baidu Yun client written in Python (2.7).
(NOTE: You need to install the 'requests' library by running 'pip install requests')
It offers some file operations like: list, download, upload, syncup, syncdown, etc.
The main purpose is to utilize Baidu Yun in Linux environment (e.g. Raspberry Pi)
It uses a server for OAuth authorization, to conceal the Application's Secret Key.
Alternatively, you can create your own App at Baidu and replace the 'ApiKey' and 'SecretKey' with your copies,
and then, change 'ServerAuth' to 'False'
---
@author: Hou Tianze
@license: MIT
@contact: GitHub: houtianze, Twitter: @ibic, G+: +TianzeHou
'''
# it takes days just to fix you, unicode ...
# some references
# https://stackoverflow.com/questions/4374455/how-to-set-sys-stdout-encoding-in-python-3
# https://stackoverflow.com/questions/492483/setting-the-correct-encoding-when-piping-stdout-in-python
# http://drj11.wordpress.com/2007/05/14/python-how-is-sysstdoutencoding-chosen/
# https://stackoverflow.com/questions/11741574/how-to-set-the-default-encoding-to-utf-8-in-python
# https://stackoverflow.com/questions/2276200/changing-default-encoding-of-python
from __future__ import unicode_literals
EIncorrectPythonVersion = 1
import sys
vi = sys.version_info
if not hasattr(sys.version_info, 'major') or vi.major != 2 or vi.minor < 7:
print("Error: Incorrect Python version. " + \
"You need 2.7 or above (but not 3)")
sys.exit(EIncorrectPythonVersion)
#reload(sys)
#sys.setdefaultencoding(SystemEncoding)
import os
import locale
SystemLanguageCode, SystemEncoding = locale.getdefaultlocale()
if SystemEncoding and not sys.platform.startswith('win32'):
sysenc = SystemEncoding.upper()
if sysenc != 'UTF-8' and sysenc != 'UTF8':
err = "You MUST set system locale to 'UTF-8' to support unicode file names.\n" + \
"Current locale is '{}'".format(SystemEncoding)
ex = Exception(err)
print(err)
raise ex
if not SystemEncoding:
# ASSUME UTF-8 encoding, if for whatever reason,
# we can't get the default system encoding
print("*WARNING*: Cannot detect the system encoding, assume it's 'UTF-8'")
SystemEncoding = 'utf-8'
import codecs
# no idea who is the asshole that screws the sys.stdout.encoding
# the locale is 'UTF-8', sys.stdin.encoding is 'UTF-8',
# BUT, sys.stdout.encoding is None ...
if not (sys.stdout.encoding and sys.stdout.encoding.lower() == 'utf-8'):
encoding_to_use = sys.stdout.encoding
try:
codecs.lookup(encoding_to_use)
u'汉字'.encode(encoding_to_use)
except: # (LookupError, TypeError, UnicodeEncodeError):
encoding_to_use = 'utf-8'
sys.exc_clear()
sys.stdout = codecs.getwriter(encoding_to_use)(sys.stdout)
sys.stderr = codecs.getwriter(encoding_to_use)(sys.stderr)
import signal
import time
import shutil
import posixpath
#import types
import traceback
import inspect
import logging
import httplib
import urllib
import json
import hashlib
import binascii
import re
import cPickle as pickle
import pprint
import socket
import math
#from collections import OrderedDict
from os.path import expanduser
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
# Defines that should never be changed
OneK = 1024
OneM = OneK * OneK
OneG = OneM * OneK
OneT = OneG * OneK
OneP = OneT * OneK
OneE = OneP * OneK
OneZ = OneE * OneK
OneY = OneZ * OneK
SIPrefixNames = [ '', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y' ]
SIPrefixTimes = {
'K' : OneK,
'M' : OneM,
'G' : OneG,
'T' : OneT,
'E' : OneE,
'Z' : OneZ,
'Y' : OneY }
# special variables
__all__ = []
__version__ = '1.0.20'
# ByPy default values
DefaultSliceInMB = 20
DefaultSliceSize = 20 * OneM
DefaultDlChunkSize = 20 * OneM
RetryDelayInSec = 10
# Baidu PCS constants
MinRapidUploadFileSize = 256 * OneK
MaxSliceSize = 2 * OneG
MaxSlicePieces = 1024
# return (error) codes
ENoError = 0 # plain old OK, fine, no error.
#EIncorrectPythonVersion = 1
EApiNotConfigured = 10 # ApiKey, SecretKey and AppPcsPath not properly configured
EArgument = 10 # invalid program command argument
EAbort = 20 # aborted
EException = 30 # unhandled exception occurred
EParameter = 40 # invalid parameter passed to ByPy
EInvalidJson = 50
EHashMismatch = 60 # MD5 hashes of the local file and remote file don't match each other
EFileWrite = 70
EFileTooBig = 80 # file too big to upload
EFailToCreateLocalDir = 90
EFailToCreateLocalFile = 100
EFailToDeleteDir = 110
EFailToDeleteFile = 120
EFileNotFound = 130
EMaxRetry = 140
ERequestFailed = 150 # request failed
ECacheNotLoaded = 160
EMigrationFailed = 170
EDownloadCerts = 180
EFatal = -1 # No way to continue
# internal errors
IEMD5NotFound = 31079 # File md5 not found, you should use upload API to upload the whole file.
IEBDUSSExpired = -6
# PCS configuration constants
# ==== NOTE ====
# I use server auth, because it's the only possible method to protect the SecretKey.
# If you don't like that and want to perform local authorization using 'Device' method, you need to:
# - Change to: ServerAuth = False
# - Paste your own ApiKey and SecretKey.
# - Change the AppPcsPath to your own App's directory at Baidu PCS
# Then you are good to go
ServerAuth = True # change it to 'False' if you use your own appid
GaeUrl = 'https://bypyoauth.appspot.com'
OpenShiftUrl = 'https://bypy-tianze.rhcloud.com'
HerokuUrl = 'https://bypyoauth.herokuapp.com'
GaeRedirectUrl = GaeUrl + '/auth'
GaeRefreshUrl = GaeUrl + '/refresh'
OpenShiftRedirectUrl = OpenShiftUrl + '/auth'
OpenShiftRefreshUrl = OpenShiftUrl + '/refresh'
HerokuRedirectUrl = HerokuUrl + '/auth'
HerokuRefreshUrl = HerokuUrl + '/refresh'
AuthServerList = [
# url, rety?, message
(GaeRedirectUrl, False, "Authorizing with the GAE server ..."),
(OpenShiftRedirectUrl, True, "I think you are WALLed, so let's authorize with the OpenShift server ..."),
(HerokuRedirectUrl, True, "OpenShift also failed. Last resort: authorizing with the Heroku server ..."),
]
RefreshServerList = [
# url, rety?, message
(GaeRefreshUrl, False, "Refreshing with the GAE server ..."),
(OpenShiftRefreshUrl, True, "I think you are WALLed, so let's refresh with the OpenShift server ..."),
(HerokuRefreshUrl, True, "OpenShift also failed. Last resort: refreshing with the Heroku server ..."),
]
ApiKey = 'q8WE4EpCsau1oS0MplgMKNBn' # replace with your own ApiKey if you use your own appid
SecretKey = '' # replace with your own SecretKey if you use your own appid
if not SecretKey:
ServerAuth = True
# NOTE: no trailing '/'
AppPcsPath = '/apps/bypy' # change this to the App's directory you specified when creating the app
AppPcsPathLen = len(AppPcsPath)
# Program setting constants
HomeDir = expanduser('~')
# os.path.join() may not handle unicode well
ConfigDir = HomeDir + os.sep + '.bypy'
TokenFilePath = ConfigDir + os.sep + 'bypy.json'
HashCachePath = ConfigDir + os.sep + 'bypy.pickle'
BDUSSPath = ConfigDir + os.sep + 'bypy.bduss'
ByPyCertsFile = 'bypy.cacerts.pem'
ByPyCertsPath = ConfigDir + os.sep + ByPyCertsFile
#UserAgent = 'Mozilla/5.0'
#UserAgent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)"
# According to seanlis@github, this User-Agent string affects the download.
UserAgent = None
CleanOptionShort= '-c'
CleanOptionLong= '--clean'
DisableSslCheckOption = '--disable-ssl-check'
CaCertsOption = '--cacerts'
# Baidu PCS URLs etc.
OpenApiUrl = "https://openapi.baidu.com"
OpenApiVersion = "2.0"
OAuthUrl = OpenApiUrl + "/oauth/" + OpenApiVersion
ServerAuthUrl = OAuthUrl + "/authorize"
DeviceAuthUrl = OAuthUrl + "/device/code"
TokenUrl = OAuthUrl + "/token"
PcsUrl = 'https://pcs.baidu.com/rest/2.0/pcs/'
CPcsUrl = 'https://c.pcs.baidu.com/rest/2.0/pcs/'
DPcsUrl = 'https://d.pcs.baidu.com/rest/2.0/pcs/'
PanAPIUrl = 'http://pan.baidu.com/api/'
# mutable, actual ones used, capital ones are supposed to be immutable
# this is introduced to support mirrors
pcsurl = PcsUrl
cpcsurl = CPcsUrl
dpcsurl = DPcsUrl
try:
# non-standard python library, needs 'pip install requests'
import requests
except:
print("Fail to import the 'requests' library\n" + \
"You need to install the 'requests' python library\n" + \
"You can install it by running 'pip install requests'")
raise
requests_version = requests.__version__.split('.')
if int(requests_version[0]) < 1:
print("You Python Requests Library version is to lower than 1.\n" + \
"You can run 'pip install requests' to upgrade it.")
raise
# non-standard python library, needs 'pip install requesocks'
#import requesocks as requests # if you need socks proxy
# when was your last time flushing a toilet?
__last_flush = time.time()
#__last_flush = 0
PrintFlushPeriodInSec = 5.0
# save cache if more than 10 minutes passed
last_cache_save = time.time()
CacheSavePeriodInSec = 10 * 60.0
# https://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
# https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
# 0 - black, 1 - red, 2 - green, 3 - yellow
# 4 - blue, 5 - magenta, 6 - cyan 7 - white
class TermColor:
NumOfColors = 8
Black, Red, Green, Yellow, Blue, Magenta, Cyan, White = range(NumOfColors)
Nil = -1
def colorstr(msg, fg, bg):
CSI = '\x1b['
fgs = ''
bgs = ''
if fg >=0 and fg <= 7:
fgs = str(fg + 30)
if bg >= 0 and bg <=7:
bgs = str(bg + 40)
cs = ';'.join([fgs, bgs]).strip(';')
if cs:
return CSI + cs + 'm' + msg + CSI + '0m'
else:
return msg
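# Hedged example (not from the original file):
#   colorstr('done', TermColor.Green, TermColor.Nil) -> '\x1b[32mdone\x1b[0m'
# i.e. the message wrapped in the ANSI green-foreground code and a reset.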
def prc(msg):
print(msg)
# we need to flush the output periodically to see the latest status
global __last_flush
now = time.time()
if now - __last_flush >= PrintFlushPeriodInSec:
sys.stdout.flush()
__last_flush = now
pr = prc
def prcolorc(msg, fg, bg):
if sys.stdout.isatty() and not sys.platform.startswith('win32'):
pr(colorstr(msg, fg, bg))
else:
pr(msg)
prcolor = prcolorc
def plog(tag, msg, showtime = True, showdate = False,
prefix = '', suffix = '', fg = TermColor.Nil, bg = TermColor.Nil):
if showtime or showdate:
now = time.localtime()
if showtime:
tag += time.strftime("[%H:%M:%S] ", now)
if showdate:
tag += time.strftime("[%Y-%m-%d] ", now)
if prefix:
prcolor("{}{}".format(tag, prefix), fg, bg)
prcolor("{}{}".format(tag, msg), fg, bg)
if suffix:
prcolor("{}{}".format(tag, suffix), fg, bg)
def perr(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<E> ', msg, showtime, showdate, prefix, suffix, TermColor.Red)
def pwarn(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<W> ', msg, showtime, showdate, prefix, suffix, TermColor.Yellow)
def pinfo(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<I> ', msg, showtime, showdate, prefix, suffix, TermColor.Green)
def pdbg(msg, showtime = True, showdate = False, prefix = '', suffix = ''):
return plog('<D> ', msg, showtime, showdate, prefix, suffix, TermColor.Cyan)
def askc(msg, enter = True):
pr(msg)
if enter:
pr('Press [Enter] when you are done')
return raw_input()
ask = askc
# print progress
# https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def pprgrc(finish, total, start_time = None, existing = 0,
prefix = '', suffix = '', seg = 20):
# we don't want this to go to the log, so we use stderr
if total > 0:
segth = seg * finish // total
percent = 100 * finish // total
else:
segth = seg
percent = 100
eta = ''
now = time.time()
if start_time is not None and percent > 5 and finish > 0:
finishf = float(finish) - float(existing)
totalf = float(total)
remainf = totalf - float(finish)
elapsed = now - start_time
speed = human_speed(finishf / elapsed)
eta = 'ETA: ' + human_time_short(elapsed * remainf / finishf) + \
' (' + speed + ', ' + \
human_time_short(elapsed) + ' gone)'
msg = '\r' + prefix + '[' + segth * '=' + (seg - segth) * '_' + ']' + \
" {}% ({}/{})".format(percent, human_size(finish), human_size(total)) + \
' ' + eta + suffix
sys.stderr.write(msg + ' ') # space is used as a clearer
sys.stderr.flush()
pprgr = pprgrc
def remove_backslash(s):
return s.replace(r'\/', r'/')
def rb(s):
return s.replace(r'\/', r'/')
# marshaling
def str2bool(s):
if isinstance(s, basestring):
if s:
sc = s.lower()[0]
if sc == 't' or sc == 'y' or (sc >= '1' and sc <= '9'):
return True
else:
return False
else:
return False
else:
# don't change
return s
def str2int(s):
if isinstance(s, basestring):
return int(s)
else:
# don't change
return s
def str2float(s):
if isinstance(s, basestring):
return float(s)
else:
# don't change
return s
def human_time(seconds):
''' DocTests:
>>> human_time(0)
u''
>>> human_time(122.1)
u'2m2s'
>>> human_time(133)
u'2m13s'
>>> human_time(12345678)
u'20W2D21h21m18s'
'''
isec = int(seconds)
s = isec % 60
m = isec / 60 % 60
h = isec / 60 / 60 % 24
d = isec / 60 / 60 / 24 % 7
w = isec / 60 / 60 / 24 / 7
result = ''
for t in [ ('W', w), ('D', d), ('h', h), ('m', m), ('s', s) ]:
if t[1]:
result += str(t[1]) + t[0]
return result
def limit_unit(timestr, num = 2):
''' DocTests:
>>> limit_unit('1m2s', 1)
u'1m'
>>> limit_unit('1m2s')
u'1m2s'
>>> limit_unit('1m2s', 4)
u'1m2s'
>>> limit_unit('1d2h3m2s')
u'1d2h'
>>> limit_unit('1d2h3m2s', 1)
u'1d'
'''
l = len(timestr)
i = 0
p = 0
while i < num and p <= l:
at = 0
while p < l:
c = timestr[p]
if at == 0:
if c.isdigit():
p += 1
else:
at += 1
elif at == 1:
if not c.isdigit():
p += 1
else:
at += 1
else:
break
i += 1
return timestr[:p]
def human_time_short(seconds):
return limit_unit(human_time(seconds))
def interpret_size(si):
'''
>>> interpret_size(10)
10
>>> interpret_size('10')
10
>>> interpret_size('10b')
10
>>> interpret_size('10k')
10240
>>> interpret_size('10K')
10240
>>> interpret_size('10kb')
10240
>>> interpret_size('10kB')
10240
>>> interpret_size('a10')
Traceback (most recent call last):
ValueError
>>> interpret_size('10a')
Traceback (most recent call last):
KeyError: 'A'
'''
m = re.match(r"\s*(\d+)\s*([ac-z]?)(b?)\s*$", str(si), re.I)
if m:
if not m.group(2) and m.group(3):
times = 1
else:
times = SIPrefixTimes[m.group(2).upper()] if m.group(2) else 1
return int(m.group(1)) * times
else:
raise ValueError
def human_num(num, precision = 0, filler = ''):
# https://stackoverflow.com/questions/15263597/python-convert-floating-point-number-to-certain-precision-then-copy-to-string/15263885#15263885
numfmt = '{{:.{}f}}'.format(precision)
exp = math.log(num, OneK) if num > 0 else 0
expint = int(math.floor(exp))
maxsize = len(SIPrefixNames) - 1
if expint > maxsize:
pwarn("Ridiculously large number '{}' pased to 'human_num()'".format(num))
expint = maxsize
unit = SIPrefixNames[expint]
return numfmt.format(num / float(OneK ** expint)) + filler + unit
def human_size(num, precision = 3):
''' DocTests:
>>> human_size(1000, 0)
u'1000B'
>>> human_size(1025)
u'1.001kB'
'''
return human_num(num, precision) + 'B'
def human_speed(speed, precision = 0):
return human_num(speed, precision) + 'B/s'
# no leading, trailing '/'
# remote path rule:
# - all public methods of ByPy shall accept remote path as "partial path"
# (before calling get_pcs_path())
# - all private methods of ByPy shall accept remote path as "full path"
# (after calling get_pcs_path())
def get_pcs_path(path):
if not path or path == '/' or path == '\\':
return AppPcsPath
return (AppPcsPath + '/' + path.strip('/')).rstrip('/')
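# Hedged examples (not in the original file), assuming the default AppPcsPath
# of '/apps/bypy':
#   get_pcs_path('')          -> '/apps/bypy'
#   get_pcs_path('/foo/bar/') -> '/apps/bypy/foo/bar'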
# guarantee no-exception
def copyfile(src, dst):
result = ENoError
try:
shutil.copyfile(src, dst)
except (shutil.Error, IOError) as ex:
perr("Fail to copy '{}' to '{}'.\nException:\n{}\nStack:{}\n".format(
src, dst, ex, traceback.format_exc()))
result = EFailToCreateLocalFile
return result
def movefile(src, dst):
result = ENoError
try:
shutil.move(src, dst)
except (shutil.Error, OSError) as ex:
perr("Fail to move '{}' to '{}'.\nException:\n{}\nStack:\n{}\n".format(
src, dst, ex, traceback.format_exc()))
result = EFailToCreateLocalFile
return result
def removefile(path, verbose = False):
result = ENoError
try:
if verbose:
pr("Removing local file '{}'".format(path))
if path:
os.remove(path)
except Exception as ex:
perr("Fail to remove local fle '{}'.\nException:\n{}\nStack:{}\n".format(
path, ex, traceback.format_exc()))
result = EFailToDeleteFile
return result
def removedir(path, verbose = False):
result = ENoError
try:
if verbose:
pr("Removing local directory '{}'".format(path))
if path:
shutil.rmtree(path)
except Exception as ex:
perr("Fail to remove local directory '{}'.\nException:\n{}\nStack:{}\n".format(
path, ex, traceback.format_exc()))
result = EFailToDeleteDir
return result
def makedir(path, mode = 0o777, verbose = False):
result = ENoError
if verbose:
pr("Creating local directory '{}'".format(path))
if path and not os.path.exists(path):
try:
os.makedirs(path, mode)
except os.error as ex:
perr("Failed at creating local dir '{}'.\nException:\n{}\nStack:{}\n".format(
path, ex, traceback.format_exc()))
result = EFailToCreateLocalDir
return result
# guarantee no-exception
def getfilesize(path):
size = -1
try:
size = os.path.getsize(path)
except os.error:
perr("Exception occured while getting size of '{}'. Exception:\n{}".format(path, traceback.format_exc()))
return size
# guarantee no-exception
def getfilemtime(path):
mtime = -1
try:
mtime = os.path.getmtime(path)
except os.error:
perr("Exception occured while getting modification time of '{}'. Exception:\n{}".format(path, traceback.format_exc()))
return mtime
# seems os.path.join() doesn't handle Unicode well
def joinpath(first, second, sep = os.sep):
head = ''
if first:
head = first.rstrip(sep) + sep
tail = ''
if second:
tail = second.lstrip(sep)
return head + tail
def donothing():
pass
# https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning
def disable_urllib3_warning():
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
# https://stackoverflow.com/questions/10883399/unable-to-encode-decode-pprint-output
class MyPrettyPrinter(pprint.PrettyPrinter):
def format(self, obj, context, maxlevels, level):
if isinstance(obj, unicode):
#return (obj.encode('utf8'), True, False)
return (obj, True, False)
if isinstance(obj, str):
convert = False
#for c in obj:
# if ord(c) >= 128:
# convert = True
# break
try:
codecs.decode(obj)
except:
convert = True
if convert:
return ("0x{}".format(binascii.hexlify(obj)), True, False)
return pprint.PrettyPrinter.format(self, obj, context, maxlevels, level)
# there is room for more space optimization (like using the tree structure),
# but it's not added at the moment. for now, it's just simple pickle.
# SQLite might be better for portability
# NOTE: file names are case-sensitive
class cached(object):
''' simple decorator for hash caching (using pickle) '''
usecache = True
verbose = False
debug = False
cache = {}
cacheloaded = False
dirty = False
# we don't do cache loading / unloading here because it's a decorator,
# and probably multiple instances are created for md5, crc32, etc.
# It's a bit complex, and I thus don't have the confidence to do it in ctor/dtor.
def __init__(self, f):
self.f = f
def __call__(self, *args):
assert len(args) > 0
result = None
path = args[0]
dir, file = os.path.split(path) # the 'filename' parameter
absdir = os.path.abspath(dir)
if absdir in cached.cache:
entry = cached.cache[absdir]
if file in entry:
info = entry[file]
if self.f.__name__ in info \
and info['size'] == getfilesize(path) \
and info['mtime'] == getfilemtime(path) \
and self.f.__name__ in info \
and cached.usecache:
result = info[self.f.__name__]
if cached.debug:
pdbg("Cache hit for file '{}',\n{}: {}\nsize: {}\nmtime: {}".format(
path, self.f.__name__,
result if isinstance(result, (int, long, float, complex)) else binascii.hexlify(result),
info['size'], info['mtime']))
else:
result = self.f(*args)
self.__store(info, path, result)
else:
result = self.f(*args)
entry[file] = {}
info = entry[file]
self.__store(info, path, result)
else:
result = self.f(*args)
cached.cache[absdir] = {}
entry = cached.cache[absdir]
entry[file] = {}
info = entry[file]
self.__store(info, path, result)
return result
def __store(self, info, path, value):
cached.dirty = True
info['size'] = getfilesize(path)
info['mtime'] = getfilemtime(path)
info[self.f.__name__] = value
if cached.debug:
situation = "Storing cache"
if cached.usecache:
situation = "Cache miss"
pdbg((situation + " for file '{}',\n{}: {}\nsize: {}\nmtime: {}").format(
path, self.f.__name__,
value if isinstance(value, (int, long, float, complex)) else binascii.hexlify(value),
info['size'], info['mtime']))
# periodically save to prevent loss in case of system crash
global last_cache_save
now = time.time()
if now - last_cache_save >= CacheSavePeriodInSec:
cached.savecache()
last_cache_save = now
if cached.debug:
pdbg("Periodically saving Hash Cash")
@staticmethod
def loadcache():
# load the cache even if we don't use cached hash values,
# because we will save the (possibly updated) hash values
if not cached.cacheloaded: # no double-loading
if cached.verbose:
pr("Loading Hash Cache File '{}'...".format(HashCachePath))
if os.path.exists(HashCachePath):
try:
with open(HashCachePath, 'rb') as f:
cached.cache = pickle.load(f)
cached.cacheloaded = True
if cached.verbose:
pr("Hash Cache File loaded.")
except (
pickle.PickleError,
# the following is for dealing with corrupted cache file
EOFError, TypeError, ValueError):
perr("Fail to load the Hash Cache, no caching. Exception:\n{}".format(traceback.format_exc()))
cached.cache = {}
else:
if cached.verbose:
pr("Hash Cache File not found, no caching")
else:
if cached.verbose:
pr("Not loading Hash Cache since 'cacheloaded' is '{}'".format( cached.cacheloaded))
return cached.cacheloaded
@staticmethod
def savecache(force_saving = False):
saved = False
# even if we were unable to load the cache, we still save it.
if cached.dirty or force_saving:
if cached.verbose:
pr("Saving Hash Cache...")
try:
with open(HashCachePath, 'wb') as f:
pickle.dump(cached.cache, f)
if cached.verbose:
pr("Hash Cache saved.")
saved = True
cached.dirty = False
except Exception:
perr("Failed to save Hash Cache. Exception:\n{}".format(traceback.format_exc()))
else:
if cached.verbose:
pr("Not saving Hash Cache since 'dirty' is '{}' and 'force_saving' is '{}'".format(
cached.dirty, force_saving))
return saved
@staticmethod
def cleancache():
if cached.loadcache():
for absdir in cached.cache.keys():
if not os.path.exists(absdir):
if cached.verbose:
pr("Directory: '{}' no longer exists, removing the cache entries".format(absdir))
cached.dirty = True
del cached.cache[absdir]
else:
oldfiles = cached.cache[absdir]
files = {}
needclean = False
for f in oldfiles.keys():
#p = os.path.join(absdir, f)
p = joinpath(absdir, f)
if os.path.exists(p):
files[f] = oldfiles[f]
else:
if cached.verbose:
needclean = True
pr("File '{}' no longer exists, removing the cache entry".format(p))
if needclean:
cached.dirty = True
cached.cache[absdir] = files
cached.savecache()
@cached
def md5(filename, slice = OneM):
m = hashlib.md5()
with open(filename, "rb") as f:
while True:
buf = f.read(slice)
if buf:
m.update(buf)
else:
break
return m.digest()
# slice md5 for baidu rapidupload
@cached
def slice_md5(filename):
m = hashlib.md5()
with open(filename, "rb") as f:
buf = f.read(256 * OneK)
m.update(buf)
return m.digest()
@cached
def crc32(filename, slice = OneM):
with open(filename, "rb") as f:
buf = f.read(slice)
crc = binascii.crc32(buf)
while True:
buf = f.read(slice)
if buf:
crc = binascii.crc32(buf, crc)
else:
break
return crc & 0xffffffff
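# The three helpers above compute exactly what Baidu PCS rapid-upload wants for an
# existing file: the full-content MD5, the MD5 of the first 256KB slice, and the CRC32;
# see __rapidupload_file() below. A rough illustration (the path is made up):
#   content_md5 = binascii.hexlify(md5('/backup/big.iso'))
#   first_slice = binascii.hexlify(slice_md5('/backup/big.iso'))
#   crc_value   = hex(crc32('/backup/big.iso'))
# All three go through the @cached decorator, so unchanged files are not re-hashed on later runs.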
def enable_http_logging():
httplib.HTTPConnection.debuglevel = 1
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
def ls_type(isdir):
return 'D' if isdir else 'F'
def ls_time(itime):
return time.strftime('%Y-%m-%d, %H:%M:%S', time.localtime(itime))
def print_pcs_list(json, foundmsg = "Found:", notfoundmsg = "Nothing found."):
list = json['list']
if list:
pr(foundmsg)
for f in list:
pr("{} {} {} {} {} {}".format(
ls_type(f['isdir']),
f['path'],
f['size'],
ls_time(f['ctime']),
ls_time(f['mtime']),
f['md5']))
else:
pr(notfoundmsg)
# tree represented using dictionary, (Obsolete: OrderedDict no longer required)
# NOTE: No own-name is kept, so the caller needs to keep track of that
# NOTE: Case-sensitive, as I don't want to waste time wrapping up a case-insensitive one
# single-linked-list, no backwards travelling capability
class PathDictTree(dict):
def __init__(self, type = 'D', **kwargs):
self.type = type
self.extra = {}
for k, v in kwargs.items():
self.extra[k] = v
super(PathDictTree, self).__init__()
def __str__(self):
return self.__str('')
def __str(self, prefix):
result = ''
for k, v in self.iteritems():
result += "{} - {}/{} - size: {} - md5: {} \n".format(
v.type, prefix, k,
v.extra['size'] if 'size' in v.extra else '',
binascii.hexlify(v.extra['md5']) if 'md5' in v.extra else '')
for k, v in self.iteritems():
if v.type == 'D':
result += v.__str(prefix + '/' + k)
return result
def add(self, name, child):
self[name] = child
return child
# returns the child tree at the given path
# assume that path is only separated by '/', instead of '\\'
def get(self, path):
place = self
if path:
# Linux can have file / folder names with '\\'?
if sys.platform.startswith('win32'):
assert '\\' not in path
route = filter(None, path.split('/'))
for part in route:
if part in place:
sub = place[part]
assert place.type == 'D' # sanity check
place = sub
else:
return None
return place
# return a string list of all 'path's in the tree
def allpath(self):
result = []
for k, v in self.items():
result.append(k)
if v.type == 'D':
for p in self.get(k).allpath():
result.append(k + '/' + p)
return result
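# Minimal illustration of PathDictTree usage (names and sizes are made up):
#   root = PathDictTree()                        # implicit type 'D'
#   docs = root.add('docs', PathDictTree('D'))
#   docs.add('readme.txt', PathDictTree('F', size = 42))
#   root.get('docs/readme.txt')                  # -> the 'readme.txt' node
#   root.allpath()                               # -> ['docs', 'docs/readme.txt']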
class ByPy(object):
'''The main class of the bypy program'''
# public static properties
HelpMarker = "Usage:"
ListFormatDict = {
'$t' : (lambda json: ls_type(json['isdir'])),
'$f' : (lambda json: json['path'].split('/')[-1]),
'$c' : (lambda json: ls_time(json['ctime'])),
'$m' : (lambda json: ls_time(json['mtime'])),
'$d' : (lambda json: str(json['md5'] if 'md5' in json else '')),
'$s' : (lambda json: str(json['size'])),
'$i' : (lambda json: str(json['fs_id'])),
'$b' : (lambda json: str(json['block_list'] if 'block_list' in json else '')),
'$u' : (lambda json: 'HasSubDir' if 'ifhassubdir' in json and json['ifhassubdir'] else 'NoSubDir'),
'$$' : (lambda json: '$')
}
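# Illustration of the '$' placeholders above (the file entry values are made up):
# a format string of '$t $f $s' applied to {'isdir': 0, 'path': '/apps/bypy/a.txt', 'size': 1024, ...}
# prints something like:  F a.txt 1024
# See __replace_list_format() and the 'list' command for how these are applied.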
# Old setting locations, should be moved to ~/.bypy to be clean
OldTokenFilePath = HomeDir + os.sep + '.bypy.json'
OldHashCachePath = HomeDir + os.sep + '.bypy.pickle'
@staticmethod
def migratesettings():
result = ENoError
filesToMove = [
[ByPy.OldTokenFilePath, TokenFilePath],
[ByPy.OldHashCachePath, HashCachePath]
]
result = makedir(ConfigDir, 0o700) # make it secretive
# this directory must exist
if result != ENoError:
perr("Failed to create config directory '{}'".format(ConfigDir))
return result
for tomove in filesToMove:
oldfile = tomove[0]
newfile = tomove[1]
if os.path.exists(oldfile):
dst = newfile
if os.path.exists(newfile):
dst = TokenFilePath + '.old'
subresult = movefile(oldfile, dst)
if subresult != ENoError:
result = subresult
return result
@staticmethod
def getcertfile():
result = ENoError
if not os.path.exists(ByPyCertsPath):
if os.path.exists(ByPyCertsFile):
result = copyfile(ByPyCertsFile, ByPyCertsPath)
else:
try:
# perform a simple download from github
urllib.urlretrieve(
'https://raw.githubusercontent.com/houtianze/bypy/master/bypy.cacerts.pem', ByPyCertsPath)
except IOError as ex:
perr("Fail download CA Certs to '{}'.\n" + \
"Exception:\n{}\nStack:{}\n".format(
ByPyCertsPath, ex, traceback.format_exc()))
result = EDownloadCerts
return result
def __init__(self,
slice_size = DefaultSliceSize,
dl_chunk_size = DefaultDlChunkSize,
verify = True,
retry = 5, timeout = None,
quit_when_fail = False,
listfile = None,
resumedownload = True,
extraupdate = lambda: (),
incregex = '',
ondup = '',
followlink = True,
checkssl = True,
cacerts = None,
rapiduploadonly = False,
verbose = 0, debug = False):
# handle backward compatibility
sr = ByPy.migratesettings()
if sr != ENoError:
# bail out
perr("Failed to migrate old settings.")
onexit(EMigrationFailed)
# it doesn't matter if it failed, we can disable SSL verification anyway
ByPy.getcertfile()
self.__slice_size = slice_size
self.__dl_chunk_size = dl_chunk_size
self.__verify = verify
self.__retry = retry
self.__quit_when_fail = quit_when_fail
self.__timeout = timeout
self.__listfile = listfile
self.__resumedownload = resumedownload
self.__extraupdate = extraupdate
self.__incregex = incregex
self.__incregmo = re.compile(incregex)
if ondup and len(ondup) > 0:
self.__ondup = ondup[0].upper()
else:
self.__ondup = 'O' # O - Overwrite* S - Skip P - Prompt
# TODO: whether this works is still to be tried out
self.__isrev = False
self.__followlink = followlink
# TODO: properly fix this InsecurePlatformWarning
checkssl = False
# using a mirror, which has name mismatch SSL error,
# so need to disable SSL check
if pcsurl != PcsUrl:
# TODO: print a warning
checkssl = False
self.__checkssl = checkssl
self.__rapiduploadonly = rapiduploadonly
self.Verbose = verbose
self.Debug = debug
if self.__checkssl:
# sort of undocumented by requests
# http://stackoverflow.com/questions/10667960/python-requests-throwing-up-sslerror
if cacerts is not None:
if os.path.isfile(cacerts):
self.__checkssl = cacerts
else:
perr("Invalid CA Bundle '{}' specified")
# falling through here means no customized CA Certs specified
if self.__checkssl is True:
# use our own CA Bundle if possible
if os.path.isfile(ByPyCertsPath):
self.__checkssl = ByPyCertsPath
else:
# Well, disable cert verification
pwarn(
"** SSL Certificate Verification has been disabled **\n\n" + \
"If you are confident that your CA Bundle can verify " + \
"Baidu PCS's certs, you can run the prog with the '" + CaCertsOption + \
" <your ca cert path>' argument to enable SSL cert verification.\n\n" + \
"However, most of the time, you can ignore this warning, " + \
"you are going to send sensitive data to the cloud plainly right?")
self.__checkssl = False
if not checkssl:
disable_urllib3_warning()
# the prophet said: thou shalt initialize
self.__existing_size = 0
self.__json = {}
self.__access_token = ''
self.__bduss = ''
self.__pancookies = {}
self.__remote_json = {}
self.__slice_md5s = []
if self.__listfile and os.path.exists(self.__listfile):
with open(self.__listfile, 'r') as f:
self.__list_file_contents = f.read()
else:
self.__list_file_contents = None
# only if user specifies '-ddd' or more 'd's, the following
# debugging information will be shown, as it's very talkative.
if self.Debug >= 3:
# these two lines enable debugging at httplib level (requests->urllib3->httplib)
# you will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# the only thing missing will be the response.body which is not logged.
enable_http_logging()
if not self.__load_local_json():
# no need to call __load_local_json() again as __auth() will load the json & access token.
result = self.__auth()
if result != ENoError:
perr("Program authorization FAILED.\n" + \
"You need to authorize this program before using any PCS functions.\n" + \
"Quitting...\n")
onexit(result)
if not self.__load_local_bduss():
self.pv("BDUSS not found at '{}'.".format(BDUSSPath))
def pv(self, msg, **kwargs):
if self.Verbose:
pr(msg)
def pd(self, msg, level = 1, **kwargs):
if self.Debug >= level:
pdbg(msg, kwargs)
def shalloverwrite(self, prompt):
if self.__ondup == 'S':
return False
elif self.__ondup == 'P':
ans = ask(prompt, False).upper()
if not ans.startswith('Y'):
return False
return True
def __print_error_json(self, r):
try:
dj = r.json()
if 'error_code' in dj and 'error_msg' in dj:
ec = dj['error_code']
et = dj['error_msg']
msg = ''
if ec == IEMD5NotFound:
pf = pinfo
msg = et
else:
pf = perr
msg = "Error code: {}\nError Description: {}".format(ec, et)
pf(msg)
except Exception:
perr('Error parsing JSON Error Code from:\n{}'.format(rb(r.text)))
perr('Exception:\n{}'.format(traceback.format_exc()))
def __dump_exception(self, ex, url, pars, r, act):
if self.Debug or self.Verbose:
perr("Error accessing '{}'".format(url))
if ex and isinstance(ex, Exception) and self.Debug:
perr("Exception:\n{}".format(ex))
tb = traceback.format_exc()
if tb:
pr(tb)
perr("Function: {}".format(act.__name__))
perr("Website parameters: {}".format(pars))
if hasattr(r, 'status_code'):
perr("HTTP Response Status Code: {}".format(r.status_code))
if (r.status_code != 200 and r.status_code != 206) or (not ('method' in pars and pars['method'] == 'download') and url.find('method=download') == -1 and url.find('baidupcs.com/file/') == -1):
self.__print_error_json(r)
perr("Website returned: {}".format(rb(r.text)))
# always append / replace the 'access_token' parameter in the https request
def __request_work(self, url, pars, act, method, actargs = None, addtoken = True, dumpex = True, **kwargs):
result = ENoError
r = None
self.__extraupdate()
parsnew = pars.copy()
if addtoken:
parsnew['access_token'] = self.__access_token
try:
self.pd(method + ' ' + url)
self.pd("actargs: {}".format(actargs))
self.pd("Params: {}".format(pars))
if method.upper() == 'GET':
r = requests.get(url,
params = parsnew, timeout = self.__timeout, verify = self.__checkssl, **kwargs)
elif method.upper() == 'POST':
r = requests.post(url,
params = parsnew, timeout = self.__timeout, verify = self.__checkssl, **kwargs)
# BUGFIX: DON'T do this, if we are downloading a big file, the program sticks and dies
#self.pd("Request Headers: {}".format(
# pprint.pformat(r.request.headers)), 2)
sc = r.status_code
self.pd("HTTP Status Code: {}".format(sc))
# BUGFIX: DON'T do this, if we are downloading a big file, the program sticks and dies
#self.pd("Header returned: {}".format(pprint.pformat(r.headers)), 2)
#self.pd("Website returned: {}".format(rb(r.text)), 3)
if sc == requests.codes.ok or sc == 206: # 206 Partial Content
if sc == requests.codes.ok:
if not ('method' in pars and pars['method'] == 'download'):
try:
j = r.json()
if 'error_code' in j and j['error_code'] == 0 and 'error_msg' in j and j['error_msg'] == 'no error': # __walk_remote_dir_act() KeyError: u'list'
self.pd("Unexpected response: {}".format(j))
return ERequestFailed
except Exception:
sys.exc_clear()
self.pd("Request OK, processing action")
else:
self.pd("206 Partial Content")
result = act(r, actargs)
if result == ENoError:
self.pd("Request all goes fine")
elif sc == 404 and r.url.find('http://bcscdn.baidu.com/bcs-cdn/wenxintishi') == 0: # = "error_code":31390,"error_msg":"Illegal File"
self.pd("File is blacklisted ('wenxintishi'). Skipping.")
result = EFileNotFound
else:
ec = 0
try:
j = r.json()
ec = j['error_code']
# error print is done in __dump_exception()
# self.__print_error_json(r)
except ValueError:
perr("Not valid error JSON")
# 6 (sc: 403): No permission to access user data
# 110 (sc: 401): Access token invalid or no longer valid
# 111 (sc: 401): Access token expired
if ec == 111 or ec == 110 or ec == 6: # and sc == 401:
self.pd("Need to refresh token, refreshing")
if ENoError == self.__refresh_token(): # refresh the token and re-request
# TODO: avoid dead recursive loops
# TODO: properly pass retry
result = self.__request(url, pars, act, method, actargs, True, addtoken, dumpex, **kwargs)
else:
result = EFatal
perr("FATAL: Token refreshing failed, can't continue.\nQuitting...\n")
onexit(result)
# File md5 not found, you should use upload API to upload the whole file.
elif ec == IEMD5NotFound: # and sc == 404:
self.pd("MD5 not found, rapidupload failed")
result = ec
# user not exists
elif ec == 31045: # and sc == 403:
self.pd("BDUSS has expired")
result = IEBDUSSExpired
# superfile create failed
elif ec == 31081: # and sc == 404:
self.pd("Failed to combine files from MD5 slices (superfile create failed)")
result = ec
# topath already exists
elif ec == 31196: # and sc == 403:
self.pd("UnzipCopy destination already exists.")
result = act(r, actargs)
# file copy failed
elif ec == 31197: # and sc == 503:
result = act(r, actargs)
# file size exceeds limit
elif ec == 31199: # and sc == 403:
result = act(r, actargs)
# errors that make retrying meaningless
elif (
ec == 31061 or # sc == 400 file already exists
ec == 31062 or # sc == 400 file name is invalid
ec == 31063 or # sc == 400 file parent path does not exist
ec == 31064 or # sc == 403 file is not authorized
ec == 31065 or # sc == 400 directory is full
ec == 31066): # sc == 403 (indeed 404) file does not exist
result = ec
if dumpex:
self.__dump_exception(None, url, pars, r, act)
else:
result = ERequestFailed
if dumpex:
self.__dump_exception(None, url, pars, r, act)
except (requests.exceptions.RequestException,
socket.error) as ex:
# If certificate check failed, no need to continue
# but prompt the user for work-around and quit
# why so kludge? because requests' SSLError doesn't set
# the errno and strerror due to using **kwargs,
# so we are forced to use string matching
if isinstance(ex, requests.exceptions.SSLError) \
and re.match(r'^\[Errno 1\].*error:14090086.*:certificate verify failed$', str(ex), re.I):
# [Errno 1] _ssl.c:504: error:14090086:SSL routines:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
result = EFatal
self.__dump_exception(ex, url, pars, r, act)
perr("\n\n== Baidu's Certificate Verification Failure ==\n" + \
"We couldn't verify Baidu's SSL Certificate.\n" + \
"It's most likely that the system doesn't have " + \
"the corresponding CA certificate installed.\n" + \
"There are two ways of solving this:\n" + \
"Either) Run this prog with the '" + CaCertsOption + \
" <path to " + ByPyCertsPath + "> argument " + \
"(" + ByPyCertsPath + " comes along with this prog). " + \
"This is the secure way. " + \
"However, it won't work after 2020-02-08 when " + \
"the certificat expires.\n" + \
"Or) Run this prog with the '" + DisableSslCheckOption + \
"' argument. This supresses the CA cert check " + \
"and always works.\n")
onexit(result)
# why so kludge? because requests' SSLError doesn't set
# the errno and strerror due to using **kwargs,
# so we are forced to use string matching
if isinstance(ex, requests.exceptions.SSLError) \
and re.match(r'^\[Errno 1\].*error:14090086.*:certificate verify failed$', str(ex), re.I):
# [Errno 1] _ssl.c:504: error:14090086:SSL routines:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
perr("\n*** We probably don't have Baidu's CA Certificate ***\n" + \
"This in fact doesn't matter most of the time.\n\n" + \
"However, if you are _really_ concern about it, you can:\n" + \
"Either) Run this prog with the '" + CaCertsOption + \
" <path to bypy.cacerts.pem>' " + \
"argument. This is the secure way.\n" + \
"Or) Run this prog with the '" + DisableSslCheckOption + \
"' argument. This suppresses the CA cert check.\n")
result = ERequestFailed
if dumpex:
self.__dump_exception(ex, url, pars, r, act)
except Exception as ex:
result = EFatal
self.__dump_exception(ex, url, pars, r, act)
perr("Fatal Exception, no way to continue.\nQuitting...\n")
perr("If the error is reproducible, run the program with `-dv` arguments again to get more info.\n")
onexit(result)
# we eat the exception, and use return code as the only
# error notification method, we don't want to mix them two
#raise # must notify the caller about the failure
return result
def __request(self, url, pars, act, method, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
tries = 1
if retry:
tries = self.__retry
i = 0
result = ERequestFailed
# Change the User-Agent to avoid server fuss
kwnew = kwargs.copy()
if 'headers' not in kwnew:
kwnew['headers'] = { 'User-Agent': UserAgent }
else:
kwnew['headers']['User-Agent'] = UserAgent
while True:
result = self.__request_work(url, pars, act, method, actargs, addtoken, dumpex, **kwnew)
i += 1
# only ERequestFailed needs a retry; other errors are returned directly
if result == ERequestFailed:
if i < tries:
# algo changed: delay more after each failure
delay = RetryDelayInSec * i
perr("Waiting {} seconds before retrying...".format(delay))
time.sleep(delay)
perr("Request Try #{} / {}".format(i + 1, tries))
else:
perr("Maximum number ({}) of tries failed.".format(tries))
if self.__quit_when_fail:
onexit(EMaxRetry)
break
else:
break
return result
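# Retry behaviour (illustrative): only ERequestFailed triggers another attempt, and the
# delay grows linearly with the attempt number. Assuming RetryDelayInSec were 10 and
# retry = 5, the waits between attempts would be 10s, 20s, 30s and 40s before giving up.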
def __get(self, url, pars, act, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
return self.__request(url, pars, act, 'GET', actargs, retry, addtoken, dumpex, **kwargs)
def __post(self, url, pars, act, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
return self.__request(url, pars, act, 'POST', actargs, retry, addtoken, dumpex, **kwargs)
# direction: True - upload, False - download
def __shallinclude(self, lpath, rpath, direction):
arrow = '==>' if direction else '<=='
checkpath = lpath if direction else rpath
# TODO: bad practice, see os.access() document for more info
if direction: # upload
if not os.path.exists(lpath):
perr("'{}' {} '{}' skipped since local path no longer exists".format(
lpath, arrow, rpath));
return False
else: # download
if os.path.exists(lpath) and (not os.access(lpath, os.R_OK)):
perr("'{}' {} '{}' skipped due to permission".format(
lpath, arrow, rpath));
return False
if '\\' in os.path.basename(checkpath):
perr("'{}' {} '{}' skipped due to problemic '\\' in the path".format(
lpath, arrow, rpath));
return False
include = (not self.__incregex) or self.__incregmo.match(checkpath)
if not include:
self.pv("'{}' {} '{}' skipped as it's not included in the regex pattern".format(
lpath, arrow, rpath));
return include
def __replace_list_format(self, fmt, j):
output = fmt
for k, v in ByPy.ListFormatDict.iteritems():
output = output.replace(k, v(j))
return output
def __load_local_json(self):
try:
with open(TokenFilePath, 'rb') as infile:
self.__json = json.load(infile)
self.__access_token = self.__json['access_token']
self.pd("Token loaded:")
self.pd(self.__json)
return True
except IOError:
perr('Error while loading baidu pcs token:')
perr(traceback.format_exc())
return False
def __store_json_only(self, j):
self.__json = j
self.__access_token = self.__json['access_token']
self.pd("access token: " + self.__access_token)
self.pd("Authorize JSON:")
self.pd(self.__json)
tokenmode = 0o600
try:
with open(TokenFilePath, 'wb') as outfile:
json.dump(self.__json, outfile)
os.chmod(TokenFilePath, tokenmode)
return ENoError
except Exception:
perr("Exception occured while trying to store access token:\n" \
"Exception:\n{}".format(traceback.format_exc()))
return EFileWrite
def __store_json(self, r):
j = {}
try:
j = r.json()
except Exception:
perr("Failed to decode JSON:\n" \
"Exception:\n{}".format(traceback.format_exc()))
perr("Error response:\n{}".format(r.text));
pinfo('-' * 64)
pinfo("""This is most likely caused by authorization errors.
Possible causes:
- You haven't run this program for a long time (more than a month).
- You changed your Baidu password after authorizing this program.
- You didn't give this program the 'netdisk' access while authorizing.
- ...
Possible fixes:
1. Remove the authorization token by running with the parameter '{}', and then re-run this program.
2. If (1) still doesn't solve the problem, you may have to go to:
https://passport.baidu.com/accountbind
and remove the authorization of this program, and then re-run this program.""".format(CleanOptionShort))
return EInvalidJson
return self.__store_json_only(j)
def __load_local_bduss(self):
try:
with open(BDUSSPath, 'rb') as infile:
self.__bduss = infile.readline().strip()
self.pd("BDUSS loaded: {}".format(self.__bduss))
self.__pancookies = {'BDUSS': self.__bduss}
return True
except IOError:
self.pd('Error loading BDUSS:')
self.pd(traceback.format_exc())
return False
def __server_auth_act(self, r, args):
return self.__store_json(r)
def __server_auth(self):
params = {
'client_id' : ApiKey,
'response_type' : 'code',
'redirect_uri' : 'oob',
'scope' : 'basic netdisk' }
pars = urllib.urlencode(params)
msg = 'Please visit:\n{}\nAnd authorize this app'.format(ServerAuthUrl + '?' + pars) + \
'\nPaste the Authorization Code here within 10 minutes.'
auth_code = ask(msg).strip()
self.pd("auth_code: {}".format(auth_code))
pr('Authorizing, please be patient, it may take up to {} seconds...'.format(self.__timeout))
pars = {
'code' : auth_code,
'redirect_uri' : 'oob' }
result = None
for auth in AuthServerList:
(url, retry, msg) = auth
pr(msg)
result = self.__get(url, pars, self.__server_auth_act, retry = retry, addtoken = False)
if result == ENoError:
break
if result == ENoError:
pr("Successfully authorized")
else:
perr("Fatal: All server authorizations failed.")
return result
def __device_auth_act(self, r, args):
dj = r.json()
return self.__get_token(dj)
def __device_auth(self):
pars = {
'client_id' : ApiKey,
'response_type' : 'device_code',
'scope' : 'basic netdisk'}
return self.__get(DeviceAuthUrl, pars, self.__device_auth_act, addtoken = False)
def __auth(self):
if ServerAuth:
return self.__server_auth()
else:
return self.__device_auth()
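# Authorization overview: with ServerAuth, the user pastes back an OAuth authorization
# code, which an auth server from AuthServerList exchanges for tokens (__server_auth);
# otherwise the OAuth device-code flow is used (__device_auth), where the user enters the
# short code shown by __get_token() at the verification URL. Both paths end up in
# __store_json(), which persists the access token to TokenFilePath.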
def __get_token_act(self, r, args):
return self.__store_json(r)
def __get_token(self, deviceJson):
msg = 'Please visit:\n' + deviceJson['verification_url'] + \
'\nwithin ' + str(deviceJson['expires_in']) + ' seconds\n' + \
'Input the CODE: {}\n'.format(deviceJson['user_code']) + \
'and Authorize this little app.\n' + \
"Press [Enter] when you've finished\n"
ask(msg)
pars = {
'grant_type' : 'device_token',
'code' : deviceJson['device_code'],
'client_id' : ApiKey,
'client_secret' : SecretKey}
return self.__get(TokenUrl, pars, self.__get_token_act, addtoken = False)
def __refresh_token_act(self, r, args):
return self.__store_json(r)
def __refresh_token(self):
if ServerAuth:
pr('Refreshing, please be patient, it may take up to {} seconds...'.format(self.__timeout))
pars = {
'grant_type' : 'refresh_token',
'refresh_token' : self.__json['refresh_token'] }
result = None
for refresh in RefreshServerList:
(url, retry, msg) = refresh
pr(msg)
result = self.__get(url, pars, self.__refresh_token_act, retry = retry, addtoken = False)
if result == ENoError:
break
if result == ENoError:
pr("Token successfully refreshed")
else:
perr("Token-refreshing on all the servers failed")
return result
else:
pars = {
'grant_type' : 'refresh_token',
'refresh_token' : self.__json['refresh_token'],
'client_secret' : SecretKey,
'client_id' : ApiKey }
return self.__post(TokenUrl, pars, self.__refresh_token_act)
def __quota_act(self, r, args):
j = r.json()
pr('Quota: ' + human_size(j['quota']))
pr('Used: ' + human_size(j['used']))
return ENoError
def help(self, command): # this comes first to make it easy to spot
''' Usage: help command - provide some information for the command '''
for i, v in ByPy.__dict__.iteritems():
if callable(v) and v.__doc__ and v.__name__ == command :
help = v.__doc__.strip()
pos = help.find(ByPy.HelpMarker)
if pos != -1:
pr("Usage: " + help[pos + len(ByPy.HelpMarker):].strip())
def refreshtoken(self):
''' Usage: refreshtoken - refresh the access token '''
return self.__refresh_token()
def info(self):
return self.quota()
def quota(self):
''' Usage: quota/info - displays the quota information '''
pars = {
'method' : 'info' }
return self.__get(pcsurl + 'quota', pars, self.__quota_act)
# return:
# 0: local and remote files are of same size
# 1: local file is larger
# 2: remote file is larger
# -1: inconclusive (probably invalid remote json)
def __compare_size(self, lsize, rjson):
if 'size' in rjson:
rsize = rjson['size']
if lsize == rsize:
return 0
elif lsize > rsize:
return 1
else:
return 2
else:
return -1
def __verify_current_file(self, j, gotlmd5):
# if we really don't want to verify
if self.__current_file == '/dev/null' and not self.__verify:
return ENoError
rsize = 0
rmd5 = 0
# always perform size check even __verify is False
if 'size' in j:
rsize = j['size']
else:
perr("Unable to verify JSON: '{}', as no 'size' entry found".format(j))
return EHashMismatch
if 'md5' in j:
rmd5 = binascii.unhexlify(j['md5'])
#elif 'block_list' in j and len(j['block_list']) > 0:
# rmd5 = j['block_list'][0]
#else:
# # quick hack for meta's 'block_list' field
# pwarn("No 'md5' nor 'block_list' found in json:\n{}".format(j))
# pwarn("Assuming MD5s match, checking size ONLY.")
# rmd5 = self.__current_file_md5
else:
perr("Unable to verify JSON: '{}', as no 'md5' entry found".format(j))
return EHashMismatch
self.pd("Comparing local file '{}' and remote file '{}'".format(
self.__current_file, j['path']))
self.pd("Local file size : {}".format(self.__current_file_size))
self.pd("Remote file size: {}".format(rsize))
if self.__current_file_size == rsize:
self.pd("Local file and remote file sizes match")
if self.__verify:
if not gotlmd5:
self.__current_file_md5 = md5(self.__current_file)
self.pd("Local file MD5 : {}".format(binascii.hexlify(self.__current_file_md5)))
self.pd("Remote file MD5: {}".format(binascii.hexlify(rmd5)))
if self.__current_file_md5 == rmd5:
self.pd("Local file and remote file hashes match")
return ENoError
else:
pinfo("Local file and remote file hashes DON'T match")
return EHashMismatch
else:
return ENoError
else:
pinfo("Local file and remote file sizes DON'T match")
return EHashMismatch
def __get_file_info_act(self, r, args):
remotefile = args
j = r.json()
self.pd("List json: {}".format(j))
l = j['list']
for f in l:
if f['path'] == remotefile: # case-sensitive
self.__remote_json = f
self.pd("File info json: {}".format(self.__remote_json))
return ENoError
return EFileNotFound
# the 'meta' command sucks, since it doesn't supply MD5 ...
# now the JSON is written to self.__remote_json, due to Python call-by-reference chaos
# https://stackoverflow.com/questions/986006/python-how-do-i-pass-a-variable-by-reference
# as if not enough confusion in Python call-by-reference
def __get_file_info(self, remotefile, **kwargs):
rdir, rfile = posixpath.split(remotefile)
self.pd("__get_file_info(): rdir : {} | rfile: {}".format(rdir, rfile))
if rdir and rfile:
pars = {
'method' : 'list',
'path' : rdir,
'by' : 'name', # sort in case we can use binary search, etc. in the future.
'order' : 'asc' }
return self.__get(pcsurl + 'file', pars, self.__get_file_info_act, remotefile, **kwargs)
else:
perr("Invalid remotefile '{}' specified.".format(remotefile))
return EArgument
def __list_act(self, r, args):
(remotedir, fmt) = args
j = r.json()
pr("{} ({}):".format(remotedir, fmt))
for f in j['list']:
pr(self.__replace_list_format(fmt, f))
return ENoError
def ls(self, remotepath = '',
fmt = '$t $f $s $m $d',
sort = 'name', order = 'asc'):
return self.list(remotepath, fmt, sort, order)
def list(self, remotepath = '',
fmt = '$t $f $s $m $d',
sort = 'name', order = 'asc'):
''' Usage: list/ls [remotepath] [format] [sort] [order] - list the 'remotepath' directory at Baidu PCS
remotepath - the remote path at Baidu PCS. default: root directory '/'
format - specifies how the listing is displayed
$t - Type: Directory ('D') or File ('F')
$f - File name
$c - Creation time
$m - Modification time
$d - MD5 hash
$s - Size
$$ - The '$' sign
So '$t - $f - $s - $$' will display "Type - File - Size - $"
Default format: '$t $f $s $m $d'
sort - sorting by [name, time, size]. default: 'name'
order - sorting order [asc, desc]. default: 'asc'
'''
rpath = get_pcs_path(remotepath)
pars = {
'method' : 'list',
'path' : rpath,
'by' : sort,
'order' : order }
return self.__get(pcsurl + 'file', pars, self.__list_act, (rpath, fmt))
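# Illustrative invocation (the remote path is made up):
#   ByPy().list('photos', '$t $f $s $m', 'time', 'desc')
# which lists the 'photos' directory under the app root (resolved by get_pcs_path(),
# typically something like '/apps/bypy/photos'), newest first, printing type, name,
# size and modification time for each entry.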
def __meta_act(self, r, args):
return self.__list_act(r, args)
# multi-file meta is not implemented for it's low usage
def meta(self, remotepath, fmt = '$t $u $f $s $c $m $i $b'):
''' Usage: meta <remotepath> [format] - \
get information of the given path (dir / file) at Baidu Yun.
remotepath - the remote path
format - specifies how the listing is displayed
it supports all the format variables in the 'list' command, and additionally the followings:
$i - fs_id
$b - MD5 block_list
$u - Has sub directory or not
'''
rpath = get_pcs_path(remotepath)
pars = {
'method' : 'meta',
'path' : rpath }
return self.__get(pcsurl + 'file', pars,
self.__meta_act, (rpath, fmt))
def __combine_file_act(self, r, args):
result = self.__verify_current_file(r.json(), False)
if result == ENoError:
self.pv("'{}' =C=> '{}' OK.".format(self.__current_file, args))
else:
perr("'{}' =C=> '{}' FAILED.".format(self.__current_file, args))
# save the md5 list, in case we add in resume function later to this program
self.__last_slice_md5s = self.__slice_md5s
self.__slice_md5s = []
return result
def __combine_file(self, remotepath, ondup = 'overwrite'):
pars = {
'method' : 'createsuperfile',
'path' : remotepath,
'ondup' : ondup }
if self.__isrev and ondup != 'newcopy':
pars['is_revision'] = 1
# always print this, so that we can use these data to combine file later
pr("Combining the following MD5 slices:")
for m in self.__slice_md5s:
pr(m)
param = { 'block_list' : self.__slice_md5s }
return self.__post(pcsurl + 'file',
pars, self.__combine_file_act,
remotepath,
data = { 'param' : json.dumps(param) } )
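# The 'createsuperfile' call above stitches previously uploaded temp slices back into
# one remote file, so the order of self.__slice_md5s is significant. A sketch of the
# request payload (the MD5s are made up):
#   param = { 'block_list': ['9e107d9d372bb6826bd81d3542a419d6', '...'] }
# posted as form data under the 'param' key, JSON-encoded.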
def unzip(self, remotepath, subpath = '/', start = 0, limit = 1000):
''' Usage: unzip <remotepath> [<subpath> [<start> [<limit>]]]'''
rpath = get_pcs_path(remotepath)
return self.__panapi_unzip_file(rpath, subpath, start, limit)
def __panapi_unzip_file_act(self, r, args):
j = r.json()
self.pd("Unzip response: {}".format(j))
if j['errno'] == 0:
if 'time' in j:
perr("Extraction not completed yet: '{}'...".format(args['path']))
return ERequestFailed
elif 'list' in j:
for e in j['list']:
pr("{}\t{}\t{}".format(ls_type(e['isdir'] == 1), e['file_name'], e['size']))
return ENoError
def __panapi_unzip_file(self, rpath, subpath, start, limit):
pars = {
'path' : rpath,
'start' : start,
'limit' : limit,
'subpath' : '/' + subpath.strip('/') }
self.pd("Unzip request: {}".format(pars))
return self.__get(PanAPIUrl + 'unzip?app_id=250528',
pars, self.__panapi_unzip_file_act, cookies = self.__pancookies, actargs = pars )
def extract(self, remotepath, subpath, saveaspath = None):
''' Usage: extract <remotepath> <subpath> [<saveaspath>]'''
rpath = get_pcs_path(remotepath)
topath = get_pcs_path(saveaspath)
if not saveaspath:
topath = os.path.dirname(rpath) + '/' + subpath
return self.__panapi_unzipcopy_file(rpath, subpath, topath)
def __panapi_unzipcopy_file_act(self, r, args):
j = r.json()
self.pd("UnzipCopy response: {}".format(j))
if 'path' in j:
self.pv("Remote extract: '{}#{}' =xx=> '{}' OK.".format(args['path'], args['subpath'], j[u'path']))
return ENoError
elif 'error_code' in j:
if j['error_code'] == 31196:
perr("Remote extract: '{}#{}' =xx=> '{}' FAILED. File already exists.".format(args['path'], args['subpath'], args['topath']))
subresult = self.__delete(args['topath'])
if subresult == ENoError:
return self.__panapi_unzipcopy_file(args['path'], args['subpath'], args['topath'])
else:
return ERequestFailed
elif j['error_code'] == 31199:
perr("Remote extract: '{}#{}' =xx=> '{}' FAILED. File too large.".format(args['path'], args['subpath'], args['topath']))
return EMaxRetry
else:
perr("Remote extract: '{}#{}' =xx=> '{}' FAILED. Unknown error {}: {}.".format(args['path'], args['subpath'], args['topath'], j['error_code'], j['error_msg']))
return EMaxRetry
def __panapi_unzipcopy_file(self, rpath, subpath, topath):
pars = {
'app_id' : 250528,
'method' : 'unzipcopy',
'path' : rpath,
'subpath' : '/' + subpath.strip('/'),
'topath' : topath }
self.pd("UnzipCopy request: {}".format(pars))
return self.__get(pcsurl + 'file',
pars, self.__panapi_unzipcopy_file_act, addtoken = False, cookies = self.__pancookies, actargs = pars )
def revision(self, remotepath):
''' Usage: revision <remotepath> '''
rpath = get_pcs_path(remotepath)
return self.__panapi_revision_list(rpath)
def history(self, remotepath):
''' Usage: history <remotepath> '''
return self.revision(remotepath)
def __panapi_revision_list_act(self, r, args):
j = r.json()
self.pd("RevisionList response: {}".format(j))
if j['errno'] == 0:
if 'list' in j:
for e in j['list']:
pr("{}\t{}\t{}".format(e['revision'], e['size'], ls_time(e['revision'] / 1e6)))
return ENoError
if j['errno'] == -6: # invalid BDUSS
pr("BDUSS has expired.")
return IEBDUSSExpired
if j['errno'] == -9:
pr("File '{}' not exists.".format(args['path']))
return EFileNotFound
return ENoError
def __panapi_revision_list(self, rpath):
pars = {
'path' : rpath,
'desc' : 1 }
self.pd("RevisionList request: {}".format(pars))
return self.__post(PanAPIUrl + 'revision/list?app_id=250528',
{}, self.__panapi_revision_list_act, pars, data = pars, cookies = self.__pancookies )
def revert(self, remotepath, revision, dir = None):
''' Usage: revert <remotepath> revisionid [dir]'''
rpath = get_pcs_path(remotepath)
dir = get_pcs_path(dir)
if not dir:
dir = os.path.dirname(rpath)
return self.__panapi_revision_revert(rpath, revision, dir)
def __panapi_revision_revert_act(self, r, args):
j = r.json()
self.pd("RevisionRevert response: {}".format(j))
if j['errno'] == 0:
self.pv("Remote revert: '{}#{}' =rr=> '{}' OK.".format(args['path'], args['revision'], j['path']))
return ENoError
if j['errno'] == -6: # invalid BDUSS
pr("BDUSS has expired.")
return IEBDUSSExpired
if j['errno'] == -9:
pr("File '{}' not exists.".format(args['path']))
return EFileNotFound
if j['errno'] == 10:
pr("Reverting '{}' in process...".format(args['path']))
return ERequestFailed
return ENoError
def __panapi_revision_revert(self, rpath, revision, dir = None):
if not dir:
dir = os.path.dirname(rpath)
pars = {
'revision' : revision,
'path' : rpath,
'type' : 2,
'dir' : dir }
self.pd("RevisionRevert request: {}".format(pars))
return self.__post(PanAPIUrl + 'revision/revert?app_id=250528',
{}, self.__panapi_revision_revert_act, pars, data = pars, cookies = self.__pancookies )
def __upload_slice_act(self, r, args):
j = r.json()
# slices must be verified and re-upload if MD5s don't match,
# otherwise, it makes the uploading slower at the end
rsmd5 = j['md5']
self.pd("Uploaded MD5 slice: " + rsmd5)
if self.__current_slice_md5 == binascii.unhexlify(rsmd5):
self.__slice_md5s.append(rsmd5)
self.pv("'{}' >>==> '{}' OK.".format(self.__current_file, args))
return ENoError
else:
perr("'{}' >>==> '{}' FAILED.".format(self.__current_file, args))
return EHashMismatch
def __upload_slice(self, remotepath):
pars = {
'method' : 'upload',
'type' : 'tmpfile'}
return self.__post(cpcsurl + 'file',
pars, self.__upload_slice_act, remotepath,
# wants to be proper? properness doesn't work (search this sentence for more occurrences)
#files = { 'file' : (os.path.basename(self.__current_file), self.__current_slice) } )
files = { 'file' : ('file', self.__current_slice) } )
def __upload_file_slices(self, localpath, remotepath, ondup = 'overwrite'):
pieces = MaxSlicePieces
slice = self.__slice_size
if self.__current_file_size <= self.__slice_size * MaxSlicePieces:
# slice them using slice size
pieces = (self.__current_file_size + self.__slice_size - 1 ) / self.__slice_size
else:
# the following comparison is done in the caller:
# elif self.__current_file_size <= MaxSliceSize * MaxSlicePieces:
# no choice, but need to slice them to 'MaxSlicePieces' pieces
slice = (self.__current_file_size + MaxSlicePieces - 1) / MaxSlicePieces
self.pd("Slice size: {}, Pieces: {}".format(slice, pieces))
i = 0
ec = ENoError
with open(self.__current_file, 'rb') as f:
start_time = time.time()
while i < pieces:
self.__current_slice = f.read(slice)
m = hashlib.md5()
m.update(self.__current_slice)
self.__current_slice_md5 = m.digest()
self.pd("Uploading MD5 slice: {}, #{} / {}".format(
binascii.hexlify(self.__current_slice_md5),
i + 1, pieces))
j = 0
while True:
ec = self.__upload_slice(remotepath)
if ec == ENoError:
self.pd("Slice MD5 match, continuing next slice")
pprgr(f.tell(), self.__current_file_size, start_time)
break
elif j < self.__retry:
j += 1
# TODO: improve this, or make it retry via the __request retry logic
perr("Slice MD5 mismatch, waiting {} seconds before retrying...".format(RetryDelayInSec))
time.sleep(RetryDelayInSec)
perr("Retrying #{} / {}".format(j + 1, self.__retry))
else:
self.__slice_md5s = []
break
i += 1
if ec != ENoError:
return ec
else:
#self.pd("Sleep 2 seconds before combining, just to be safer.")
#time.sleep(2)
return self.__combine_file(remotepath, ondup = 'overwrite')
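# Slicing arithmetic (illustrative numbers): with a slice size of 10MB, a 95MB file is
# cut into ceil(95/10) = 10 pieces; once a file would need more than MaxSlicePieces
# slices, the piece count is pinned to MaxSlicePieces and the slice size grows instead.
# Each slice is retried up to self.__retry times on an MD5 mismatch before giving up,
# and the collected slice MD5s are combined with __combine_file() at the end.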
def __rapidupload_file_act(self, r, args):
if self.__verify:
self.pd("Not strong-consistent, sleep 1 second before verification")
time.sleep(1)
return self.__verify_current_file(r.json(), True)
else:
return ENoError
def __rapidupload_file(self, localpath, remotepath, ondup = 'overwrite'):
self.__current_file_md5 = md5(self.__current_file)
self.__current_file_slice_md5 = slice_md5(self.__current_file)
self.__current_file_crc32 = crc32(self.__current_file)
md5str = binascii.hexlify(self.__current_file_md5)
slicemd5str = binascii.hexlify(self.__current_file_slice_md5)
crcstr = hex(self.__current_file_crc32)
pars = {
'method' : 'rapidupload',
'path' : remotepath,
'content-length' : self.__current_file_size,
'content-md5' : md5str,
'slice-md5' : slicemd5str,
'content-crc32' : crcstr,
'ondup' : ondup }
if self.__isrev and ondup != 'newcopy':
pars['is_revision'] = 1
self.pd("RapidUploading Length: {} MD5: {}, Slice-MD5: {}, CRC: {}".format(
self.__current_file_size, md5str, slicemd5str, crcstr))
return self.__post(pcsurl + 'file', pars, self.__rapidupload_file_act)
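# Rapid-upload sketch: if the server already has content with the same length, MD5,
# slice MD5 (first 256KB) and CRC32, it links it to 'remotepath' without transferring
# any data; otherwise the request fails (typically with IEMD5NotFound) and the caller
# falls back to a normal upload. Illustrative parameters (all values are made up):
#   { 'method': 'rapidupload', 'path': '/apps/bypy/big.iso',
#     'content-length': 4700000000, 'content-md5': 'd41d8c...',
#     'slice-md5': '9e107d...', 'content-crc32': '0x1a2b3c4d', 'ondup': 'overwrite' }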
def __upload_one_file_act(self, r, args):
result = self.__verify_current_file(r.json(), False)
if result == ENoError:
self.pv("'{}' ==> '{}' OK.".format(self.__current_file, args))
else:
perr("'{}' ==> '{}' FAILED.".format(self.__current_file, args))
return result
def __upload_one_file(self, localpath, remotepath, ondup = 'overwrite'):
pars = {
'method' : 'upload',
'path' : remotepath,
'ondup' : ondup }
if self.__isrev and ondup != 'newcopy':
pars['is_revision'] = 1
with open(localpath, "rb") as f:
return self.__post(cpcsurl + 'file',
pars, self.__upload_one_file_act, remotepath,
# wants to be proper? properness doesn't work
# there seems to be a bug at Baidu's handling of http text:
# Content-Disposition: ... filename=utf-8''yourfile.ext
# (pass '-ddd' to this program to verify this)
# when you specify a unicode file name, which will be encoded
# using the utf-8'' syntax
# so, we put a work-around here: we always call our file 'file'
# NOTE: an empty file name '' doesn't seem to work, so we
# just give it an arbitrary, non-empty name ('file').
# apparently, Baidu PCS doesn't use this file name for
# checking / verification, so we are probably safe here.
#files = { 'file' : (os.path.basename(localpath), f) })
files = { 'file' : ('file', f) })
#TODO: upload empty directories as well?
def __walk_upload(self, localpath, remotepath, ondup, walk):
(dirpath, dirnames, filenames) = walk
rdir = os.path.relpath(dirpath, localpath)
if rdir == '.':
rdir = ''
else:
rdir = rdir.replace('\\', '/')
rdir = (remotepath + '/' + rdir).rstrip('/') # '/' bites
result = ENoError
for name in filenames:
#lfile = os.path.join(dirpath, name)
lfile = joinpath(dirpath, name)
self.__current_file = lfile
self.__current_file_size = getfilesize(lfile)
rfile = rdir + '/' + name.replace('\\', '/')
# if the corresponding file matches at Baidu Yun, then don't upload
upload = True
self.__isrev = False
self.__remote_json = {}
subresult = self.__get_file_info(rfile, dumpex = False)
if subresult == ENoError: # same-name remote file exists
self.__isrev = True
if ENoError == self.__verify_current_file(self.__remote_json, False):
# the two files are the same
upload = False
self.pv("Remote file '{}' already exists, skip uploading".format(rfile))
else: # the two files are different
if not self.shalloverwrite("Remote file '{}' exists but is different, ".format(rfile) + \
"do you want to overwrite it? [y/N]"):
upload = False
if upload:
fileresult = self.__upload_file(lfile, rfile, ondup)
if fileresult != ENoError:
result = fileresult # we still continue
else:
pinfo("Remote file '{}' exists and is the same, skip uploading".format(rfile))
# next / continue
return result
def __upload_dir(self, localpath, remotepath, ondup = 'overwrite'):
self.pd("Uploading directory '{}' to '{}'".format(localpath, remotepath))
# it's so minor that we don't care about the return value
self.__mkdir(remotepath, dumpex = False)
for walk in os.walk(localpath, followlinks=self.__followlink):
self.__walk_upload(localpath, remotepath, ondup, walk)
def __upload_file(self, localpath, remotepath, ondup = 'overwrite'):
# TODO: this is a quick patch
if not self.__shallinclude(localpath, remotepath, True):
# since we are not going to upload it, there is no error
return ENoError
self.__current_file = localpath
self.__current_file_size = getfilesize(localpath)
result = ENoError
if self.__current_file_size > MinRapidUploadFileSize:
self.pd("'{}' is being RapidUploaded.".format(self.__current_file))
result = self.__rapidupload_file(localpath, remotepath, ondup)
if result == ENoError:
self.pv("RapidUpload: '{}' =R=> '{}' OK.".format(localpath, remotepath))
else:
if not self.__rapiduploadonly:
self.pd("'{}' can't be RapidUploaded, now trying normal uploading.".format(
self.__current_file))
# rapid upload failed, we have to upload manually
if self.__current_file_size <= self.__slice_size:
self.pd("'{}' is being non-slicing uploaded.".format(self.__current_file))
# no-slicing upload
result = self.__upload_one_file(localpath, remotepath, ondup)
elif self.__current_file_size <= MaxSliceSize * MaxSlicePieces:
# slice them using slice size
self.pd("'{}' is being slicing uploaded.".format(self.__current_file))
result = self.__upload_file_slices(localpath, remotepath, ondup)
else:
result = EFileTooBig
perr("Error: size of file '{}' - {} is too big".format(
self.__current_file,
self.__current_file_size))
else:
self.pv("'{}' can't be rapidly uploaded, so it's skipped since we are in the rapid-upload-only mode.".format(localpath))
return result
elif not self.__rapiduploadonly:
# very small file, must be uploaded manually and no slicing is needed
self.pd("'{}' is small and being non-slicing uploaded.".format(self.__current_file))
return self.__upload_one_file(localpath, remotepath, ondup)
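# Upload decision summary for a single file (thresholds come from the module constants):
#   size > MinRapidUploadFileSize -> try rapid upload first;
#     on failure: size <= slice size                      -> single-shot upload,
#                 size <= MaxSliceSize * MaxSlicePieces   -> sliced upload + createsuperfile,
#                 otherwise                               -> EFileTooBig.
#   smaller files (when not in rapid-upload-only mode)    -> single-shot upload directly.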
def upload(self, localpath = u'', remotepath = '', ondup = "overwrite"):
''' Usage: upload [localpath] [remotepath] [ondup] - \
upload a file or directory (recursively)
localpath - local path, is the current directory '.' if not specified
remotepath - remote path at Baidu Yun (after app root directory at Baidu Yun)
ondup - what to do upon duplication ('overwrite' or 'newcopy'), default: 'overwrite'
'''
# copying since Python is call-by-reference by default,
# so we shall not modify the passed-in parameters
lpath = localpath.rstrip('\\/ ') # no trailing slashes
lpathbase = os.path.basename(lpath)
rpath = remotepath
if not lpath:
# so, if you don't specify the local path, it will always be the current directory
# and thus isdir(localpath) is always true
lpath = os.path.abspath(".")
self.pd("localpath not set, set it to current directory '{}'".format(localpath))
if os.path.isfile(lpath):
self.pd("Uploading file '{}'".format(lpath))
if not rpath or rpath == '/': # to root we go
rpath = lpathbase
if rpath[-1] == '/': # user intends to upload to this DIR
rpath = get_pcs_path(rpath + lpathbase)
else:
rpath = get_pcs_path(rpath)
# avoid uploading a file and destroying a directory by accident
subresult = self.__get_file_info(rpath)
if subresult == ENoError: # remote path exists, check whether it is a dir or a file
if self.__remote_json['isdir']: # do this only for dir
rpath += '/' + lpathbase # rpath is guaranteed not to end with '/'
else: # rpath is a file
self.__isrev = True
self.pd("remote path is '{}'".format(rpath))
return self.__upload_file(lpath, rpath, ondup)
elif os.path.isdir(lpath):
self.pd("Uploading directory '{}' recursively".format(lpath))
rpath = get_pcs_path(rpath)
return self.__upload_dir(lpath, rpath, ondup)
else:
perr("Error: invalid local path '{}' for uploading specified.".format(localpath))
return EParameter
def combine(self, remotefile, localfile = '', *args):
''' Usage: combine <remotefile> [md5s] [localfile] - \
try to create a file at PCS by combining slices, having MD5s specified
remotefile - remote file at Baidu Yun (after app root directory at Baidu Yun)
md5s - MD5 digests of the slices, separated by spaces
if not specified, you must specify the 'listfile' using the '-l' or '--list-file' switch on the command line. the MD5 digests will be read from the (text) file, which stores the MD5 digests separated by newlines or spaces
localfile - local file for verification, if not specified, no verification is done
'''
self.__slice_md5s = []
if args:
for arg in args:
self.__slice_md5s.append(arg)
elif self.__list_file_contents:
digests = filter(None, self.__list_file_contents.split())
for d in digests:
self.__slice_md5s.append(d)
else:
perr("You MUST either provide the MD5s through the command line, "
"or using the '-l' ('--list-file') switch to specify "
"the 'listfile' to read MD5s from")
return EArgument
verify = self.__verify
if localfile:
self.__current_file = localfile
self.__current_file_size = getfilesize(localfile)
else:
self.__current_file = '/dev/null' # Force no verify
self.__verify = False
result = self.__combine_file(get_pcs_path(remotefile))
self.__verify = verify
return result
# no longer used
def __get_meta_act(self, r, args):
parse_ok = False
j = r.json()
if 'list' in j:
lj = j['list']
if len(lj) > 0:
self.__remote_json = lj[0] # TODO: ugly patch
# patch for inconsistency between 'list' and 'meta' json
#self.__remote_json['md5'] = self.__remote_json['block_list'].strip('[]"')
self.pd("self.__remote_json: {}".format(self.__remote_json))
parse_ok = True
return ENoError
if not parse_ok:
self.__remote_json = {}
perr("Invalid JSON: {}\n{}".format(j, traceback.format_exc()))
return EInvalidJson
# no longer used
def __get_meta(self, remotefile):
pars = {
'method' : 'meta',
'path' : remotefile }
return self.__get(
pcsurl + 'file', pars,
self.__get_meta_act)
# NO LONGER IN USE
def __downfile_act(self, r, args):
rfile, offset = args
with open(self.__current_file, 'r+b' if offset > 0 else 'wb') as f:
if offset > 0:
f.seek(offset)
rsize = self.__remote_json['size']
start_time = time.time()
for chunk in r.iter_content(chunk_size = self.__dl_chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
pprgr(f.tell(), rsize, start_time)
# https://stackoverflow.com/questions/7127075/what-exactly-the-pythons-file-flush-is-doing
#os.fsync(f.fileno())
# No exception above, then everything goes fine
result = ENoError
if self.__verify:
self.__current_file_size = getfilesize(self.__current_file)
result = self.__verify_current_file(self.__remote_json, False)
if result == ENoError:
self.pv("'{}' <== '{}' OK".format(self.__current_file, rfile))
else:
perr("'{}' <== '{}' FAILED".format(self.__current_file, rfile))
return result
def __downchunks_act(self, r, args):
rfile, offset, rsize, start_time = args
expectedBytes = self.__dl_chunk_size
if rsize - offset < self.__dl_chunk_size:
expectedBytes = rsize - offset
if len(r.content) != expectedBytes:
return ERequestFailed
else:
with open(self.__current_file, 'r+b' if offset > 0 else 'wb') as f:
if offset > 0:
f.seek(offset)
f.write(r.content)
pos = f.tell()
pprgr(pos, rsize, start_time, existing = self.__existing_size)
if pos - offset == expectedBytes:
return ENoError
else:
return EFileWrite
# requirement: self.__remote_json has already been fetched
def __downchunks(self, rfile, start):
rsize = self.__remote_json['size']
pars = {
'method' : 'download',
'path' : rfile }
offset = start
self.__existing_size = offset
start_time = time.time()
while True:
nextoffset = offset + self.__dl_chunk_size
if nextoffset < rsize:
headers = { "Range" : "bytes={}-{}".format(
offset, nextoffset - 1) }
elif offset > 0:
headers = { "Range" : "bytes={}-".format(offset) }
elif rsize >= 1: # offset == 0
# Fix chunked + gzip response,
# seems we need to specify the Range for the first chunk as well:
# https://github.com/houtianze/bypy/pull/161
#headers = { "Range" : "bytes=0-".format(rsize - 1) }
headers = { "Range" : "bytes=0-{}".format(rsize - 1) }
else:
headers = {}
subresult = self.__get(dpcsurl + 'file', pars,
self.__downchunks_act, (rfile, offset, rsize, start_time), headers = headers)
if subresult != ENoError:
return subresult
if nextoffset < rsize:
offset += self.__dl_chunk_size
else:
break
# No exception above, then everything goes fine
result = ENoError
if self.__verify:
self.__current_file_size = getfilesize(self.__current_file)
result = self.__verify_current_file(self.__remote_json, False)
if result == ENoError:
self.pv("'{}' <== '{}' OK".format(self.__current_file, rfile))
else:
perr("'{}' <== '{}' FAILED".format(self.__current_file, rfile))
return result
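# Chunked download sketch: the file is fetched self.__dl_chunk_size bytes at a time via
# HTTP Range requests. Assuming a 1MB chunk size, the successive request headers would
# look like (sizes are illustrative):
#   { 'Range': 'bytes=0-1048575' }        # first chunk
#   { 'Range': 'bytes=1048576-2097151' }  # second chunk
#   { 'Range': 'bytes=2097152-' }         # last chunk, or a resume from an offset
# and each chunk is length-checked in __downchunks_act() before being written to disk.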
def __downfile(self, remotefile, localfile):
# TODO: this is a quick patch
if not self.__shallinclude(localfile, remotefile, False):
# since we are not going to download it, there is no error
return ENoError
result = ENoError
rfile = remotefile
self.__remote_json = {}
self.pd("Downloading '{}' as '{}'".format(rfile, localfile))
self.__current_file = localfile
#if self.__verify or self.__resumedownload:
self.pd("Getting info of remote file '{}' for later verification".format(rfile))
result = self.__get_file_info(rfile)
if result != ENoError:
return result
offset = 0
self.pd("Checking if we already have the copy locally")
if os.path.isfile(localfile):
self.pd("Same-name local file '{}' exists, checking if contents match".format(localfile))
self.__current_file_size = getfilesize(self.__current_file)
if ENoError == self.__verify_current_file(self.__remote_json, False):
self.pd("Same local file '{}' already exists, skip downloading".format(localfile))
return ENoError
else:
if not self.shalloverwrite("Same-name locale file '{}' exists but is different, ".format(localfile) + \
"do you want to overwrite it? [y/N]"):
pinfo("Same-name local file '{}' exists but is different, skip downloading".format(localfile))
return ENoError
if self.__resumedownload and \
self.__compare_size(self.__current_file_size, self.__remote_json) == 2:
# revert back at least one download chunk
pieces = self.__current_file_size // self.__dl_chunk_size
if pieces > 1:
offset = (pieces - 1) * self.__dl_chunk_size
elif os.path.isdir(localfile):
if not self.shalloverwrite("Same-name direcotry '{}' exists, ".format(localfile) + \
"do you want to remove it? [y/N]"):
pinfo("Same-name directory '{}' exists, skip downloading".format(localfile))
return ENoError
self.pv("Directory with the same name '{}' exists, removing ...".format(localfile))
result = removedir(localfile, self.Verbose)
if result == ENoError:
self.pv("Removed")
else:
perr("Error removing the directory '{}'".format(localfile))
return result
ldir, file = os.path.split(localfile)
if ldir and not os.path.exists(ldir):
result = makedir(ldir, verbose = self.Verbose)
if result != ENoError:
perr("Fail to make directory '{}'".format(ldir))
return result
return self.__downchunks(rfile, offset)
def downfile(self, remotefile, localpath = ''):
''' Usage: downfile <remotefile> [localpath] - \
download a remote file.
remotefile - remote file at Baidu Yun (after app root directory at Baidu Yun)
localpath - local path.
if it ends with '/' or '\\', it specifies the local directory
if it specifies an existing directory, it is the local directory
if not specified, the local directory is the current directory '.'
otherwise, it specifies the local file name
To stream a file using downfile, you can use the 'mkfifo' trick with omxplayer etc.:
mkfifo /tmp/omx
bypy.py downfile <remotepath> /tmp/omx &
omxplayer /tmp/omx
'''
localfile = localpath
if not localpath:
localfile = os.path.basename(remotefile)
elif localpath[-1] == '\\' or \
localpath[-1] == '/' or \
os.path.isdir(localpath):
#localfile = os.path.join(localpath, os.path.basename(remotefile))
localfile = joinpath(localpath, os.path.basename(remotefile))
else:
localfile = localpath
pcsrpath = get_pcs_path(remotefile)
return self.__downfile(pcsrpath, localfile)
def __stream_act_actual(self, r, args):
pipe, csize = args
with open(pipe, 'wb') as f:
for chunk in r.iter_content(chunk_size = csize):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
# https://stackoverflow.com/questions/7127075/what-exactly-the-pythons-file-flush-is-doing
#os.fsync(f.fileno())
def __streaming_act(self, r, args):
return self.__stream_act_actual(r, args)
# NOT WORKING YET
def streaming(self, remotefile, localpipe, fmt = 'M3U8_480_360', chunk = 4 * OneM):
''' Usage: stream <remotefile> <localpipe> [format] [chunk] - \
stream a video / audio file converted to M3U format at cloud side, to a pipe.
remotefile - remote file at Baidu Yun (after app root directory at Baidu Yun)
localpipe - the local pipe file to write to
format - output video format (M3U8_320_240 | M3U8_480_224 | \
M3U8_480_360 | M3U8_640_480 | M3U8_854_480). Default: M3U8_480_360
chunk - chunk (initial buffering) size for streaming (default: 4M)
To stream a file, you can use the 'mkfifo' trick with omxplayer etc.:
mkfifo /tmp/omx
bypy.py downfile <remotepath> /tmp/omx &
omxplayer /tmp/omx
*** NOT WORKING YET ****
'''
pars = {
'method' : 'streaming',
'path' : get_pcs_path(remotefile),
'type' : fmt }
return self.__get(pcsurl + 'file', pars,
self.__streaming_act, (localpipe, chunk), stream = True)
def __walk_remote_dir_act(self, r, args):
dirjs, filejs = args
j = r.json()
#self.pd("Remote path content JSON: {}".format(j))
paths = j['list']
for path in paths:
if path['isdir']:
dirjs.append(path)
else:
filejs.append(path)
return ENoError
def __walk_remote_dir(self, remotepath, proceed, args = None):
pars = {
'method' : 'list',
'path' : remotepath,
'by' : 'name',
'order' : 'asc' }
# Python parameters are by-reference and mutable, so they are 'out' by default
dirjs = []
filejs = []
result = self.__get(pcsurl + 'file', pars, self.__walk_remote_dir_act, (dirjs, filejs))
self.pd("Remote dirs: {}".format(dirjs))
self.pd("Remote files: {}".format(filejs))
if result == ENoError:
subresult = proceed(remotepath, dirjs, filejs, args)
if subresult != ENoError:
self.pd("Error: {} while proceeding remote path'{}'".format(
subresult, remotepath))
result = subresult # we continue
for dirj in dirjs:
subresult = self.__walk_remote_dir(dirj['path'], proceed, args)
if subresult != ENoError:
self.pd("Error: {} while sub-walking remote dirs'{}'".format(
subresult, dirjs))
result = subresult
return result
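# Illustrative sketch (not part of bypy) of the 'proceed' callback contract used by
# __walk_remote_dir above: dirjs / filejs are plain lists, and a callback that mutates
# 'args' effectively uses it as an 'out' parameter as well.
#
#   def count_entries(remotepath, dirjs, filejs, args):
#       args['dirs'] += len(dirjs)
#       args['files'] += len(filejs)
#       return ENoError
#
#   counter = {'dirs': 0, 'files': 0}
#   self.__walk_remote_dir(get_pcs_path('/'), count_entries, counter)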
def __prepare_local_dir(self, localdir):
result = ENoError
if os.path.isfile(localdir):
result = removefile(localdir, self.Verbose)
if result == ENoError:
if localdir and not os.path.exists(localdir):
result = makedir(localdir, verbose = self.Verbose)
return result
def __proceed_downdir(self, remotepath, dirjs, filejs, args):
result = ENoError
rootrpath, localpath = args
rlen = len(remotepath) + 1 # '+ 1' for the trailing '/', it bites.
rootlen = len(rootrpath) + 1 # ditto
result = self.__prepare_local_dir(localpath)
if result != ENoError:
perr("Fail to create prepare local directory '{}' for downloading, ABORT".format(localpath))
return result
for dirj in dirjs:
reldir = dirj['path'][rlen:]
#ldir = os.path.join(localpath, reldir)
ldir = joinpath(localpath, reldir)
result = self.__prepare_local_dir(ldir)
if result != ENoError:
perr("Fail to create prepare local directory '{}' for downloading, ABORT".format(ldir))
return result
for filej in filejs:
rfile = filej['path']
relfile = rfile[rootlen:]
#lfile = os.path.join(localpath, relfile)
lfile = joinpath(localpath, relfile)
self.__downfile(rfile, lfile)
return result
def downdir(self, remotepath = None, localpath = None):
''' Usage: downdir <remotedir> [localdir] - \
download a remote directory (recursively)
remotedir - remote directory at Baidu Yun (after app root directory at Baidu Yun)
localdir - local directory. if not specified, it is set to the current directory
'''
rpath = get_pcs_path(remotepath)
lpath = localpath
if not lpath:
lpath = '' # empty string does it, no need '.'
lpath = lpath.rstrip('/\\ ')
return self.__walk_remote_dir(rpath, self.__proceed_downdir, (rpath, lpath))
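# Example invocations (a sketch; 'movies' is a placeholder remote directory):
#   bypy.py downdir movies ./movies    # download /apps/bypy/movies into ./movies
#   bypy.py downdir movies             # same, into the current directory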
def __mkdir_act(self, r, args):
if self.Verbose:
j = r.json()
pr("path, ctime, mtime, fs_id")
pr("{path}, {ctime}, {mtime}, {fs_id}".format(**j))
return ENoError
def __mkdir(self, rpath, **kwargs):
# TODO: this is a quick patch
# the code still works because Baidu Yun doesn't require
# parent directory to exist remotely to upload / create a file
if not self.__shallinclude('.', rpath, True):
return ENoError
self.pd("Making remote directory '{}'".format(rpath))
pars = {
'method' : 'mkdir',
'path' : rpath }
return self.__post(pcsurl + 'file', pars, self.__mkdir_act, **kwargs)
def mkdir(self, remotepath):
''' Usage: mkdir <remotedir> - \
create a directory at Baidu Yun
remotedir - the remote directory
'''
rpath = get_pcs_path(remotepath)
return self.__mkdir(rpath)
def __move_act(self, r, args):
j = r.json()
movelist = j['extra']['list']
fromp = movelist[0]['from']
to = movelist[0]['to']
self.pd("Remote move: '{}' =mm=> '{}' OK".format(fromp, to))
return ENoError
# aliases
def mv(self, fromp, to):
return self.move(fromp, to)
def rename(self, fromp, to):
return self.move(fromp, to)
def ren(self, fromp, to):
return self.move(fromp, to)
def move(self, fromp, to):
''' Usage: move/mv/rename/ren <from> <to> - \
move a file / dir remotely at Baidu Yun
from - source path (file / dir)
to - destination path (file / dir)
'''
frompp = get_pcs_path(fromp)
top = get_pcs_path(to)
pars = {
'method' : 'move',
'from' : frompp,
'to' : top }
self.pd("Remote moving: '{}' =mm=> '{}'".format(fromp, to))
return self.__post(pcsurl + 'file', pars, self.__move_act)
def __copy_act(self, r, args):
j = r.json()
for entry in j['extra']['list']:
fromp = entry['from']
to = entry['to']
self.pd("Remote copy: '{}' =cc=> '{}' OK".format(fromp, to))
return ENoError
# alias
def cp(self, fromp, to):
return self.copy(fromp, to)
def copy(self, fromp, to):
''' Usage: copy/cp <from> <to> - \
copy a file / dir remotely at Baidu Yun
from - source path (file / dir)
to - destination path (file / dir)
'''
frompp = get_pcs_path(fromp)
top = get_pcs_path(to)
pars = {
'method' : 'copy',
'from' : frompp,
'to' : top }
self.pd("Remote copying '{}' =cc=> '{}'".format(frompp, top))
return self.__post(pcsurl + 'file', pars, self.__copy_act)
def __delete_act(self, r, args):
rid = r.json()['request_id']
if rid:
pr("Deletion request '{}' OK".format(rid))
pr("Usage 'list' command to confirm")
return ENoError
else:
perr("Deletion failed")
return EFailToDeleteFile
def __delete(self, rpath):
pars = {
'method' : 'delete',
'path' : rpath }
self.pd("Remote deleting: '{}'".format(rpath))
return self.__post(pcsurl + 'file', pars, self.__delete_act)
# aliases
def remove(self, remotepath):
return self.delete(remotepath)
def rm(self, remotepath):
return self.delete(remotepath)
def delete(self, remotepath):
''' Usage: delete/remove/rm <remotepath> - \
delete a file / dir remotely at Baidu Yun
remotepath - destination path (file / dir)
'''
rpath = get_pcs_path(remotepath)
return self.__delete(rpath)
def __search_act(self, r, args):
print_pcs_list(r.json())
return ENoError
def search(self, keyword, remotepath = None, recursive = True):
''' Usage: search <keyword> [remotepath] [recursive] - \
search for a file using keyword at Baidu Yun
keyword - the keyword to search
remotepath - remote path at Baidu Yun, if not specified, it's app's root directory
recursive - search recursively or not. default is true
'''
rpath = get_pcs_path(remotepath)
pars = {
'method' : 'search',
'path' : rpath,
'wd' : keyword,
're' : '1' if str2bool(recursive) else '0'}
self.pd("Searching: '{}'".format(rpath))
return self.__get(pcsurl + 'file', pars, self.__search_act)
def __listrecycle_act(self, r, args):
print_pcs_list(r.json())
return ENoError
def listrecycle(self, start = 0, limit = 1000):
''' Usage: listrecycle [start] [limit] - \
list the recycle contents
start - starting point, default: 0
limit - maximum number of items to display. default: 1000
'''
pars = {
'method' : 'listrecycle',
'start' : str2int(start),
'limit' : str2int(limit) }
self.pd("Listing recycle '{}'")
return self.__get(pcsurl + 'file', pars, self.__listrecycle_act)
def __restore_act(self, r, args):
path = args
pr("'{}' found and restored".format(path))
return ENoError
def __restore_search_act(self, r, args):
path = args
flist = r.json()['list']
fsid = None
for f in flist:
if os.path.normpath(f['path'].lower()) == os.path.normpath(path.lower()):
fsid = f['fs_id']
self.pd("fs_id for restoring '{}' found".format(fsid))
break
if fsid:
pars = {
'method' : 'restore',
'fs_id' : fsid }
return self.__post(pcsurl + 'file', pars, self.__restore_act, path)
else:
perr("'{}' not found in the recycle bin".format(path))
def restore(self, remotepath):
''' Usage: restore <remotepath> - \
restore a file from the recycle bin
remotepath - the remote path to restore
'''
rpath = get_pcs_path(remotepath)
# by default, only 1000 items, more than that sounds a bit crazy
pars = {
'method' : 'listrecycle' }
self.pd("Searching for fs_id to restore")
return self.__get(pcsurl + 'file', pars, self.__restore_search_act, rpath)
def __proceed_local_gather(self, dirlen, walk):
#names.sort()
(dirpath, dirnames, filenames) = walk
files = []
for name in filenames:
#fullname = os.path.join(dirpath, name)
fullname = joinpath(dirpath, name)
files.append((name, getfilesize(fullname), md5(fullname)))
reldir = dirpath[dirlen:].replace('\\', '/')
place = self.__local_dir_contents.get(reldir)
for dirname in dirnames:
place.add(dirname, PathDictTree('D'))
for finfo in files:
place.add(finfo[0], PathDictTree('F', size = finfo[1], md5 = finfo[2]))
return ENoError
def __gather_local_dir(self, dir):
self.__local_dir_contents = PathDictTree()
for walk in os.walk(dir, followlinks=self.__followlink):
self.__proceed_local_gather(len(dir), walk)
self.pd(self.__local_dir_contents)
def __proceed_remote_gather(self, remotepath, dirjs, filejs, args = None):
# NOTE: the '+ 1' is due to the trailing slash '/'
# be careful about the trailing '/', it bit me once, bitterly
rootrdir = args
rootlen = len(rootrdir)
dlen = len(remotepath) + 1
for d in dirjs:
self.__remote_dir_contents.get(remotepath[rootlen:]).add(
d['path'][dlen:], PathDictTree('D', size = d['size'], md5 = binascii.unhexlify(d['md5'])))
for f in filejs:
self.__remote_dir_contents.get(remotepath[rootlen:]).add(
f['path'][dlen:], PathDictTree('F', size = f['size'], md5 = binascii.unhexlify(f['md5'])))
return ENoError
def __gather_remote_dir(self, rdir):
self.__remote_dir_contents = PathDictTree()
self.__walk_remote_dir(rdir, self.__proceed_remote_gather, rdir)
self.pd("---- Remote Dir Contents ---")
self.pd(self.__remote_dir_contents)
def __compare(self, remotedir = None, localdir = None):
if not localdir:
localdir = '.'
self.pv("Gathering local directory ...")
self.__gather_local_dir(localdir)
self.pv("Done")
self.pv("Gathering remote directory ...")
self.__gather_remote_dir(remotedir)
self.pv("Done")
self.pv("Comparing ...")
# list merge, where Python shines
commonsame = []
commondiff = []
localonly = []
remoteonly = []
# http://stackoverflow.com/questions/1319338/combining-two-lists-and-removing-duplicates-without-removing-duplicates-in-orig
lps = self.__local_dir_contents.allpath()
rps = self.__remote_dir_contents.allpath()
dps = set(rps) - set(lps)
allpath = lps + list(dps)
for p in allpath:
local = self.__local_dir_contents.get(p)
remote = self.__remote_dir_contents.get(p)
if local is None: # must be in the remote dir, since p is from allpath
remoteonly.append((remote.type, p))
elif remote is None:
localonly.append((local.type, p))
else: # all here
same = False
if local.type == 'D' and remote.type == 'D':
type = 'D'
same = True
elif local.type == 'F' and remote.type == 'F':
type = 'F'
if local.extra['size'] == remote.extra['size'] and \
local.extra['md5'] == remote.extra['md5']:
same = True
else:
same = False
else:
type = local.type + remote.type
same = False
if same:
commonsame.append((type, p))
else:
commondiff.append((type, p))
self.pv("Done")
return commonsame, commondiff, localonly, remoteonly
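# Worked example of the classification above (hypothetical paths, illustration only):
#   local : a (D), a/x.txt (F, md5=m1), b.txt (F)
#   remote: a (D), a/x.txt (F, md5=m2), c.txt (F)
# allpath = local paths + remote-only paths = [a, a/x.txt, b.txt, c.txt], giving
#   a       -> both 'D'                        -> commonsame ('D', 'a')
#   a/x.txt -> both 'F' but md5 m1 != m2       -> commondiff ('F', 'a/x.txt')
#   b.txt   -> local only                      -> localonly  ('F', 'b.txt')
#   c.txt   -> remote only                     -> remoteonly ('F', 'c.txt')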
def compare(self, remotedir = None, localdir = None):
''' Usage: compare [remotedir] [localdir] - \
compare the remote directory with the local directory
remotedir - the remote directory at Baidu Yun (after app's directory). \
if not specified, it defaults to the root directory.
localdir - the local directory, if not specified, it defaults to the current directory.
'''
same, diff, local, remote = self.__compare(get_pcs_path(remotedir), localdir)
pr("==== Same files ===")
for c in same:
pr("{} - {}".format(c[0], c[1]))
pr("==== Different files ===")
for d in diff:
pr("{} - {}".format(d[0], d[1]))
pr("==== Local only ====")
for l in local:
pr("{} - {}".format(l[0], l[1]))
pr("==== Remote only ====")
for r in remote:
pr("{} - {}".format(r[0], r[1]))
pr("\nStatistics:")
pr("--------------------------------")
pr("Same: {}".format(len(same)));
pr("Different: {}".format(len(diff)));
pr("Local only: {}".format(len(local)));
pr("Remote only: {}".format(len(remote)));
def syncdown(self, remotedir = '', localdir = u'', deletelocal = False):
''' Usage: syncdown [remotedir] [localdir] [deletelocal] - \
sync down from the remote directory to the local directory
remotedir - the remote directory at Baidu Yun (after app's directory) to sync from. \
if not specified, it defaults to the root directory
localdir - the local directory to sync to. if not specified, it defaults to the current directory.
deletelocal - delete local files that are not inside the Baidu Yun directory, default is False
'''
result = ENoError
rpath = get_pcs_path(remotedir)
same, diff, local, remote = self.__compare(rpath, localdir)
# clear the way
for d in diff:
t = d[0]
p = d[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if t == 'DF':
result = removedir(lcpath, self.Verbose)
subresult = self.__downfile(rcpath, lcpath)
if subresult != ENoError:
result = subresult
elif t == 'FD':
result = removefile(lcpath, self.Verbose)
subresult = makedir(lcpath, verbose = self.Verbose)
if subresult != ENoError:
result = subresult
else: # " t == 'F' " must be true
result = self.__downfile(rcpath, lcpath)
for r in remote:
t = r[0]
p = r[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if t == 'F':
subresult = self.__downfile(rcpath, lcpath)
if subresult != ENoError:
result = subresult
else: # " t == 'D' " must be true
subresult = makedir(lcpath, verbose = self.Verbose)
if subresult != ENoError:
result = subresult
if str2bool(deletelocal):
for l in local:
# use os.path.isfile()/isdir() instead of l[0], because we need to check file/dir existence.
# as we may have removed the parent dir previously during the iteration
#p = os.path.join(localdir, l[1])
p = joinpath(localdir, l[1])
if os.path.isfile(p):
subresult = removefile(p, self.Verbose)
if subresult != ENoError:
result = subresult
elif os.path.isdir(p):
subresult = removedir(p, self.Verbose)
if subresult != ENoError:
result = subresult
return result
def syncup(self, localdir = u'', remotedir = '', deleteremote = False):
''' Usage: syncup [localdir] [remotedir] [deleteremote] - \
sync up from the local directory to the remote directory
localdir - the local directory to sync from. if not specified, it defaults to the current directory.
remotedir - the remote directory at Baidu Yun (after app's directory) to sync to. \
if not specified, it defaults to the root directory
deleteremote - delete remote files that are not inside the local directory, default is False
'''
result = ENoError
rpath = get_pcs_path(remotedir)
#rpartialdir = remotedir.rstrip('/ ')
same, diff, local, remote = self.__compare(rpath, localdir)
# clear the way
for d in diff:
t = d[0] # type
p = d[1] # path
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if self.shalloverwrite("Do you want to overwrite '{}' at Baidu Yun? [y/N]".format(p)):
# this path is before get_pcs_path() since delete() expects so.
#result = self.delete(rpartialdir + '/' + p)
#result = self.__delete(rcpath)
self.pd("diff type: {}".format(t))
self.__isrev = True
if t != 'F':
result = self.move(remotedir + '/' + p, remotedir + '/' + p + '.moved_by_bypy.' + time.strftime("%Y%m%d%H%M%S"))
self.__isrev = False
if t == 'F' or t == 'FD':
subresult = self.__upload_file(lcpath, rcpath)
if subresult != ENoError:
result = subresult
else: # " t == 'DF' " must be true
subresult = self.__mkdir(rcpath)
if subresult != ENoError:
result = subresult
else:
pinfo("Uploading '{}' skipped".format(lcpath))
for l in local:
t = l[0]
p = l[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
self.pd("local type: {}".format(t))
self.__isrev = False
if t == 'F':
subresult = self.__upload_file(lcpath, rcpath)
if subresult != ENoError:
result = subresult
else: # " t == 'D' " must be true
subresult = self.__mkdir(rcpath)
if subresult != ENoError:
result = subresult
if str2bool(deleteremote):
# i think the list is built top-down, so directories appearing later are either
# children or another set of directories
pp = '\\' # previous path, setting to '\\' make sure it won't be found in the first step
for r in remote:
#p = rpartialdir + '/' + r[1]
p = rpath + '/' + r[1]
if 0 != p.find(pp): # another path
#subresult = self.delete(p)
subresult = self.__delete(p)
if subresult != ENoError:
result = subresult
pp = p
return result
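# Worked example of the 'deleteremote' prefix skip above (hypothetical remote-only list):
#   remote = [('D', 'old'), ('F', 'old/a.txt'), ('D', 'misc')]
#   'old'       -> does not start with pp ('\\') -> deleted, pp becomes .../old
#   'old/a.txt' -> starts with pp                -> skipped (gone with its parent)
#   'misc'      -> does not start with pp        -> deleted, pp becomes .../misc
# Caveat: the test is a plain prefix match, so a sibling whose name merely extends the
# previous entry (e.g. 'old2' listed right after 'old') would also be skipped.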
def dumpcache(self):
''' Usage: dumpcache - display file hash cache'''
if cached.cacheloaded:
#pprint.pprint(cached.cache)
MyPrettyPrinter().pprint(cached.cache)
return ENoError
else:
perr("Cache not loaded.")
return ECacheNotLoaded
def cleancache(self):
''' Usage: cleancache - remove invalid entries from hash cache file'''
if os.path.exists(HashCachePath):
try:
# backup first
backup = HashCachePath + '.lastclean'
shutil.copy(HashCachePath, backup)
self.pd("Hash Cache file '{}' backed up as '{}".format(
HashCachePath, backup))
cached.cleancache()
return ENoError
except:
perr("Exception:\n{}".format(traceback.format_exc()))
return EException
else:
return EFileNotFound
OriginalFloatTime = True
def onexit(retcode = ENoError):
# saving is the most important
# we save, but don't clean, why?
# think about unmount path, moved files,
# once we discard the information, they are gone.
# so unless the user specifically requests a clean,
# we don't act too smart.
#cached.cleancache()
cached.savecache()
os.stat_float_times(OriginalFloatTime)
# if we flush() on Ctrl-C, we get
# IOError: [Errno 32] Broken pipe
sys.stdout.flush()
sys.exit(retcode)
def sighandler(signum, frame):
pr("Signal {} received, Abort".format(signum))
pr("Stack:\n")
traceback.print_stack(frame)
onexit(EAbort)
def main(argv=None): # IGNORE:C0111
''' Main Entry '''
# *** IMPORTANT ***
# We must set this in order for cache to work,
# as we need to get integer file mtime, which is used as the key of Hash Cache
global OriginalFloatTime
OriginalFloatTime = os.stat_float_times()
os.stat_float_times(False)
# --- IMPORTANT ---
result = ENoError
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
if sys.platform == 'win32':
#signal.signal(signal.CTRL_C_EVENT, sighandler)
#signal.signal(signal.CTRL_BREAK_EVENT, sighandler)
# bug, see: http://bugs.python.org/issue9524
pass
else:
signal.signal(signal.SIGBUS, sighandler)
signal.signal(signal.SIGHUP, sighandler)
# https://stackoverflow.com/questions/108183/how-to-prevent-sigpipes-or-handle-them-properly
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
signal.signal(signal.SIGQUIT, sighandler)
signal.signal(signal.SIGSYS, sighandler)
signal.signal(signal.SIGABRT, sighandler)
signal.signal(signal.SIGFPE, sighandler)
signal.signal(signal.SIGILL, sighandler)
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGSEGV, sighandler)
signal.signal(signal.SIGTERM, sighandler)
#program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_version_message = '%%(prog)s %s' % (program_version )
shortdesc = __import__('__main__').__doc__.split("\n")[1]
shortdesc = program_version_message + ' -- ' + shortdesc.split('--')[1]
program_shortdesc = shortdesc
program_longdesc = __import__('__main__').__doc__.split("---\n")[1]
try:
# +++ DEPRECATED +++
# check if ApiKey, SecretKey and AppPcsPath are correctly specified.
#if not ApiKey or not SecretKey or not AppPcsPath:
if False:
ApiNotConfigured = '''
*** ABORT *** Baidu API not properly configured
- Please go to 'http://developer.baidu.com/' and create an application.
- Get the ApiKey, SecretKey and configure the App Path (default: '/apps/bypy/')
- Update the corresponding variables at the beginning of this file, \
right after the '# PCS configuration constants' comment.
- Try to run this program again
*** ABORT ***
'''
pr(ApiNotConfigured)
return EApiNotConfigured
# --- DEPRECATED ---
# setup argument parser
epilog = "Commands:\n"
summary = []
for k, v in ByPy.__dict__.items():
if callable(v) and v.__doc__:
help = v.__doc__.strip()
pos = help.find(ByPy.HelpMarker)
if pos != -1:
pos_body = pos + len(ByPy.HelpMarker)
helpbody = help[pos_body:]
helpline = helpbody.split('\n')[0].strip() + '\n'
if helpline.find('help') == 0:
summary.insert(0, helpline)
else:
summary.append(helpline)
remaining = summary[1:]
remaining.sort()
summary = [summary[0]] + remaining
epilog += ''.join(summary)
parser = ArgumentParser(
description=program_shortdesc + '\n\n' + program_longdesc,
formatter_class=RawDescriptionHelpFormatter, epilog=epilog)
# special
parser.add_argument("--TESTRUN", dest="TESTRUN", action="store_true", help="Perform python doctest")
parser.add_argument("--PROFILE", dest="PROFILE", action="store_true", help="Profile the code")
# help, version, program information etc
parser.add_argument('-V', '--version', action='version', version=program_version_message)
#parser.add_argument(dest="paths", help="paths to folder(s) with source file(s) [default: %(default)s]", metavar="path", nargs='+')
# debug, logging
parser.add_argument("-d", "--debug", dest="debug", action="count", default=0, help="enable debugging & logging [default: %(default)s]")
parser.add_argument("-v", "--verbose", dest="verbose", default=0, action="count", help="set verbosity level [default: %(default)s]")
# program tuning, configuration (these will be passed to class ByPy)
parser.add_argument("-r", "--retry", dest="retry", default=5, help="number of retry attempts on network error [default: %(default)i times]")
parser.add_argument("-q", "--quit-when-fail", dest="quit", default=False, help="quit when maximum number of retry failed [default: %(default)s]")
parser.add_argument("-t", "--timeout", dest="timeout", default=60, help="network timeout in seconds [default: %(default)s]")
parser.add_argument("-s", "--slice", dest="slice", default=DefaultSliceSize, help="size of file upload slice (can use '1024', '2k', '3MB', etc) [default: {} MB]".format(DefaultSliceInMB))
parser.add_argument("--chunk", dest="chunk", default=DefaultDlChunkSize, help="size of file download chunk (can use '1024', '2k', '3MB', etc) [default: {} MB]".format(DefaultDlChunkSize / OneM))
parser.add_argument("-e", "--verify", dest="verify", action="store_true", default=False, help="Verify upload / download [default : %(default)s]")
parser.add_argument("-f", "--force-hash", dest="forcehash", action="store_true", help="force file MD5 / CRC32 calculation instead of using cached value")
parser.add_argument("-l", "--list-file", dest="listfile", default=None, help="input list file (used by some of the commands only [default: %(default)s]")
parser.add_argument("--resume-download", dest="resumedl", default=True, help="resume instead of restarting when downloading if local file already exists [default: %(default)s]")
parser.add_argument("--include-regex", dest="incregex", default='', help="regular expression of files to include. if not specified (default), everything is included. for download, the regex applies to the remote files; for upload, the regex applies to the local files. to exclude files, think about your regex, some tips here: https://stackoverflow.com/questions/406230/regular-expression-to-match-string-not-containing-a-word [default: %(default)s]")
parser.add_argument("--on-dup", dest="ondup", default='overwrite', help="what to do when the same file / folder exists in the destination: 'overwrite', 'skip', 'prompt' [default: %(default)s]")
parser.add_argument("--no-symlink", dest="followlink", action="store_false", help="DON'T follow symbol links when uploading / syncing up")
parser.add_argument(DisableSslCheckOption, dest="checkssl", action="store_false", help="DON'T verify host SSL certificate")
parser.add_argument(CaCertsOption, dest="cacerts", help="Specify the path for CA Bundle [default: %(default)s]")
parser.add_argument("--mirror", dest="mirror", default=None, help="Specify the PCS mirror (e.g. bj.baidupcs.com. Open 'https://pcs.baidu.com/rest/2.0/pcs/manage?method=listhost' to get the list) to use.")
parser.add_argument("--rapid-upload-only", dest="rapiduploadonly", action="store_true", help="Only upload large files that can be rapidly uploaded")
# action
parser.add_argument(CleanOptionShort, CleanOptionLong, dest="clean", action="count", default=0, help="1: clean settings (remove the token file) 2: clean settings and hash cache [default: %(default)s]")
# the MAIN parameter - what command to perform
parser.add_argument("command", nargs='*', help = "operations (quota / list)")
# Process arguments
args = parser.parse_args()
if args.mirror:
global pcsurl
global cpcsurl
global dpcsurl
pcsurl = re.sub(r'//.*?/', '//' + args.mirror + '/', pcsurl)
cpcsurl = pcsurl
dpcsurl = pcsurl
try:
slice_size = interpret_size(args.slice)
except (ValueError, KeyError):
pr("Error: Invalid slice size specified '{}'".format(args.slice))
return EArgument
try:
chunk_size = interpret_size(args.chunk)
except (ValueError, KeyError):
pr("Error: Invalid slice size specified '{}'".format(args.slice))
return EArgument
if args.TESTRUN:
return TestRun()
if args.PROFILE:
return Profile()
pr("Token file: '{}'".format(TokenFilePath))
pr("Hash Cache file: '{}'".format(HashCachePath))
pr("App root path at Baidu Yun '{}'".format(AppPcsPath))
pr("sys.stdin.encoding = {}".format(sys.stdin.encoding))
pr("sys.stdout.encoding = {}".format(sys.stdout.encoding))
pr("sys.stderr.encoding = {}".format(sys.stderr.encoding))
if args.verbose > 0:
pr("Verbose level = {}".format(args.verbose))
pr("Debug = {}".format(args.debug))
pr("----\n")
if os.path.exists(HashCachePath):
cachesize = getfilesize(HashCachePath)
if cachesize > 10 * OneM or cachesize == -1:
pr((
"*** WARNING ***\n"
"Hash Cache file '{0}' is very large ({1}).\n"
"This may affect program's performance (high memory consumption).\n"
"You can first try to run 'bypy.py cleancache' to slim the file.\n"
"But if the file size won't reduce (this warning persists),"
" you may consider deleting / moving the Hash Cache file '{0}'\n"
"*** WARNING ***\n\n\n").format(HashCachePath, human_size(cachesize)))
if args.clean >= 1:
result = removefile(TokenFilePath, args.verbose)
if result == ENoError:
pr("Token file '{}' removed. You need to re-authorize "
"the application upon next run".format(TokenFilePath))
else:
perr("Failed to remove the token file '{}'".format(TokenFilePath))
perr("You need to remove it manually")
if args.clean >= 2:
subresult = removefile(HashCachePath, args.verbose)
if subresult == ENoError:
pr("Hash Cache File '{}' removed.".format(HashCachePath))
else:
perr("Failed to remove the Hash Cache File '{}'".format(HashCachePath))
perr("You need to remove it manually")
result = subresult
return result
if len(args.command) <= 0 or \
(len(args.command) == 1 and args.command[0].lower() == 'help'):
parser.print_help()
return EArgument
elif args.command[0] in ByPy.__dict__: # dir(ByPy), dir(by)
timeout = None
if args.timeout:
timeout = float(args.timeout)
cached.usecache = not args.forcehash
cached.verbose = args.verbose
cached.debug = args.debug
cached.loadcache()
by = ByPy(slice_size = slice_size, dl_chunk_size = chunk_size,
verify = args.verify,
retry = int(args.retry), timeout = timeout,
quit_when_fail = args.quit,
listfile = args.listfile,
resumedownload = args.resumedl,
incregex = args.incregex,
ondup = args.ondup,
followlink = args.followlink,
checkssl = args.checkssl,
cacerts = args.cacerts,
rapiduploadonly = args.rapiduploadonly,
verbose = args.verbose, debug = args.debug)
uargs = []
for arg in args.command[1:]:
uargs.append(unicode(arg, SystemEncoding))
result = getattr(by, args.command[0])(*uargs)
else:
pr("Error: Command '{}' not available.".format(args.command[0]))
parser.print_help()
return EParameter
except KeyboardInterrupt:
### handle keyboard interrupt ###
pr("KeyboardInterrupt")
pr("Abort")
except Exception:
perr("Exception occurred:")
pr(traceback.format_exc())
pr("Abort")
# raise
onexit(result)
def TestRun():
import doctest
doctest.testmod()
return ENoError
def Profile():
import cProfile
import pstats
profile_filename = 'bypy_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(ENoError)
def unused():
''' just prevent unused warnings '''
inspect.stack()
if __name__ == "__main__":
main()
# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8
|
Zocalos/bypy
|
bypy.py
|
Python
|
mit
| 114,113
|
[
"VisIt"
] |
9e7d1f3c80577443111ee8af1385af8f952fc274996a1fd21835bd6637a4fbc7
|
#!/usr/bin/env python
#Copyright 2013 Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000, there is a non-exclusive license for use of this work by or on behalf of the U.S. Government. Export of this program may require a license from the United States Government.
import pymongo
import sys, string, time
import unicodedata
import datetime
from vtk import *
from titan.TextAnalysis import *
from titan.DataAnalysis import *
from titan.MachineLearning import *
if len (sys.argv) < 2:
print "Usage: " + sys.argv[0] + " database"
exit ()
from optparse import OptionParser
parser = OptionParser ()
parser.add_option ("-d", "--db", dest="database",
help="Specify the mongo database to work with")
parser.add_option ("-k", "--clusters", dest="clusters", type="int", default=5,
help="Specify the number of user groups to find (default 5)")
(options, args) = parser.parse_args ()
if (options.database == None):
print "You must specify a database. Use -h for help"
exit ();
connection = pymongo.Connection ()
db = connection[options.database]
models = db['models']
phcModel = models.find_one({'model' : 'phc'})
if (len (phcModel['initial']) == 0):
print "Cluster model is too small"
exit ()
userTable = vtkTable ()
for i in range (0, len (phcModel['initial'])):
column = vtkDoubleArray ()
column.SetName (str(i))
userTable.AddColumn (column)
users = db['users']
posts = db['posts']
progress = 0
delta = 1.0/float(users.count ())
next_report = 0.1
for user in users.find ():
progress += delta
if progress > next_report:
sys.stdout.write (".")
sys.stdout.flush ()
next_report += 0.1
clusterValues = [0]*len (phcModel['initial'])
for post in posts.find ({'name' : string.lower (user['name'])}):
cluster = int(post['cluster_assignment'][0])
clusterValues[cluster] += post['cluster_proximity'][0]
user['post_clusters'] = clusterValues
users.update ({'_id' : user['_id']}, user)
for i in range(0, len(clusterValues)):
userTable.GetColumn (i).InsertNextValue (clusterValues[i])
cluster = vtkPHClustering ()
cluster.SetInputData (userTable)
cluster.SetNumberOfClusters (options.clusters)
cluster.SetNumberOfTrials (10)
print "Processing UPC"
cluster.Update ()
clusterTable = cluster.GetOutput (0)
hierarchicalTable = cluster.GetOutput (1)
assignmentTable = cluster.GetOutput (2)
aArr = assignmentTable.GetColumn (0);
assignmentsPython = [
[ aArr.GetComponent (i, j) for j in range (0, aArr.GetNumberOfComponents ()) ]
for i in range (0, aArr.GetNumberOfTuples ()) ]
apArr = assignmentTable.GetColumn (1);
assignmentProxPython = [
[ apArr.GetComponent (i, j) for j in range (0, apArr.GetNumberOfComponents ()) ]
for i in range (0, apArr.GetNumberOfTuples ()) ]
doc = 0
for user in users.find ():
if (doc >= len(assignmentsPython)):
break
user['cluster_assignment'] = assignmentsPython[doc]
user['cluster_proximity'] = assignmentProxPython[doc]
users.update ({'_id' : user['_id']}, user)
doc += 1
if (doc != len(assignmentsPython)):
print "Error assignments and users don't match " + str(doc) + " " + str(len(assignmentsPython))
exit ()
cArr = clusterTable.GetColumn (0)
clusters = [
[ cArr.GetComponent (i, j) for j in range (0, cArr.GetNumberOfComponents ()) ]
for i in range (0, cArr.GetNumberOfTuples ()) ]
pArr = hierarchicalTable.GetColumn (1);
clusterProximities = [
[ pArr.GetComponent (i, j) for j in range (0, pArr.GetNumberOfComponents ()) ]
for i in range (0, pArr.GetNumberOfTuples ()) ]
clusterModel = models.find_one({'model' : 'upc'})
if (clusterModel == None):
clusterModel = {'model': 'upc',
'updated': datetime.datetime.utcnow (),
'cluster': clusters,
'proximities': clusterProximities }
print "Inserting a new UPC model"
models.insert (clusterModel)
else:
updatedModel = {'model': 'upc',
'updated': datetime.datetime.utcnow (),
'cluster': clusters,
'proximities': clusterProximities }
print "Updating the UPC model"
models.update ({'model' : 'upc'}, updatedModel)
for model in models.find ({'model' : 'upc'}):
print "model: " + model['model'] + " updated: " + str(model['updated'])
|
sandialabs/grandmaster
|
processing/updateUPCmodel.py
|
Python
|
apache-2.0
| 4,285
|
[
"VTK"
] |
0636796ebb85ebc03a9e357b9692654d9604e4a6f0f2279700fcd229b9821aa2
|
###
### This script demonstrates the use of the new generic cinema io api to create
### a small dataset based on the disk_out_ref Exodus II file. Three contours
### are created and images are captured from a small set of camera angles. Due
### to the fact that all the data array values are radially symmetric, we chose
### to color by cellNormals (y component), so that it is clear when rotating the
### the dataset in the cinema UI that images are indeed different at each value
### of phi.
###
### To use this script, follow the command example below, after changing the
### variable "outputDirectory" to a path that makes sense for your system.
###
### Below is an example command we have used to run this script. We typically
### use pvpython so that the ParaView imports will be satisfied, then we prepend
### the PYTHONPATH so that the genericCinemaIO imports will be satisfied:
###
### PYTHONPATH=/home/scott/projects/genericCinemaIO /home/scott/projects/ParaView/build-make-debug/bin/pvpython /home/scott/projects/cinema/scripts/data_generation/generateSimpleDiskOut.py
###
import os
# ParaView imports
from paraview.simple import *
# Cinema imports
from cinema_store import *
import pv_explorers
# -----------------------------------------------------------------------------
# Configure input/output
# -----------------------------------------------------------------------------
inputFile = '/home/scott/projects/ParaViewData/Data/disk_out_ref.ex2'
outputDirectory = '/home/scott/Documents/cinemaDemo/simpleCinemaWebGL/tiny-diskout'
if not os.path.exists(outputDirectory):
os.makedirs(outputDirectory)
# -----------------------------------------------------------------------------
# Customize a view
# -----------------------------------------------------------------------------
view = GetRenderView()
view.Background = [1.0, 1.0, 1.0]
view.OrientationAxesVisibility = 0
view.CenterAxesVisibility = 0
# -----------------------------------------------------------------------------
# Create some Cinema settings
# -----------------------------------------------------------------------------
resolution = 500
center_of_rotation = [0.0, 0.0, 0.0]
rotation_axis = [0.0, 0.0, 1.0]
distance = 45.0
phis = range(0, 360, 60)
thetas = range(-60, 61, 30)
iso_values = [ 300.0, 600.0, 900.0 ]
# -----------------------------------------------------------------------------
# Create a pipeline including an exodus reader and a Contour filter
# -----------------------------------------------------------------------------
disk_out_refex2 = ExodusIIReader(FileName=[inputFile])
disk_out_refex2.PointVariables = ['Temp', 'Pres', 'AsH3', 'GaMe3', 'CH4', 'H2']
disk_out_refex2.NodeSetArrayStatus = []
disk_out_refex2.SideSetArrayStatus = []
disk_out_refex2.ElementBlocks = ['Unnamed block ID: 1 Type: HEX8']
# get color transfer function/color map for 'cellNormals'
lut = GetColorTransferFunction('cellNormals')
lut.RGBPoints = [-0.9961946606636047, 0.231373, 0.298039, 0.752941, 0.0, 0.865003, 0.865003, 0.865003, 0.9961946606636047, 0.705882, 0.0156863, 0.14902]
lut.ScalarRangeInitialized = 1.0
lut.VectorComponent = 1
lut.VectorMode = 'Component'
pwf = GetOpacityTransferFunction('cellNormals')
pwf.Points = [-0.9961946606636047, 0.0, 0.5, 0.0, 0.9961946606636047, 1.0, 0.5, 0.0]
pwf.ScalarRangeInitialized = 1
contourFilter = Contour(
Input=disk_out_refex2,
PointMergeMethod="Uniform Binning",
ContourBy = ['POINTS', 'Temp'],
Isosurfaces = [300.0],
ComputeScalars = 1)
representation = Show(contourFilter, view)
representation.ColorArrayName = ['CELLS', 'cellNormals']
representation.LookupTable = lut
# -----------------------------------------------------------------------------
# Configure Cinema data export settings
# -----------------------------------------------------------------------------
# This camera will take care of rotating around the object
cam = pv_explorers.Camera(center_of_rotation, rotation_axis, distance, view)
# Create the Cinema filestore
fng = FileStore(os.path.join(outputDirectory, 'info.json'))
# Configure the name pattern that governs how the images are placed into the
# directory hierarchy
fng.filename_pattern = "{iso}/{phi}/{theta}/image.png"
# Tell the filestore about the possible values for each parameter
fng.add_descriptor('iso', make_cinema_descriptor_properties('iso', range(len(iso_values))))
fng.add_descriptor('theta', make_cinema_descriptor_properties('theta', thetas))
fng.add_descriptor('phi', make_cinema_descriptor_properties('phi', phis))
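# For reference, with the pattern and descriptors configured above, the cutout for
# iso index 0, phi=120, theta=-30 is written to (path shown for illustration only):
#   <outputDirectory>/0/120/-30/image.png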
# -----------------------------------------------------------------------------
# Data exploration loop
# -----------------------------------------------------------------------------
# Now do the exploration by manually iterating over all parameters
for iso in range(len(iso_values)):
# Set the current contour value on the contour filter
contourFilter.Isosurfaces = [ iso_values[iso] ]
# Then capture an image from every configured camera angle
for phi in phis:
for theta in thetas:
# Make an entry in the database and retrieve the filename for the
# current set of parameter values
doc = Document({'iso':iso,'phi':phi,'theta':theta})
# Move the camera to the current location
cam.execute(doc)
# Get the filename, given the current parameter state
fn = fng.get_filename(doc)
fng.insert(doc)
# Triggers the pipeline and then writes the resulting image
WriteImage(fn)
# Generate metadata
fng.add_metadata({'type':'parametric-image-stack'})
fng.save()
|
Kitware/cinema
|
scripts/data_generation/generateSimpleDiskOut.py
|
Python
|
bsd-3-clause
| 5,767
|
[
"ParaView"
] |
564b2599f0193292c6f9b78ec17231d60b974712fd0e570f3987f59802eb4eb5
|
#Caoimhe Harvey
from skyscanner.skyscanner import Flights
flights_service = Flights('<Your API Key>')
# http://stackoverflow.com/questions/7047790/how-can-i-input-data-into-a-webpage-to-scrape-the-resulting-output-using-python
#QPX Express API Key
QAPI_key = 'AIzaSyC74E3Vu_dY0ZfxMIhQlXonC8yklxhVYqU'
#user_airports get's the airport list from the user of the
#airport codes they wish to visit and stores in an array
user_airports = []
#end_route stores the final key (airport) value (cost) pair
#with the best way to get from A to B
end_route = {}
#index for starting point
start_point = 2
#controls input
get_airports = False;
date_interval = input("Enter the average length of time you will spend at each destination: ")
start_date = input("Enter the start date of this journey: ")
while get_airports == False:
airport = input("Input the Airport codes to where you'd like to go: ")
if (airport == 'N'):
get_airports = True;
else:
print("\nTo exit enter 'N'")
user_airports.append(airport)
print (user_airports)
#algorithm to find the best route (defined before it is first called)
def best_route(start, airports):
    temp = {}
    for dest in airports:
        #need to create getCost function taking Airport code as parameter
        #destCost = getCost(start, dest)
        #search
        result = flights_service.get_result(
            currency='EUR',
            locale='en-GB',
            originplace= start + '-sky',
            destinationplace= dest + '-sky',
            outbounddate = start_date,
            adults=1).parsed
        print(result)
        temp.update({dest: result})
    #get minimum cost from temp dictionary here
    minAirport = min(temp, key = temp.get)
    #add minimum cost to end_route
    end_route.update({minAirport : temp[minAirport]})
    #remove the chosen destination and continue the route from it
    user_airports.remove(minAirport)
    temp.clear()
    if (len(user_airports) != 0):
        best_route(minAirport, user_airports)

#starting airport code for the route search
user_start = input("Input the Airport code of your starting point: ")
best_route(user_start, user_airports)

#output final route and cost
for key, value in end_route.items() :
    print (key, value)
|
caoimheharvey/Backpacking_Solution
|
tsp_backpacking.py
|
Python
|
mit
| 2,128
|
[
"VisIt"
] |
403f247f897c74a0804ea9d5a891878a426bced3fb3243ee41765e78e8b25152
|
# Copyright (c) 2018, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from netCDF4 import Dataset
class YamboStaticScreeningDB(object):
"""
Class to handle static screening databases from Yambo
This reads the databases ``ndb.em1s*``
There :math:`v\chi(\omega=0)` is stored.
To calculate epsilon (static dielectric function) we do:
.. math::
\epsilon^{-1} = 1-v\chi
"""
def __init__(self,save='.',filename='ndb.em1s',db1='ns.db1'):
self.save = save
self.filename = filename
#read the lattice parameters
try:
#possibilities where to find db1
for filename in ['%s/%s'%(save,db1)]:#,'%s/../SAVE/%s'%(save,db1)]:
if os.path.isfile(filename):
break
database = Dataset(filename, 'r')
self.alat = database.variables['LATTICE_PARAMETER'][:]
self.lat = database.variables['LATTICE_VECTORS'][:].T
self.volume = np.linalg.det(self.lat)
except:
raise IOError("Error opening %s in YamboStaticScreeningDB"%filename)
#read em1s database
try:
database = Dataset("%s/%s"%(self.save,self.filename), 'r')
except:
raise IOError("Error opening %s/%s in YamboStaticScreeningDB"%(self.save,self.filename))
#read some parameters
size,nbands,eh = database.variables['X_PARS_1'][:3]
self.size = int(size)
self.nbands = int(nbands)
self.eh = eh
#read gvectors
gvectors = np.rint(database.variables['X_RL_vecs'][:].T)
self.gvectors = np.array([g/self.alat for g in gvectors])
self.ngvectors = len(self.gvectors)
#read q-points
qpoints = database.variables['HEAD_QPT'][:].T
self.qpoints = np.array([q/self.alat for q in qpoints])
self.nqpoints = len(self.qpoints)
#are we using the coulomb cutoff?
self.cutoff = "".join(database.variables['CUTOFF'][:][0]).strip()
self.readDBs()
def readDBs(self):
"""
Read the yambo databases
"""
#create database to hold all the X data
self.X = np.zeros([self.nqpoints,self.size,self.size],dtype=np.complex64)
for nq in range(self.nqpoints):
#open database for each k-point
filename = "%s/%s_fragment_%d"%(self.save,self.filename,nq+1)
try:
    database = Dataset(filename)
except:
    print("warning: failed to read %s"%filename)
    continue
#static screening means we have only one frequency
# this try/except is because the way this is stored has changed in yambo
try:
re, im = database.variables['X_Q_%d'%(nq+1)][0,:]
except:
re, im = database.variables['X_Q_%d'%(nq+1)][0,:].T
self.X[nq] = re + 1j*im
#close database
database.close()
def saveDBS(self,path):
"""
Save the database
"""
if os.path.isdir(path): shutil.rmtree(path)
os.mkdir(path)
#copy all the files
oldpath = self.save
filename = self.filename
shutil.copyfile("%s/%s"%(oldpath,filename),"%s/%s"%(path,filename))
for nq in range(self.nqpoints):
fname = "%s_fragment_%d"%(filename,nq+1)
shutil.copyfile("%s/%s"%(oldpath,fname),"%s/%s"%(path,fname))
#edit with the new wfs
X = self.X
for nq in range(self.nqpoints):
fname = "%s_fragment_%d"%(filename,nq+1)
database = Dataset("%s/%s"%(path,fname),'r+')
database.variables['X_Q_%d'%(nq+1)][0,0,:] = X[nq].real
database.variables['X_Q_%d'%(nq+1)][0,1,:] = X[nq].imag
database.close()
def writetxt(self,filename='em1s.dat',ng1=0,ng2=0,volume=False):
"""
Write epsilon_{g1,g2}(q) (default g1=g2=0) as a function of |q| to a text file
volume -> multiply by the volume
"""
x,y = self._geteq(ng1=ng1,ng2=ng2,volume=volume)
np.savetxt(filename,np.array([x,y]).T)
def get_g_index(self,g):
"""
get the index of the gvectors.
If the gvector is not present return None
"""
for ng,gvec in enumerate(self.gvectors):
if np.isclose(g,gvec).all():
return ng
return None
def _geteq(self,ng1=0,ng2=0,volume=False):
"""
Get epsilon_{ng1,ng2} = [1/(1+vX)]_{ng1,ng2} as a function of |q|
vX is a matrix with size equal to the number of local fields components
In the database we find vX(\omega=0) where:
v -> coulomb interaction (truncated or not)
X -> electronic response function
Arguments:
ng1, ng2 -> Choose local field components
volume -> Normalize with the volume of the cell
"""
x = [np.linalg.norm(q) for q in self.qpoints]
y = [np.linalg.inv(np.eye(self.ngvectors)+xq)[ng2,ng1] for xq in self.X ]
#order according to the distance
x, y = list(zip(*sorted(zip(x, y))))
y = np.array(y)
#scale by volume?
if volume: y *= self.volume
return x,y
def _getvxq(self,ng1=0,ng2=0,volume=False):
"""
Get vX_{ng1,ng2} a function of |q|
vX is a matrix with size equal to the number of local fields components
In the database we find vX(\omega=0) where:
v -> coulomb interaction (truncated or not)
X -> electronic response function
Arguments:
ng1, ng2 -> Choose local field components
volume -> Normalize with the volume of the cell
"""
x = [np.linalg.norm(q) for q in self.qpoints]
y = [xq[ng2,ng1] for xq in self.X ]
#order according to the distance
x, y = list(zip(*sorted(zip(x, y))))
y = np.array(y)
#scale by volume?
if volume: y *= self.volume
return x,y
def plot(self,ax,volume=False,**kwargs):
"""
Plot the static screening as a function of |q|
Arguments
ax -> Instance of the matplotlib axes or some other object with the plot method
func -> Function to apply to the dielectric function
"""
#get vX_{00}
x,vX = self._getvxq(volume=volume)
#when plotting we apply a function to epsilon to represent it, by default the |x|
ax.plot(x,(1+vX).real,**kwargs)
ax.set_xlabel('$|q|$')
ax.set_ylabel('$\epsilon^{-1}_{00}(\omega=0)$')
def __str__(self):
s = ""
s += "nqpoints: %d\n"%self.nqpoints
s += "X size: %d\n"%self.size
s += "cutoff: %s\n"%self.cutoff
return s
if __name__ == "__main__":
ys = YamboStaticScreeningDB()
print(ys)
#plot static screening
ax = plt.gca()
ys.plot(ax)
plt.show()
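# Minimal usage sketch (assumes the ns.db1 and ndb.em1s* fragments live in './SAVE'):
# ys = YamboStaticScreeningDB(save='./SAVE')
# ys.writetxt('em1s.dat') # two columns: |q| and epsilon_{00}
# ax = plt.gca(); ys.plot(ax); plt.show()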
|
henriquemiranda/yambo-py
|
yambopy/dbs/em1sdb.py
|
Python
|
bsd-3-clause
| 7,055
|
[
"Yambo"
] |
5ab3a2576f5bddcd8e79227f7c2bcdd907e7f6ffb0a89b8136f1579b3c2161ca
|
#!/usr/bin/env python -u
#############################################################################################
#
# Python script to demonstrate interacting with CASDA's TAP and SODA implementations to
# retrieve cutout images in bulk.
#
# This script does a TAP query to get the image cubes for a given scheduling block, and can be
# configured to either:
# a) conduct a second TAP query to identify catalogue entries of interest, and create an async
# job to download cutouts at the RA and DEC of each of the catalogue entries.
# b) create an async job to download the entire image cube file.
#
# Author: Amanda Helliwell on 16 Dec 2015
#
# Written for python 2.7
# Note: astropy is available on galaxy via 'module load astropy'
# On other machines, try Anaconda https://www.continuum.io/downloads
#
#############################################################################################
from __future__ import print_function, division, unicode_literals
import argparse
import os
from astropy.io.votable import parse
import casda
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(
description="Generate and download cutouts around each source identified in a scheduling block.")
parser.add_argument("opal_username",
help="Your user name on the ATNF's online proposal system (normally an email address)")
parser.add_argument("-p", "--opal_password", help="Your password on the ATNF's online proposal system")
parser.add_argument("--password_file", help="The file holding your password for the ATNF's online proposal system")
parser.add_argument("--full_files", help="Should full files be downloaded rather than just cutouts",
action='store_true')
parser.add_argument("scheduling_block_id", help="The id of the ASKAP scheduling block to be queried.")
parser.add_argument("destination_directory", help="The directory where the resulting files will be stored")
args = parser.parse_args()
return args
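# Example invocation (hypothetical values; the SBID and paths are placeholders):
# python cutouts.py your.name@example.org --password_file ~/.opal_pw 1234 ./cutout_output
# Add --full_files to download the complete image cubes instead of cutouts.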
def download_cutouts(sbid, username, password, destination_dir, catalogue_query, do_cutouts, cutout_radius_degrees=0.1):
# 2) Use CASDA VO (secure) to query for the images associated with the given scheduling_block_id
print ("\n\n** Finding images and image cubes for scheduling block {} ... \n\n".format(sbid))
data_product_id_query = "select * from ivoa.obscore where obs_id = '" + str(
sbid) + "' and dataproduct_type = 'cube' and dataproduct_subtype in ('cont.restored.t0', 'spectral.restored.3d')"
filename = destination_dir + "image_cubes_" + str(sbid) + ".xml"
casda.sync_tap_query(data_product_id_query, filename, username, password)
image_cube_votable = parse(filename, pedantic=False)
results_array = image_cube_votable.get_table_by_id('results').array
service = 'cutout_service' if do_cutouts else 'async_service'
# 3) For each of the image cubes, query datalink to get the secure datalink details
print ("\n\n** Retrieving datalink for each image and image cube...\n\n")
authenticated_id_tokens = []
for image_cube_result in results_array:
image_cube_id = image_cube_result['obs_publisher_did'].decode('utf-8')
async_url, authenticated_id_token = casda.get_service_link_and_id(image_cube_id, username,
password,
service=service,
destination_dir=destination_dir)
if authenticated_id_token is not None and len(authenticated_id_tokens) < 10:
authenticated_id_tokens.append(authenticated_id_token)
if len(authenticated_id_tokens) == 0:
print ("No image cubes for scheduling_block_id " + str(sbid))
return 1
# Run the catalogue_query to find catalogue entries that are of interest
if do_cutouts:
print ("\n\n** Finding components in each image and image cube...\n\n")
filename = destination_dir + "catalogue_query_" + str(sbid) + ".xml"
casda.sync_tap_query(catalogue_query, filename, username, password)
catalogue_vo_table = parse(filename, pedantic=False)
catalogue_results_array = catalogue_vo_table.get_table_by_id('results').array
print ("\n\n** Found %d components...\n\n" % (len(catalogue_results_array)))
if len(catalogue_results_array) == 0:
print ("No catalogue entries matching the criteria found for scheduling_block_id " + str(sbid))
return 1
# For each source found in the catalogue query, create a position filter
pos_list = []
for entry in catalogue_results_array:
ra = entry['ra_deg_cont']
dec = entry['dec_deg_cont']
circle = "CIRCLE " + str(ra) + " " + str(dec) + " " + str(cutout_radius_degrees)
pos_list.append(circle)
# Generate cutouts from each image around each source
# where there is no overlap an error file is generated but can be ignored.
job_location = casda.create_async_soda_job(authenticated_id_tokens)
if do_cutouts:
casda.add_params_to_async_job(job_location, 'pos', pos_list)
job_status = casda.run_async_job(job_location)
print ('\nJob finished with status %s address is %s\n\n' % (job_status, job_location))
if job_status != 'ERROR':
casda.download_all(job_location, destination_dir)
return 0
def main():
args = parseargs()
password = casda.get_opal_password(args.opal_password, args.password_file)
# 1) Create the destination directory
destination_dir = args.destination_directory + "/"
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
# Change this to choose which environment to use, prod is the default
# casda.use_at()
catalogue_query = 'SELECT * FROM casda.continuum_component where first_sbid = {} and flux_peak > 500'.format(
args.scheduling_block_id)
return download_cutouts(args.scheduling_block_id, args.opal_username, password, destination_dir, catalogue_query,
not args.full_files)
if __name__ == '__main__':
exit(main())
|
csiro-rds/casda-samples
|
cutouts.py
|
Python
|
apache-2.0
| 6,363
|
[
"Galaxy"
] |
bb9b7e70f1e37360462a8c651bc6107692cb8d97fab284e322871b51908eb148
|
#! /usr/bin/env python
#
# Copyright (C) 2011, 2012, 2013, 2014 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import PISM
import PISM.invert.ssa
import numpy as np
import sys, os, math
from PISM.logging import logMessage
class SSAForwardRun(PISM.invert.ssa.SSAForwardRunFromInputFile):
def write(self,filename,append=False):
if not append:
PISM.invert.ssa.SSAForwardRunFromInputFile.write(self,filename)
else:
grid = self.grid
vecs = self.modeldata.vecs
pio = PISM.PIO(grid.com,"netcdf3", grid.get_unit_system())
pio.open(filename,PISM.NC_WRITE,True) #append mode!
self.modeldata.vecs.write(filename)
pio.close()
class InvSSAPlotListener(PISM.invert.listener.PlotListener):
def __init__(self,grid,Vmax):
PISM.invert.listener.PlotListener.__init__(self,grid)
self.Vmax = Vmax
self.l2_weight = None
self.l2_weight_init = False
def __call__(self,inverse_solver,count,data):
if not self.l2_weight_init:
vecs = inverse_solver.ssarun.modeldata.vecs;
if vecs.has('vel_misfit_weight'):
self.l2_weight=self.toproczero(vecs.vel_misfit_weight)
self.l2_weight_init = True
method = inverse_solver.method
r=self.toproczero(data.residual)
Td = None
if data.has_key('T_zeta_step'): Td = self.toproczero(data.T_zeta_step)
TStarR = None
if data.has_key('TStar_residual'): TStarR = self.toproczero(data.TStar_residual)
d = None
if data.has_key('zeta_step'): d = self.toproczero(data.zeta_step)
zeta = self.toproczero(data.zeta)
secpera = self.grid.convert(1.0, "year", "second")
if self.grid.rank == 0:
import matplotlib.pyplot as pp
pp.figure(self.figure())
l2_weight=self.l2_weight
pp.clf()
V = self.Vmax
pp.subplot(2,3,1)
if l2_weight is not None:
rx = l2_weight*r[0,:,:]*secpera
else:
rx = r[0,:,:]*secpera
rx = np.maximum(rx,-V)
rx = np.minimum(rx,V)
pp.imshow(rx,origin='lower',interpolation='nearest')
pp.colorbar()
pp.title('r_x')
pp.jet()
pp.subplot(2,3,4)
if l2_weight is not None:
ry = l2_weight*r[1,:,:]*secpera
else:
ry = r[1,:,:]*secpera
ry = np.maximum(ry,-V)
ry = np.minimum(ry,V)
pp.imshow(ry,origin='lower',interpolation='nearest')
pp.colorbar()
pp.title('r_y')
pp.jet()
if method == 'ign':
pp.subplot(2,3,2)
Tdx = Td[0,:,:]*secpera
pp.imshow(Tdx,origin='lower',interpolation='nearest')
pp.colorbar()
pp.title('Td_x')
pp.jet()
pp.subplot(2,3,5)
Tdy = Td[1,:,:]*secpera
pp.imshow(Tdy,origin='lower',interpolation='nearest')
pp.colorbar()
pp.title('Td_y')
pp.jet()
elif method == 'sd' or method == 'nlcg':
pp.subplot(2,3,2)
pp.imshow(TStarR,origin='lower',interpolation='nearest')
pp.colorbar()
pp.title('TStarR')
pp.jet()
if d is not None:
d *= -1
pp.subplot(2,3,3)
pp.imshow(d,origin='lower',interpolation='nearest')
# colorbar does a divide by zero if 'd' is all zero,
# as it will be at the start of iteration zero.
# The warning message is a distraction, so we suppress it.
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pp.colorbar()
pp.jet()
pp.title('-zeta_step')
pp.subplot(2,3,6)
pp.imshow(zeta,origin='lower',interpolation='nearest')
pp.colorbar()
pp.jet()
pp.title('zeta')
pp.ion()
pp.draw()
pp.show()
class InvSSALinPlotListener(PISM.invert.listener.PlotListener):
def __init__(self,grid,Vmax):
PISM.invert.listener.PlotListener.__init__(self,grid)
self.Vmax = Vmax
self.l2_weight = None
self.l2_weight_init = False
def __call__(self,inverse_solver,count,data):
# On the first go-around, extract the l2_weight vector onto
# processor zero.
if self.l2_weight_init == False:
vecs = inverse_solver.ssarun.modeldata.vecs;
self.l2_weight = self.toproczero(vecs.vel_misfit_weight)
self.l2_weight_init = True
l2_weight=self.l2_weight
r = self.toproczero(data.r)
d = self.toproczero(data.d)
if self.grid.rank == 0:
import matplotlib.pyplot as pp
pp.figure(self.figure())
pp.clf()
V = self.Vmax
pp.subplot(1,3,1)
rx = l2_weight*r[0,:,:]
rx = np.maximum(rx,-V)
rx = np.minimum(rx,V)
pp.imshow(rx,origin='lower',interpolation='nearest')
pp.colorbar()
pp.title('ru')
pp.jet()
pp.subplot(1,3,2)
ry = l2_weight*r[1,:,:]
ry = np.maximum(ry,-V)
ry = np.minimum(ry,V)
pp.imshow(ry,origin='lower',interpolation='nearest')
pp.colorbar()
pp.title('rv')
pp.jet()
d *= -1
pp.subplot(1,3,3)
pp.imshow(d,origin='lower',interpolation='nearest')
pp.colorbar()
pp.jet()
pp.title('-d')
pp.ion()
pp.show()
def adjustTauc(mask,tauc):
"""Where ice is floating or land is ice-free, tauc should be adjusted to have some preset default values."""
logMessage(" Adjusting initial estimate of 'tauc' to match PISM model for floating ice and ice-free bedrock.\n")
grid = mask.get_grid()
high_tauc = grid.config.get("high_tauc")
with PISM.vec.Access(comm=tauc,nocomm=mask):
mq = PISM.MaskQuery(mask)
for (i,j) in grid.points():
if mq.ocean(i,j):
tauc[i,j] = 0;
elif mq.ice_free(i,j):
tauc[i,j] = high_tauc
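# Hedged illustration (not part of PISM): the same masking rule that adjustTauc
# applies, written with plain numpy arrays instead of PISM vectors and MaskQuery.
# The helper name and the mask codes are assumptions of this sketch only.
def _adjust_tauc_numpy_sketch(mask, tauc, high_tauc, ocean_code=4, ice_free_code=0):
    """Zero tauc over ocean cells and set it to high_tauc over ice-free cells."""
    import numpy as np
    tauc = np.array(tauc, dtype=float, copy=True)
    mask = np.asarray(mask)
    tauc[mask == ocean_code] = 0.0            # floating ice / open ocean
    tauc[mask == ice_free_code] = high_tauc   # ice-free bedrock
    return tauc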
def createDesignVec(grid,design_var,name=None,**kwargs):
if name is None:
name = design_var
if design_var == "tauc":
design_vec = PISM.model.createYieldStressVec(grid,name=name,**kwargs)
elif design_var == "hardav":
design_vec = PISM.model.createAveragedHardnessVec(grid,name=name,**kwargs)
else:
raise ValueError("Unknown design variable %s" % design_var)
return design_vec
## Main code starts here
if __name__ == "__main__":
context = PISM.Context()
config = context.config
com = context.com
PISM.set_abort_on_sigint(True)
WIDE_STENCIL = 2
usage = \
""" pismi.py [-i IN.nc [-o OUT.nc]]/[-a INOUT.nc] [-inv_data inv_data.nc] [-inv_forward model]
[-inv_design design_var] [-inv_method meth]
where:
-i IN.nc is input file in NetCDF format: contains PISM-written model state
-o OUT.nc is output file in NetCDF format to be overwritten
-a INOUT.nc is input/output file in NetCDF format to be appended to
-inv_data inv_data.nc is data file containing extra inversion data (e.g. observed surface velocities)
-inv_forward model forward model: only 'ssa' supported
-inv_design design_var design variable name; one of 'tauc'/'hardav' for SSA inversions
-inv_method meth algorithm for inversion [sd,nlcg,ign,tikhonov_lmvm]
notes:
* only one of -i/-a is allowed; both specify the input file
* only one of -o/-a is allowed; both specify the output file
* if -o is used, only the variables involved in inversion are written to the output file.
    * if -a is used, the variables involved in inversion are appended to the given file. No
original variables in the file are changed.
"""
append_mode = False
PISM.setVerbosityLevel(1)
for o in PISM.OptionsGroup(context.com,"","pismi"):
input_filename = PISM.optionsString("-i","input file")
append_filename = PISM.optionsString("-a","append file",default=None)
output_filename = PISM.optionsString("-o","output file",default=None)
if (input_filename is None) and (append_filename is None):
PISM.verbPrintf(1,com,"\nError: No input file specified. Use one of -i [file.nc] or -a [file.nc].\n")
PISM.PISMEndQuiet()
if (input_filename is not None) and (append_filename is not None):
PISM.verbPrintf(1,com,"\nError: Only one of -i/-a is allowed.\n")
PISM.PISMEndQuiet()
if (output_filename is not None) and (append_filename is not None):
PISM.verbPrintf(1,com,"\nError: Only one of -a/-o is allowed.\n")
PISM.PISMEndQuiet()
if append_filename is not None:
input_filename = append_filename
output_filename = append_filename
append_mode = True
inv_data_filename = PISM.optionsString("-inv_data","inverse data file",default=input_filename)
verbosity = PISM.optionsInt("-verbose","verbosity level",default=2)
do_plotting = PISM.optionsFlag("-inv_plot","perform visualization during the computation",default=False)
do_final_plot = PISM.optionsFlag("-inv_final_plot","perform visualization at the end of the computation",default=False)
Vmax = PISM.optionsReal("-inv_plot_vmax","maximum velocity for plotting residuals",default=30)
design_var = PISM.optionsList(context.com,"-inv_ssa","design variable for inversion", ["tauc", "hardav"], "tauc")
do_pause = PISM.optionsFlag("-inv_pause","pause each iteration",default=False)
do_restart = PISM.optionsFlag("-inv_restart","Restart a stopped computation.",default=False)
use_design_prior = PISM.optionsFlag("-inv_use_design_prior","Use prior from inverse data file as initial guess.",default=True)
prep_module = PISM.optionsString("-inv_prep_module","Python module used to do final setup of inverse solver",default=None)
is_regional = PISM.optionsFlag("-regional","Compute SIA/SSA using regional model semantics",default=False)
using_zeta_fixed_mask = PISM.optionsFlag("-inv_use_zeta_fixed_mask",
"Enforce locations where the parameterized design variable should be fixed. (Automatically determined if not provided)",default=True)
inv_method = config.get_string("inv_ssa_method")
if output_filename is None:
output_filename = "pismi_"+os.path.basename(input_filename)
saving_inv_data = (inv_data_filename != output_filename)
PISM.setVerbosityLevel(verbosity)
forward_run = SSAForwardRun(input_filename, inv_data_filename, design_var)
forward_run.setup()
design_param = forward_run.designVariableParameterization()
solver = PISM.invert.ssa.createInvSSASolver(forward_run)
modeldata = forward_run.modeldata
vecs = modeldata.vecs
grid = modeldata.grid
# Determine the prior guess for tauc/hardav. This can be one of
# a) tauc/hardav from the input file (default)
# b) tauc/hardav_prior from the inv_datafile if -inv_use_design_prior is set
design_prior = createDesignVec(grid,design_var,'%s_prior' % design_var)
long_name = design_prior.metadata().get_string("long_name")
units = design_prior.metadata().get_string("units")
design_prior.set_attrs("", "best prior estimate for %s (used for inversion)" % long_name, units, "");
if PISM.util.fileHasVariable(inv_data_filename,"%s_prior" % design_var) and use_design_prior:
PISM.logging.logMessage(" Reading '%s_prior' from inverse data file %s.\n" % (design_var,inv_data_filename));
design_prior.regrid(inv_data_filename,critical=True)
vecs.add(design_prior,writing=saving_inv_data)
else:
if not PISM.util.fileHasVariable(input_filename,design_var):
PISM.verbPrintf(1,com,"Initial guess for design variable is not available as '%s' in %s.\nYou can provide an initial guess in the inverse data file.\n" % (design_var,input_filename) )
exit(1)
PISM.logging.logMessage("Reading '%s_prior' from '%s' in input file.\n" % (design_var,design_var) );
design = createDesignVec(grid,design_var)
design.regrid(input_filename,True)
design_prior.copy_from(design)
vecs.add(design_prior,writing=True)
if using_zeta_fixed_mask:
if PISM.util.fileHasVariable(inv_data_filename,"zeta_fixed_mask"):
zeta_fixed_mask = PISM.model.createZetaFixedMaskVec(grid)
zeta_fixed_mask.regrid(inv_data_filename)
vecs.add(zeta_fixed_mask)
else:
if design_var == 'tauc':
logMessage(" Computing 'zeta_fixed_mask' (i.e. locations where design variable '%s' has a fixed value).\n" % design_var)
zeta_fixed_mask = PISM.model.createZetaFixedMaskVec(grid)
zeta_fixed_mask.set(1);
mask = vecs.ice_mask
with PISM.vec.Access(comm=zeta_fixed_mask,nocomm=mask):
mq = PISM.MaskQuery(mask)
for (i,j) in grid.points():
if mq.grounded_ice(i,j):
zeta_fixed_mask[i,j] = 0;
vecs.add(zeta_fixed_mask)
adjustTauc(vecs.ice_mask,design_prior)
elif design_var == 'hardav':
PISM.logging.logPrattle("Skipping 'zeta_fixed_mask' for design variable 'hardav'; no natural locations to fix its value.")
pass
else:
raise NotImplementedError("Unable to build 'zeta_fixed_mask' for design variable %s.", design_var)
# Convert design_prior -> zeta_prior
zeta_prior = PISM.IceModelVec2S();
zeta_prior.create(grid, "zeta_prior", PISM.WITH_GHOSTS, WIDE_STENCIL)
design_param.convertFromDesignVariable(design_prior,zeta_prior)
vecs.add(zeta_prior,writing=True)
# Determine the initial guess for zeta. If we are restarting, load it from
# the output file. Otherwise, if 'zeta_inv' is in the inverse data file, use it.
# If none of the above, copy from 'zeta_prior'.
zeta = PISM.IceModelVec2S();
zeta.create(grid, "zeta_inv", PISM.WITH_GHOSTS, WIDE_STENCIL)
zeta.set_attrs("diagnostic", "zeta_inv", "1", "zeta_inv")
if do_restart:
# Just to be sure, verify that we have a 'zeta_inv' in the output file.
if not PISM.util.fileHasVariable(output_filename,'zeta_inv'):
PISM.verbPrintf(1,com,"Unable to restart computation: file %s is missing variable 'zeta_inv'", output_filename)
exit(1)
PISM.logging.logMessage(" Inversion starting from 'zeta_inv' found in %s\n" % output_filename )
zeta.regrid(output_filename,True)
elif PISM.util.fileHasVariable(inv_data_filename, 'zeta_inv'):
PISM.logging.logMessage(" Inversion starting from 'zeta_inv' found in %s\n" % inv_data_filename )
zeta.regrid(inv_data_filename,True)
else:
zeta.copy_from(zeta_prior)
vel_ssa_observed = None
vel_ssa_observed = PISM.model.create2dVelocityVec(grid,'_ssa_observed',stencil_width=2)
if PISM.util.fileHasVariable(inv_data_filename,"u_ssa_observed"):
vel_ssa_observed.regrid(inv_data_filename,True)
vecs.add(vel_ssa_observed,writing=saving_inv_data)
else:
if not PISM.util.fileHasVariable(inv_data_filename,"u_surface_observed"):
PISM.verbPrintf(1,context.com,"Neither u/v_ssa_observed nor u/v_surface_observed is available in %s.\nAt least one must be specified.\n" % inv_data_filename)
exit(1)
vel_surface_observed = PISM.model.create2dVelocityVec(grid,'_surface_observed',stencil_width=2)
vel_surface_observed.regrid(inv_data_filename,True)
vecs.add(vel_surface_observed,writing=saving_inv_data)
sia_solver=PISM.SIAFD
if is_regional:
sia_solver=PISM.SIAFD_Regional
vel_sia_observed = PISM.sia.computeSIASurfaceVelocities(modeldata,sia_solver)
vel_sia_observed.rename('_sia_observed',"'observed' SIA velocities'","")
vel_ssa_observed.copy_from(vel_surface_observed)
vel_ssa_observed.add(-1,vel_sia_observed)
vecs.add(vel_ssa_observed,writing=True)
# If the inverse data file has a variable tauc/hardav_true, this is probably
# a synthetic inversion. We'll load it now so that it will get written
# out, if needed, at the end of the computation in the output file.
if PISM.util.fileHasVariable(inv_data_filename,"%s_true" % design_var):
design_true = createDesignVec(grid,design_var,'%s_true' % design_var)
design_true.regrid(inv_data_filename,True)
design_true.read_attributes(inv_data_filename)
vecs.add(design_true,writing=saving_inv_data)
# Establish a logger which will save logging messages to the output file.
message_logger = PISM.logging.CaptureLogger(output_filename,'pismi_log');
PISM.logging.add_logger(message_logger)
if append_mode or do_restart:
message_logger.readOldLog()
# Prep the output file from the grid so that we can save zeta to it during the runs.
if not append_mode:
pio = PISM.PIO(grid.com,"netcdf3", grid.get_unit_system())
pio.open(output_filename,PISM.NC_WRITE,False)
pio.def_time(grid.config.get_string("time_dimension_name"),
grid.config.get_string("calendar"), grid.time.units_string())
pio.append_time(grid.config.get_string("time_dimension_name"),grid.time.current())
pio.close()
zeta.write(output_filename)
# Log the command line to the output file now so that we have a record of
# what was attempted
PISM.util.writeProvenance(output_filename)
# Attach various iteration listeners to the solver as needed for:
# Iteration report.
solver.addIterationListener(PISM.invert.ssa.printIteration)
# Misfit reporting/logging.
misfit_logger = PISM.invert.ssa.MisfitLogger()
solver.addIterationListener(misfit_logger)
if inv_method.startswith('tikhonov'):
solver.addIterationListener(PISM.invert.ssa.printTikhonovProgress)
# Saving the current iteration
solver.addDesignUpdateListener(PISM.invert.ssa.ZetaSaver(output_filename))
# Plotting
if do_plotting:
solver.addIterationListener(InvSSAPlotListener(grid,Vmax))
if solver.method=='ign':
solver.addLinearIterationListener(InvSSALinPlotListener(grid,Vmax))
# Solver is set up. Give the user's prep module a chance to do any final
# setup.
if prep_module is not None:
if prep_module.endswith(".py"):
            prep_module = prep_module[0:-3]  # strip the ".py" suffix (three characters)
exec "import %s as user_prep_module" % prep_module
user_prep_module.prep_solver(solver)
# Pausing (add this after the user's listeners)
if do_pause:
solver.addIterationListener(PISM.invert.listener.pauseListener)
# Run the inverse solver!
if do_restart:
PISM.logging.logMessage('************** Restarting inversion. ****************\n')
else:
PISM.logging.logMessage('============== Starting inversion. ==================\n')
# Try solving
reason = solver.solveInverse(zeta_prior,vel_ssa_observed,zeta);
if reason.failed():
PISM.logging.logError("Inverse solve FAILURE:\n%s\n" % reason.nested_description(1));
quit()
PISM.logging.logMessage("Inverse solve success (%s)!\n" % reason.description());
(zeta,u) = solver.inverseSolution()
# Convert back from zeta to tauc
design = createDesignVec(grid,design_var)
design_param.convertToDesignVariable(zeta,design)
# It may be that a 'tauc'/'hardav' was read in earlier. We replace it with
# our newly generated one.
if vecs.has(design_var): vecs.remove(design_var)
vecs.add(design,writing=True)
vecs.add(zeta,writing=True)
u.rename("_ssa_inv","SSA velocity computed by inversion","")
vecs.add(u,writing=True)
residual = PISM.model.create2dVelocityVec(grid,name='_inv_ssa_residual')
residual.copy_from(u)
residual.add(-1,vel_ssa_observed);
r_mag = PISM.IceModelVec2S();
r_mag.create(grid,"inv_ssa_residual", PISM.WITHOUT_GHOSTS,0);
r_mag.set_attrs("diagnostic","magnitude of mismatch between observed surface velocities and their reconstrution by inversion",
"m s-1", "inv_ssa_residual", 0);
r_mag.metadata().set_double("_FillValue", grid.convert(-0.01,'m/year','m/s'));
r_mag.metadata().set_double("valid_min", 0.0);
r_mag.set_glaciological_units("m year-1")
r_mag.write_in_glaciological_units = True
residual.magnitude(r_mag)
r_mag.mask_by(vecs.thickness)
vecs.add(residual,writing=True)
vecs.add(r_mag,writing=True)
# Write solution out to netcdf file
forward_run.write(output_filename,append=append_mode)
# If we're not in append mode, the previous command just nuked
# the output file. So we rewrite the siple log.
if not append_mode:
message_logger.write(output_filename)
# Save the misfit history
misfit_logger.write(output_filename)
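# Hedged illustration (not part of PISM): the residual post-processing above
# (misfit vector, its magnitude, and masking of ice-free cells) expressed with
# plain numpy arrays. The helper name and the fill-value handling are
# assumptions of this sketch only.
def _residual_magnitude_numpy_sketch(u_inv, v_inv, u_obs, v_obs, thickness, fill_value=-0.01):
    import numpy as np
    ru = np.asarray(u_inv) - np.asarray(u_obs)   # x-component of the misfit
    rv = np.asarray(v_inv) - np.asarray(v_obs)   # y-component of the misfit
    r_mag = np.hypot(ru, rv)                     # pointwise magnitude
    # keep the magnitude only where ice is present, mirroring mask_by(thickness)
    return np.where(np.asarray(thickness) > 0, r_mag, fill_value)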
|
talbrecht/pism_pik06
|
examples/inverse/pismi.py
|
Python
|
gpl-3.0
| 20,733
|
[
"NetCDF"
] |
b700da27a849cdc0dfde6594901a47213ff68c1e206076213338638cc8f1a896
|
# Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, angle, count_nonzero)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
    Each tuple of mean, variance, and standard deviation estimates represents
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.1767242068607087, 24.459103821334018))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
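# Hedged sketch (not part of scipy): checks the equivalence stated in the
# bayes_mvs docstring above, i.e. that bayes_mvs(data, alpha) matches
# tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data)). The function
# name is hypothetical and imports are deferred so module import is unaffected.
def _demo_bayes_mvs_equivalence(data=(6, 9, 12, 7, 8, 8, 13), alpha=0.9):
    import numpy as np
    from scipy import stats
    direct = stats.bayes_mvs(data, alpha=alpha)
    via_dists = tuple((d.mean(), d.interval(alpha)) for d in stats.mvsdist(data))
    for res, (center, interval) in zip(direct, via_dists):
        assert np.isclose(res.statistic, center)
        assert np.allclose(res.minmax, interval)
    return direct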
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
See Also
--------
bayes_mvs
Notes
-----
    The return values from ``bayes_mvs(data)`` are equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
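# Hedged sketch (not part of scipy): rebuilds the small-sample "mean" distribution
# exactly as mvsdist constructs it above (a Student t centred on the sample mean
# with scale sqrt(C/(n-1)), C being the biased variance) and checks one interval
# against stats.mvsdist. The helper name is hypothetical.
def _demo_mvsdist_mean_by_hand(data=(6, 9, 12, 7, 8, 8, 13)):
    import math
    import numpy as np
    from scipy import stats
    x = np.ravel(data)
    n = len(x)
    by_hand = stats.t(n - 1, loc=x.mean(), scale=math.sqrt(x.var() / (n - 1)))
    mdist, _, _ = stats.mvsdist(x)
    assert np.allclose(by_hand.interval(0.95), mdist.interval(0.95))
    return by_hand.interval(0.95)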
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
.. math::
k_{1} = \mu
k_{2} = \frac{n}{n-1} m_{2}
k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
    where :math:`\mu` is the sample mean, :math:`m_2` is the sample
    variance, and :math:`m_i` is the i-th sample central moment.
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> rndm = np.random.RandomState(1234)
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rndm.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
r"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat: Returns the n-th k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
.. math::
var(k_{1}) = \frac{\kappa^2}{n}
var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1}
var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
\frac{9 \kappa^2_{3}}{n - 1} +
\frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
\frac{48 \kappa_{3} \kappa_5}{n - 1} +
\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
    The expectations of these order statistics are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
    >>> from scipy.stats.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
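# Hedged sketch (not part of scipy): Filliben's approximation written out with
# plain numpy, mirroring the private helper above -- exact medians for the two
# extreme order statistics, evenly spaced interior points. Hypothetical name.
def _demo_filliben_medians(n=5):
    import numpy as np
    v = np.empty(n)
    v[-1] = 0.5 ** (1.0 / n)              # median of the largest order statistic
    v[0] = 1.0 - v[-1]                    # median of the smallest order statistic
    i = np.arange(2, n)
    v[1:-1] = (i - 0.3175) / (n + 0.365)  # Filliben's interior approximation
    return v                              # matches _calc_uniform_order_statistic_medians(n)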
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
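# Hedged sketch (not part of scipy): the `plot` argument used throughout this
# module only needs "plot" and "text" methods (missing label/title setters are
# silently ignored by the helper above), so a tiny recording object is enough
# for headless use, e.g. probplot(x, plot=_RecordingPlot()). The class name is
# hypothetical.
class _RecordingPlot(object):
    """Collects the calls that probplot/ppcc_plot would send to matplotlib."""
    def __init__(self):
        self.calls = []
    def plot(self, *args, **kwargs):
        self.calls.append(('plot', args, kwargs))
    def text(self, *args, **kwargs):
        self.calls.append(('text', args, kwargs))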
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
0.5**(1/n), for i = n
val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1
1 - 0.5**(1/n), for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
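# Hedged sketch (not part of scipy): the core of probplot without any plotting --
# theoretical quantiles from Filliben's medians, the ordered responses, and a
# least-squares line -- cross-checked against probplot itself. Hypothetical name.
def _demo_probplot_by_hand(x=None):
    import numpy as np
    from scipy import stats
    if x is None:
        x = stats.norm.rvs(size=50, random_state=0)
    osm = stats.norm.ppf(_calc_uniform_order_statistic_medians(len(x)))
    osr = np.sort(x)
    slope, intercept, r = stats.linregress(osm, osr)[:3]
    (osm_ref, osr_ref), fit_ref = stats.probplot(x)
    assert np.allclose(osm, osm_ref) and np.allclose(osr, osr_ref)
    assert np.allclose((slope, intercept, r), fit_ref)
    return slope, intercept, r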
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""
Calculate the shape parameter that maximizes the PPCC
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. ppcc_max returns the shape parameter that would maximize the
probability plot correlation coefficient for the given data to a
one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
then they are assumed to be a starting interval for a downhill bracket
search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See also
--------
ppcc_plot, probplot, boxcox
Notes
-----
The brack keyword serves as a starting point which is useful in corner
cases. One can use a plot to obtain a rough visual estimate of the location
for the maximum to start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
... random_state=1234567) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 6))
>>> ax = fig.add_subplot(111)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
We calculate the value where the shape should reach its maximum and a red
line is drawn there. The line should coincide with the highest point in the
ppcc_plot.
>>> max = stats.ppcc_max(x)
>>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
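# Hedged sketch (not part of scipy): a brute-force alternative to the Brent
# search in ppcc_max -- evaluate the probability-plot correlation on a grid of
# shape parameters and return the argmax. Helper name and grid are assumptions.
def _demo_ppcc_max_grid(x=None, shapes=None):
    import numpy as np
    from scipy import stats
    if x is None:
        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=1000,
                                  random_state=1234567)
    if shapes is None:
        shapes = np.linspace(-2.0, 2.0, 401)
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    osr = np.sort(x)
    corrs = [stats.pearsonr(stats.tukeylambda.ppf(osm_uniform, s), osr)[0]
             for s in shapes]
    return shapes[int(np.argmax(corrs))]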
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
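# Hedged sketch (not part of scipy): evaluates the log-likelihood formula given
# in the boxcox_llf docstring directly and compares it with the function above.
# Hypothetical helper name.
def _demo_boxcox_llf_by_hand(lmb=0.5, data=(1.0, 2.0, 3.0, 4.0, 5.0)):
    import numpy as np
    from scipy import special, stats
    x = np.asarray(data, dtype=float)
    n = x.shape[0]
    y = special.boxcox(x, lmb)
    llf = (lmb - 1) * np.sum(np.log(x)) - n / 2.0 * np.log(np.sum((y - y.mean()) ** 2) / n)
    assert np.isclose(llf, stats.boxcox_llf(lmb, x))
    return llf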
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda > 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
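# Hedged sketch (not part of scipy): the piecewise Box-Cox definition quoted in
# the docstring above, written with plain numpy and checked against
# scipy.special.boxcox. Hypothetical helper name.
def _demo_boxcox_transform_by_hand(x=(1.0, 2.0, 3.0), lmbda=0.5):
    import numpy as np
    from scipy import special
    x = np.asarray(x, dtype=float)
    y = np.log(x) if lmbda == 0 else (x ** lmbda - 1.0) / lmbda
    assert np.allclose(y, special.boxcox(x, lmbda))
    return y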
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
    Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\lambda$',
ylabel='Prob Plot Corr. Coef.',
title='Box-Cox Normality Plot')
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is accurate
but the p-value may not be.
The chance of rejecting the null hypothesis when it is true is close to 5%
regardless of sample size.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> stats.shapiro(x)
(0.9772805571556091, 0.08144091814756393)
"""
if a is not None or reta:
warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
"removed in version 0.18.0", FutureWarning)
x = np.ravel(x)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn("Input data for shapiro has range zero. The results "
"may not be accurate.")
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
"""
if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
z = distributions.norm.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
z = distributions.logistic.cdf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
z = distributions.gumbel_l.cdf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
return AndersonResult(A2, critical, sig)
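# Hedged sketch (not part of scipy): computes the Anderson-Darling A^2 statistic
# for the normal case exactly as anderson() does above and checks the result.
# Hypothetical helper name; the sample data are arbitrary.
def _demo_anderson_normal_by_hand(x=None):
    import numpy as np
    from scipy import stats
    if x is None:
        x = np.linspace(-3.0, 3.0, 101) ** 3   # clearly non-normal sample
    x = np.asarray(x, dtype=float)
    n = len(x)
    z = stats.norm.cdf(np.sort((x - x.mean()) / x.std(ddof=1)))
    i = np.arange(1, n + 1)
    a2 = -n - np.sum((2 * i - 1.0) / n * (np.log(z) + np.log(1 - z[::-1])))
    assert np.isclose(a2, stats.anderson(x, dist='norm')[0])
    return a2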
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If fewer than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = np.sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * np.sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * np.sum(a1[find:], axis=0) / total
else:
pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
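# A minimal usage sketch (hypothetical samples; a small p-value would indicate
# that the two samples are unlikely to share the same scale parameter):
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.random.seed(1234)
# >>> x = np.random.normal(scale=1.0, size=40)
# >>> y = np.random.normal(scale=3.0, size=40)
# >>> statistic, pvalue = stats.ansari(x, y)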
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return BartlettResult(np.nan, np.nan)
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
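# A minimal usage sketch (hypothetical measurement groups; reject equal variances
# when the returned p-value falls below the chosen significance level):
# >>> from scipy import stats
# >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
# >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
# >>> statistic, pvalue = stats.bartlett(a, b)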
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
* 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
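# A minimal usage sketch (reusing the hypothetical lists a and b from the Bartlett
# sketch above; 'median' is the default centering and the robust choice for skewed data):
# >>> from scipy import stats
# >>> statistic, pvalue = stats.levene(a, b, center='median')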
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is 'two-sided'.
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = distributions.binom.cdf(x, n, p)
return pval
if alternative == 'greater':
pval = distributions.binom.sf(x-1, n, p)
return pval
# if alternative was neither 'less' nor 'greater', then it's 'two-sided'
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
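# A minimal usage sketch: testing whether 3 successes out of 15 trials is
# consistent with a hypothesized success probability of 0.1 (the counts and
# probability are arbitrary illustration choices):
# >>> from scipy import stats
# >>> pvalue = stats.binom_test(3, n=15, p=0.1, alternative='two-sided')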
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
"""
Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
comparative study of tests for homogeneity of variances, with
applications to the outer continental shelf bidding data.
Technometrics, 23(4), 351-361.
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return FlignerResult(np.nan, np.nan)
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, np.sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
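# A minimal usage sketch (hypothetical samples; as with `levene` and `bartlett`,
# a small p-value argues against equal variances across the groups):
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.random.seed(42)
# >>> g1 = np.random.normal(scale=1.0, size=30)
# >>> g2 = np.random.normal(scale=1.5, size=30)
# >>> g3 = np.random.normal(scale=1.0, size=30)
# >>> statistic, pvalue = stats.fligner(g1, g2, g3, center='median')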
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
p-value : scalar ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
statistic : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
pvalue : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
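# A minimal usage sketch for paired measurements (hypothetical before/after data;
# passing the differences directly as `x` is equivalent to passing x and y):
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.random.seed(0)
# >>> before = np.random.normal(loc=10.0, size=30)
# >>> after = before + np.random.normal(loc=0.5, size=30)
# >>> statistic, pvalue = stats.wilcoxon(before, after)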
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
bad_kwd = kwds.keys()[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j * ang), axis=axis))
mask = res < 0
if mask.ndim > 0:
res[mask] += 2*pi
elif mask:
res += 2*pi
return res*(high - low)/2.0/pi + low
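# A minimal usage sketch: two angles just either side of 0 (mod 2*pi) have a
# circular mean near 0, whereas their arithmetic mean would sit near pi:
# >>> import numpy as np
# >>> from scipy import stats
# >>> stats.circmean([0.1, 2*np.pi - 0.1])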
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j * ang), axis=axis)
R = abs(res)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
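# A minimal usage sketch: angles tightly clustered around 0 (mod 2*pi) have a
# small circular standard deviation even though their linear spread is large:
# >>> import numpy as np
# >>> from scipy import stats
# >>> angles = np.array([0.05, 0.1, 2*np.pi - 0.05, 2*np.pi - 0.1])
# >>> stats.circstd(angles) < np.std(angles)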
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
|
jlcarmic/producthunt_simulator
|
venv/lib/python2.7/site-packages/scipy/stats/morestats.py
|
Python
|
mit
| 94,486
|
[
"Gaussian"
] |
8d533d697bc4bcb4895b1092dadce94e07a33280b92041ef6f16b57c432913a1
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""
This script can be run from the command-line to check the properties of images, in particular, the
adaptive moments that come from iteratively determining the best-fit Gaussian for the object at the
center of an image.
It takes arguments from the command line, i.e.
MeasMoments.py image_file [guess_sigma sky_level]
where the ones in brackets are optional:
image_file: A file containing an image for which the moments should be measured
[guess_sigma]: An initial guess for the Gaussian sigma of the image
[sky_level]: If the image contains a non-zero sky level, it must be specified
Results will be printed to stdout:
Status (0 = success), Mxx, Myy, Mxy, e1, e2, number of iterations, total flux in best-fit elliptical
Gaussian, x centroid, y centroid, best-fit Gaussian sigma
Here we use the e1 = (Mxx - Myy)/(Mxx + Myy) and e2 = 2*Mxy/(Mxx + Myy) definition of ellipticity.
"""
import sys
import os
import numpy as np
# This machinery lets us run Python examples even though they aren't positioned properly to find
# galsim as a package in the current directory.
try:
import galsim
except ImportError:
path, filename = os.path.split(__file__)
sys.path.append(os.path.abspath(os.path.join(path, "..")))
import galsim
# properly handle command-line arguments
numArg = len(sys.argv)-1
if (numArg < 1 or numArg > 3):
raise RuntimeError("Wrong number of command-line arguments: should be in the range 1...3!")
image_file = sys.argv[1]
guess_sigma = 5.0
sky_level = 0.0
if (numArg >= 2):
guess_sigma = float(sys.argv[2])
if (numArg == 3):
sky_level = float(sys.argv[3])
# read in image
image = galsim.fits.read(image_file)
if sky_level > 0.:
image -= sky_level
# measure adaptive moments
result = galsim.hsm.FindAdaptiveMom(image, guess_sig = guess_sigma)
# manipulate results to get moments
e1_val = result.observed_shape.e1
e2_val = result.observed_shape.e2
a_val = (1.0 + e1_val) / (1.0 - e1_val)
b_val = np.sqrt(a_val - (0.5*(1.0+a_val)*e2_val)**2)
mxx = a_val * (result.moments_sigma**2) / b_val
myy = (result.moments_sigma**2) / b_val
mxy = 0.5 * e2_val * (mxx + myy)
# output results
print '%d %12.6f %12.6f %12.6f %12.6f %12.6f %03d %12.6f %12.6f %12.6f %12.6f' % \
(result.moments_status, mxx, myy, mxy, e1_val, e2_val, result.moments_n_iter,
result.moments_amp, result.moments_centroid.x-result.image_bounds.getXMin(),
result.moments_centroid.y-result.image_bounds.getYMin(), result.moments_sigma)
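# A minimal sketch of running the same measurement from Python rather than the
# command line (hypothetical file name; only calls already used in this script):
# >>> import galsim
# >>> im = galsim.fits.read("some_image.fits")
# >>> res = galsim.hsm.FindAdaptiveMom(im, guess_sig=2.0)
# >>> res.moments_sigma, res.observed_shape.e1, res.observed_shape.e2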
|
mardom/GalSim
|
examples/MeasMoments.py
|
Python
|
gpl-3.0
| 3,251
|
[
"Galaxy",
"Gaussian"
] |
56e459949428d3583ce1e87e405c05da901b18b42b41337457223497d7261dd2
|
# Copyright (C) 2002, Thomas Hamelryck ([email protected])
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Output of PDB files."""
from Bio.Data.IUPACData import atom_weights # Allowed Elements
_ATOM_FORMAT_STRING="%s%5i %-4s%c%3s %c%4i%c %8.3f%8.3f%8.3f%6.2f%6.2f %4s%2s%2s\n"
class Select(object):
"""
Default selection (everything) during writing - can be used as base class
to implement selective output. This selects which entities will be written out.
"""
def __repr__(self):
return "<Select all>"
def accept_model(self, model):
"""
Overload this to reject models for output.
"""
return 1
def accept_chain(self, chain):
"""
Overload this to reject chains for output.
"""
return 1
def accept_residue(self, residue):
"""
Overload this to reject residues for output.
"""
return 1
def accept_atom(self, atom):
"""
Overload this to reject atoms for output.
"""
return 1
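# A minimal sketch of a selective writer built on Select (a hypothetical subclass,
# not part of this module): keep only C-alpha atoms when saving a structure.
# >>> class CAlphaSelect(Select):
# ...     def accept_atom(self, atom):
# ...         return 1 if atom.get_name() == "CA" else 0
# >>> io = PDBIO()
# >>> io.set_structure(structure)  # `structure` is an already-parsed Structure object
# >>> io.save("ca_only.pdb", select=CAlphaSelect())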
class PDBIO(object):
"""
Write a Structure object (or a subset of a Structure object) as a PDB file.
Example:
>>> p=PDBParser()
>>> s=p.get_structure("1fat", "1fat.pdb")
>>> io=PDBIO()
>>> io.set_structure(s)
>>> io.save("out.pdb")
"""
def __init__(self, use_model_flag=0):
"""
@param use_model_flag: if 1, force use of the MODEL record in output.
@type use_model_flag: int
"""
self.use_model_flag=use_model_flag
# Private methods
def _get_atom_line(self, atom, hetfield, segid, atom_number, resname,
resseq, icode, chain_id, charge=" "):
"""Returns an ATOM PDB string (PRIVATE)."""
if hetfield!=" ":
record_type="HETATM"
else:
record_type="ATOM "
if atom.element:
element = atom.element.strip().upper()
if element.capitalize() not in atom_weights:
raise ValueError("Unrecognised element %r" % atom.element)
element = element.rjust(2)
else:
element = " "
name=atom.get_fullname()
altloc=atom.get_altloc()
x, y, z=atom.get_coord()
bfactor=atom.get_bfactor()
occupancy=atom.get_occupancy()
args=(record_type, atom_number, name, altloc, resname, chain_id,
resseq, icode, x, y, z, occupancy, bfactor, segid,
element, charge)
return _ATOM_FORMAT_STRING % args
# Public methods
def set_structure(self, structure):
self.structure=structure
def save(self, file, select=Select(), write_end=0):
"""
@param file: output file
@type file: string or filehandle
@param select: selects which entities will be written.
@type select:
select should have the following methods:
- accept_model(model)
- accept_chain(chain)
- accept_residue(residue)
- accept_atom(atom)
These methods should return 1 if the entity
is to be written out, 0 otherwise.
Typically select is a subclass of L{Select}.
"""
get_atom_line=self._get_atom_line
if isinstance(file, basestring):
fp=open(file, "w")
close_file=1
else:
# filehandle, I hope :-)
fp=file
close_file=0
# multiple models?
if len(self.structure)>1 or self.use_model_flag:
model_flag=1
else:
model_flag=0
for model in self.structure.get_list():
if not select.accept_model(model):
continue
# necessary for ENDMDL
# do not write ENDMDL if no residues were written
# for this model
model_residues_written=0
atom_number=1
if model_flag:
fp.write("MODEL %s\n" % model.serial_num)
for chain in model.get_list():
if not select.accept_chain(chain):
continue
chain_id=chain.get_id()
# necessary for TER
# do not write TER if no residues were written
# for this chain
chain_residues_written=0
for residue in chain.get_unpacked_list():
if not select.accept_residue(residue):
continue
hetfield, resseq, icode=residue.get_id()
resname=residue.get_resname()
segid=residue.get_segid()
for atom in residue.get_unpacked_list():
if select.accept_atom(atom):
chain_residues_written=1
model_residues_written=1
s=get_atom_line(atom, hetfield, segid, atom_number, resname,
resseq, icode, chain_id)
fp.write(s)
atom_number=atom_number+1
if chain_residues_written:
fp.write("TER\n")
if model_flag and model_residues_written:
fp.write("ENDMDL\n")
if write_end:
fp.write('END\n')
if close_file:
fp.close()
if __name__=="__main__":
from Bio.PDB.PDBParser import PDBParser
import sys
p=PDBParser(PERMISSIVE=True)
s=p.get_structure("test", sys.argv[1])
io=PDBIO()
io.set_structure(s)
io.save("out1.pdb")
fp=open("out2.pdb", "w")
s1=p.get_structure("test1", sys.argv[1])
s2=p.get_structure("test2", sys.argv[2])
io=PDBIO(1)
io.set_structure(s1)
io.save(fp)
io.set_structure(s2)
io.save(fp, write_end=1)
fp.close()
|
bryback/quickseq
|
genescript/Bio/PDB/PDBIO.py
|
Python
|
mit
| 6,035
|
[
"Biopython"
] |
552744126cdacfd5060f07c8eaea83004e9801457e3c56e8bbd5a41613b09d66
|
import fnmatch
import tempfile
from contextlib import contextmanager
from os import makedirs
from os import unlink
from os.path import (
abspath,
basename,
dirname,
exists,
join,
sep,
)
from re import compile, escape
from typing import Any, Dict, List, Type
from galaxy.util.bunch import Bunch
from .config_util import read_file
from .transport import get_file
from .transport import post_file
from .transport import (
rsync_get_file,
rsync_post_file,
scp_get_file,
scp_post_file,
)
from .util import copy_to_path
from .util import directory_files
from .util import unique_path_prefix
DEFAULT_MAPPED_ACTION = 'transfer' # Not really clear to me what this should be, exception?
DEFAULT_PATH_MAPPER_TYPE = 'prefix'
STAGING_ACTION_REMOTE = "remote"
STAGING_ACTION_LOCAL = "local"
STAGING_ACTION_NONE = None
STAGING_ACTION_DEFAULT = "default"
# Poor man's enum.
path_type = Bunch(
# Galaxy input datasets and extra files.
INPUT="input",
# Galaxy config and param files.
CONFIG="config",
# Files from tool's tool_dir (for now just wrapper if available).
TOOL="tool",
# Input tool work dir files - e.g. task-split input file
WORKDIR="workdir",
# Job directory files (e.g. tool standard input/output and containerized command).
JOBDIR="jobdir",
# Input metadata dir files - e.g. metadata files, etc..
METADATA="metadata",
# Galaxy output datasets in their final home.
OUTPUT="output",
# Galaxy from_work_dir output paths and other files (e.g. galaxy.json)
OUTPUT_WORKDIR="output_workdir",
# Meta job and data files (e.g. Galaxy metadata generation files and
# metric instrumentation files)
OUTPUT_METADATA="output_metadata",
# Job directory files output.
OUTPUT_JOBDIR="output_jobdir",
# Other fixed tool parameter paths (likely coming from tool data, but not
# necessarily).
UNSTRUCTURED="unstructured",
)
ACTION_DEFAULT_PATH_TYPES = [
path_type.INPUT,
path_type.CONFIG,
path_type.TOOL,
path_type.WORKDIR,
path_type.JOBDIR,
path_type.METADATA,
path_type.OUTPUT,
path_type.OUTPUT_WORKDIR,
path_type.OUTPUT_METADATA,
path_type.OUTPUT_JOBDIR,
]
ALL_PATH_TYPES = ACTION_DEFAULT_PATH_TYPES + [path_type.UNSTRUCTURED]
MISSING_FILES_ENDPOINT_ERROR = "Attempted to use remote_transfer action without defining a files_endpoint."
MISSING_SSH_KEY_ERROR = "Attempt to use file transfer action requiring an SSH key without specifying a ssh_key."
class FileActionMapper(object):
"""
Objects of this class define how paths are mapped to actions.
>>> json_string = r'''{"paths": [ \
{"path": "/opt/galaxy", "action": "none"}, \
{"path": "/galaxy/data", "action": "transfer"}, \
{"path": "/cool/bamfiles/**/*.bam", "action": "copy", "match_type": "glob"}, \
{"path": ".*/dataset_\\\\d+.dat", "action": "copy", "match_type": "regex"} \
]}'''
>>> from tempfile import NamedTemporaryFile
>>> from os import unlink
>>> def mapper_for(default_action, config_contents):
... f = NamedTemporaryFile(delete=False)
... f.write(config_contents.encode('UTF-8'))
... f.close()
... mock_client = Bunch(default_file_action=default_action, action_config_path=f.name, files_endpoint=None)
... mapper = FileActionMapper(mock_client)
... as_dict = config=mapper.to_dict()
... mapper = FileActionMapper(config=as_dict) # Serialize and deserialize it to make sure still works
... unlink(f.name)
... return mapper
>>> mapper = mapper_for(default_action='none', config_contents=json_string)
>>> # Test first config line above, implicit path prefix mapper
>>> action = mapper.action({'path': '/opt/galaxy/tools/filters/catWrapper.py'}, 'input')
>>> action.action_type == u'none'
True
>>> action.staging_needed
False
>>> # Test another (2nd) mapper, this one with a different action
>>> action = mapper.action({'path': '/galaxy/data/files/000/dataset_1.dat'}, 'input')
>>> action.action_type == u'transfer'
True
>>> action.staging_needed
True
>>> # Always at least copy work_dir outputs.
>>> action = mapper.action({'path': '/opt/galaxy/database/working_directory/45.sh'}, 'workdir')
>>> action.action_type == u'copy'
True
>>> action.staging_needed
True
>>> # Test glob mapper (matching test)
>>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam'}, 'input').action_type == u'copy'
True
>>> # Test glob mapper (non-matching test)
>>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam.bai'}, 'input').action_type == u'none'
True
>>> # Regex mapper test.
>>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'input').action_type == u'copy'
True
>>> # Doesn't map unstructured paths by default
>>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'none'
True
>>> input_only_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \
{"path": "/", "action": "transfer", "path_types": "input"} \
] }''')
>>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'input').action_type == u'transfer'
True
>>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'output').action_type == u'none'
True
>>> unstructured_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \
{"path": "/", "action": "transfer", "path_types": "*any*"} \
] }''')
>>> unstructured_mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'transfer'
True
>>> match_type_only_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \
{"action": "transfer", "path_types": "input"}, \
{"action": "remote_copy", "path_types": "output"} \
] }''')
>>> input_action = match_type_only_mapper.action({}, 'input')
>>> input_action.action_type
'transfer'
>>> output_action = match_type_only_mapper.action({}, 'output')
>>> output_action.action_type
'remote_copy'
"""
def __init__(self, client=None, config=None):
if config is None and client is None:
message = "FileActionMapper must be constructed from either a client or a config dictionary."
raise Exception(message)
if config is None:
config = self.__client_to_config(client)
self.default_action = config.get("default_action", "transfer")
self.ssh_key = config.get("ssh_key", None)
self.ssh_user = config.get("ssh_user", None)
self.ssh_host = config.get("ssh_host", None)
self.ssh_port = config.get("ssh_port", None)
self.mappers = mappers_from_dicts(config.get("paths", []))
self.files_endpoint = config.get("files_endpoint", None)
def action(self, source, type, mapper=None):
path = source.get("path", None)
mapper = self.__find_mapper(path, type, mapper)
action_class = self.__action_class(path, type, mapper)
file_lister = DEFAULT_FILE_LISTER
action_kwds = {}
if mapper:
file_lister = mapper.file_lister
action_kwds = mapper.action_kwds
action = action_class(source, file_lister=file_lister, **action_kwds)
self.__process_action(action, type)
return action
def unstructured_mappers(self):
""" Return mappers that will map 'unstructured' files (i.e. go beyond
mapping inputs, outputs, and config files).
"""
return filter(lambda m: path_type.UNSTRUCTURED in m.path_types, self.mappers)
def to_dict(self):
return dict(
default_action=self.default_action,
files_endpoint=self.files_endpoint,
ssh_key=self.ssh_key,
ssh_user=self.ssh_user,
ssh_port=self.ssh_port,
ssh_host=self.ssh_host,
paths=list(map(lambda m: m.to_dict(), self.mappers))
)
def __client_to_config(self, client):
action_config_path = client.action_config_path
if action_config_path:
config = read_file(action_config_path)
else:
config = getattr(client, "file_actions", {})
config["default_action"] = client.default_file_action
config["files_endpoint"] = client.files_endpoint
for attr in ['ssh_key', 'ssh_user', 'ssh_port', 'ssh_host']:
if hasattr(client, attr):
config[attr] = getattr(client, attr)
return config
def __find_mapper(self, path, type, mapper=None):
if not mapper:
if path is not None:
normalized_path = abspath(path)
else:
normalized_path = None
for query_mapper in self.mappers:
if query_mapper.matches(normalized_path, type):
mapper = query_mapper
break
return mapper
def __action_class(self, path, type, mapper):
action_type = self.default_action if type in ACTION_DEFAULT_PATH_TYPES else "none"
if mapper:
action_type = mapper.action_type
if type in ["workdir", "jobdir", "output_workdir", "output_metadata", "output_jobdir"] and action_type == "none":
# We are changing the working_directory/job_directory relative to what
# Galaxy would use, these need to be copied over.
action_type = "copy"
action_class = actions.get(action_type, None)
if action_class is None:
message_template = "Unknown action_type encountered %s while trying to map path %s"
message_args = (action_type, path)
raise Exception(message_template % message_args)
return action_class
def __process_action(self, action, file_type):
""" Extension point to populate extra action information after an
action has been created.
"""
if getattr(action, "inject_url", False):
self.__inject_url(action, file_type)
if getattr(action, "inject_ssh_properties", False):
self.__inject_ssh_properties(action)
def __inject_url(self, action, file_type):
url_base = self.files_endpoint
if not url_base:
raise Exception(MISSING_FILES_ENDPOINT_ERROR)
if "?" not in url_base:
url_base = "%s?" % url_base
# TODO: URL encode path.
url = "%s&path=%s&file_type=%s" % (url_base, action.path, file_type)
action.url = url
def __inject_ssh_properties(self, action):
for attr in ["ssh_key", "ssh_host", "ssh_port", "ssh_user"]:
action_attr = getattr(action, attr)
if action_attr == UNSET_ACTION_KWD:
client_default_attr = getattr(self, attr, None)
setattr(action, attr, client_default_attr)
if action.ssh_key is None:
raise Exception(MISSING_SSH_KEY_ERROR)
REQUIRED_ACTION_KWD = object()
UNSET_ACTION_KWD = "__UNSET__"
class BaseAction(object):
whole_directory_transfer_supported = False
action_spec: Dict[str, Any] = {}
action_type: str
def __init__(self, source, file_lister=None):
self.source = source
self.file_lister = file_lister or DEFAULT_FILE_LISTER
@property
def path(self):
return self.source.get("path")
def unstructured_map(self, path_helper):
unstructured_map = self.file_lister.unstructured_map(self.path)
if self.staging_needed:
# To ensure uniqueness, prepend unique prefix to each name
prefix = unique_path_prefix(self.path)
for path, name in unstructured_map.items():
unstructured_map[path] = join(prefix, name)
else:
path_rewrites = {}
for path in unstructured_map:
rewrite = self.path_rewrite(path_helper, path)
if rewrite:
path_rewrites[path] = rewrite
unstructured_map = path_rewrites
return unstructured_map
@property
def staging_needed(self):
return self.staging != STAGING_ACTION_NONE
@property
def staging_action_local(self):
return self.staging == STAGING_ACTION_LOCAL
def _extend_base_dict(self, **kwds):
base_dict = dict(
path=self.path, # For older Pulsar servers (pre-0.13.0?)
source=self.source,
action_type=self.action_type,
)
base_dict.update(**kwds)
return base_dict
def to_dict(self):
return self._extend_base_dict()
def __str__(self):
as_dict = self.to_dict()
attribute_str = ""
first = True
for key, value in as_dict.items():
if key == "source":
continue
if first:
first = False
else:
attribute_str += ","
attribute_str += "%s=%s" % (key, value)
return "FileAction[%s]" % attribute_str
class NoneAction(BaseAction):
""" This action indicates the corresponding path does not require any
additional action. This should indicate paths that are available both on
the Pulsar client (i.e. Galaxy server) and remote Pulsar server with the same
paths. """
action_type = "none"
staging = STAGING_ACTION_NONE
def to_dict(self):
return self._extend_base_dict()
@classmethod
def from_dict(cls, action_dict):
return NoneAction(source=action_dict["source"])
def path_rewrite(self, path_helper, path=None):
return None
class RewriteAction(BaseAction):
""" This actin indicates the Pulsar server should simply rewrite the path
to the specified file.
"""
action_spec = dict(
source_directory=REQUIRED_ACTION_KWD,
destination_directory=REQUIRED_ACTION_KWD
)
action_type = "rewrite"
staging = STAGING_ACTION_NONE
def __init__(self, source, file_lister=None, source_directory=None, destination_directory=None):
super(RewriteAction, self).__init__(source, file_lister=file_lister)
self.source_directory = source_directory
self.destination_directory = destination_directory
def to_dict(self):
return self._extend_base_dict(
source_directory=self.source_directory,
destination_directory=self.destination_directory,
)
@classmethod
def from_dict(cls, action_dict):
return RewriteAction(
source=action_dict["source"],
source_directory=action_dict["source_directory"],
destination_directory=action_dict["destination_directory"],
)
def path_rewrite(self, path_helper, path=None):
if not path:
path = self.path
new_path = path_helper.from_posix_with_new_base(self.path, self.source_directory, self.destination_directory)
return None if new_path == self.path else new_path
class TransferAction(BaseAction):
""" This actions indicates that the Pulsar client should initiate an HTTP
transfer of the corresponding path to the remote Pulsar server before
launching the job. """
action_type = "transfer"
staging = STAGING_ACTION_LOCAL
class CopyAction(BaseAction):
""" This action indicates that the Pulsar client should execute a file system
copy of the corresponding path to the Pulsar staging directory prior to
launching the corresponding job. """
action_type = "copy"
staging = STAGING_ACTION_LOCAL
class RemoteCopyAction(BaseAction):
""" This action indicates the Pulsar server should copy the file before
execution via direct file system copy. This is like a CopyAction, but
it indicates the action should occur on the Pulsar server instead of on
the client.
"""
action_type = "remote_copy"
staging = STAGING_ACTION_REMOTE
@classmethod
def from_dict(cls, action_dict):
return RemoteCopyAction(source=action_dict["source"])
def write_to_path(self, path):
copy_to_path(open(self.path, "rb"), path)
def write_from_path(self, pulsar_path):
destination = self.path
parent_directory = dirname(destination)
if not exists(parent_directory):
makedirs(parent_directory)
with open(pulsar_path, "rb") as f:
copy_to_path(f, destination)
class RemoteTransferAction(BaseAction):
""" This action indicates the Pulsar server should transfer the file before
execution via one of the remote transfer implementations. This is like a TransferAction, but
it indicates the action requires network access to the staging server, and
should be executed via ssh/rsync/etc
"""
inject_url = True
action_type = "remote_transfer"
staging = STAGING_ACTION_REMOTE
def __init__(self, source, file_lister=None, url=None):
super(RemoteTransferAction, self).__init__(source, file_lister=file_lister)
self.url = url
def to_dict(self):
return self._extend_base_dict(url=self.url)
@classmethod
def from_dict(cls, action_dict):
return RemoteTransferAction(source=action_dict["source"], url=action_dict["url"])
def write_to_path(self, path):
get_file(self.url, path)
def write_from_path(self, pulsar_path):
post_file(self.url, pulsar_path)
class RemoteObjectStoreCopyAction(BaseAction):
"""
"""
action_type = "remote_object_store_copy"
staging = STAGING_ACTION_REMOTE
inject_object_store = True
@classmethod
def from_dict(cls, action_dict):
return RemoteObjectStoreCopyAction(source=action_dict["source"])
def write_to_path(self, path):
assert self.object_store # Make sure object_store attribute injected
assert "object_store_ref" in self.source
object_store_ref = self.source["object_store_ref"]
dataset_object = Bunch(
id=object_store_ref["dataset_id"],
uuid=object_store_ref["dataset_uuid"],
object_store_id=object_store_ref["object_store_id"],
)
filename = self.object_store.get_filename(dataset_object)
copy_to_path(open(filename, 'rb'), path)
def write_from_path(self, pulsar_path):
raise NotImplementedError("Writing raw files to object store not supported at this time.")
class PubkeyAuthenticatedTransferAction(BaseAction):
"""Base class for file transfers requiring an SSH public/private key
"""
inject_ssh_properties = True
action_spec = dict(
ssh_key=UNSET_ACTION_KWD,
ssh_user=UNSET_ACTION_KWD,
ssh_host=UNSET_ACTION_KWD,
ssh_port=UNSET_ACTION_KWD,
)
staging = STAGING_ACTION_REMOTE
def __init__(self, source, file_lister=None, ssh_user=UNSET_ACTION_KWD,
ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ssh_key=UNSET_ACTION_KWD):
super(PubkeyAuthenticatedTransferAction, self).__init__(source, file_lister=file_lister)
self.ssh_user = ssh_user
self.ssh_host = ssh_host
self.ssh_port = ssh_port
self.ssh_key = ssh_key
def to_dict(self):
return self._extend_base_dict(
ssh_user=self.ssh_user,
ssh_host=self.ssh_host,
ssh_port=self.ssh_port
)
@contextmanager
def _serialized_key(self):
key_file = self.__serialize_ssh_key()
yield key_file
self.__cleanup_ssh_key(key_file)
def __serialize_ssh_key(self):
f = tempfile.NamedTemporaryFile(delete=False)
if self.ssh_key is not None:
f.write(self.ssh_key.encode("utf-8"))
else:
raise Exception("SSH_KEY not available")
return f.name
def __cleanup_ssh_key(self, keyfile):
if exists(keyfile):
unlink(keyfile)
class RsyncTransferAction(PubkeyAuthenticatedTransferAction):
action_type = "remote_rsync_transfer"
@classmethod
def from_dict(cls, action_dict):
return RsyncTransferAction(source=action_dict["source"],
ssh_user=action_dict["ssh_user"],
ssh_host=action_dict["ssh_host"],
ssh_port=action_dict["ssh_port"],
ssh_key=action_dict["ssh_key"])
def write_to_path(self, path):
with self._serialized_key() as key_file:
rsync_get_file(self.path, path, self.ssh_user, self.ssh_host,
self.ssh_port, key_file)
def write_from_path(self, pulsar_path):
with self._serialized_key() as key_file:
rsync_post_file(pulsar_path, self.path, self.ssh_user,
self.ssh_host, self.ssh_port, key_file)
class ScpTransferAction(PubkeyAuthenticatedTransferAction):
action_type = "remote_scp_transfer"
@classmethod
def from_dict(cls, action_dict):
return ScpTransferAction(source=action_dict["source"],
ssh_user=action_dict["ssh_user"],
ssh_host=action_dict["ssh_host"],
ssh_port=action_dict["ssh_port"],
ssh_key=action_dict["ssh_key"])
def write_to_path(self, path):
with self._serialized_key() as key_file:
scp_get_file(self.path, path, self.ssh_user, self.ssh_host,
self.ssh_port, key_file)
def write_from_path(self, pulsar_path):
with self._serialized_key() as key_file:
scp_post_file(pulsar_path, self.path, self.ssh_user, self.ssh_host,
self.ssh_port, key_file)
class MessageAction(object):
""" Sort of pseudo action describing "files" store in memory and
transferred via message (HTTP, Python-call, MQ, etc...)
"""
action_type = "message"
staging = STAGING_ACTION_DEFAULT
def __init__(self, contents, client=None):
self.contents = contents
self.client = client
@property
def staging_needed(self):
return True
@property
def staging_action_local(self):
# Ekkk, cannot be called if created through from_dict.
# Shouldn't be a problem the way it is used - but is an
# object design problem.
return self.client.prefer_local_staging
def to_dict(self):
return dict(contents=self.contents, action_type=MessageAction.action_type)
@classmethod
def from_dict(cls, action_dict):
return MessageAction(contents=action_dict["contents"])
def write_to_path(self, path):
open(path, "w").write(self.contents)
DICTIFIABLE_ACTION_CLASSES = [
RemoteCopyAction,
RemoteTransferAction,
MessageAction,
RsyncTransferAction,
ScpTransferAction,
RemoteObjectStoreCopyAction
]
def from_dict(action_dict):
action_type = action_dict.get("action_type", None)
target_class = None
for action_class in DICTIFIABLE_ACTION_CLASSES:
if action_type == action_class.action_type:
target_class = action_class
if not target_class:
message = "Failed to recover action from dictionary - invalid action type specified %s." % action_type
raise Exception(message)
if "source" in action_dict:
action_dict.pop("path") # remove redundant information stored for backward compatibility.
elif "path" in action_dict:
# legacy message received from older Pulsar client, pop the path from the dict
# and convert it to a source.
source = {"path": action_dict.pop("path")}
action_dict["source"] = source
return target_class.from_dict(action_dict)
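# A minimal sketch (hypothetical path and URL) of the dictionary round trip used
# when actions are shipped between the Pulsar client and server: to_dict() on one
# side, from_dict() on the other. Only the classes listed in
# DICTIFIABLE_ACTION_CLASSES above support this.
def _example_action_round_trip():
    action = RemoteTransferAction(
        source={"path": "/galaxy/datasets/dataset_1.dat"},  # hypothetical Galaxy-side path
        url="https://pulsar.example.org/files?job_id=42",   # hypothetical staging endpoint
    )
    recovered = from_dict(action.to_dict())
    assert isinstance(recovered, RemoteTransferAction)
    assert recovered.url == action.url
    return recovered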
class BasePathMapper(object):
match_type: str
def __init__(self, config):
action_type = config.get('action', DEFAULT_MAPPED_ACTION)
action_class = actions.get(action_type, None)
action_kwds = action_class.action_spec.copy()
for key, value in action_kwds.items():
if key in config:
action_kwds[key] = config[key]
elif value is REQUIRED_ACTION_KWD:
message_template = "action_type %s requires key word argument %s"
message = message_template % (action_type, key)
raise Exception(message)
else:
action_kwds[key] = value
self.action_type = action_type
self.action_kwds = action_kwds
path_types_str = config.get('path_types', "*defaults*")
path_types_str = path_types_str.replace("*defaults*", ",".join(ACTION_DEFAULT_PATH_TYPES))
path_types_str = path_types_str.replace("*any*", ",".join(ALL_PATH_TYPES))
self.path_types = path_types_str.split(",")
self.file_lister = FileLister(config)
def matches(self, path, path_type):
path_type_matches = path_type in self.path_types
rval = path_type_matches and self._path_matches(path)
return rval
def _extend_base_dict(self, **kwds):
base_dict = dict(
action=self.action_type,
path_types=",".join(self.path_types),
match_type=self.match_type
)
base_dict.update(self.file_lister.to_dict())
base_dict.update(self.action_kwds)
base_dict.update(**kwds)
return base_dict
def to_pattern(self):
raise NotImplementedError()
class PathTypeOnlyMapper(BasePathMapper):
match_type = 'path_type_only'
def __init__(self, config):
super(PathTypeOnlyMapper, self).__init__(config)
def _path_matches(self, path):
return True
def to_dict(self):
return self._extend_base_dict()
class PrefixPathMapper(BasePathMapper):
match_type = 'prefix'
def __init__(self, config):
super(PrefixPathMapper, self).__init__(config)
self.prefix_path = abspath(config['path'])
def _path_matches(self, path):
return path is not None and path.startswith(self.prefix_path)
def to_pattern(self):
pattern_str = r"(%s%s[^\s,\"\']+)" % (escape(self.prefix_path), escape(sep))
return compile(pattern_str)
def to_dict(self):
return self._extend_base_dict(path=self.prefix_path)
class GlobPathMapper(BasePathMapper):
match_type = 'glob'
def __init__(self, config):
super(GlobPathMapper, self).__init__(config)
self.glob_path = config['path']
def _path_matches(self, path):
return path is not None and fnmatch.fnmatch(path, self.glob_path)
def to_pattern(self):
return compile(fnmatch.translate(self.glob_path))
def to_dict(self):
return self._extend_base_dict(path=self.glob_path)
class RegexPathMapper(BasePathMapper):
match_type = 'regex'
def __init__(self, config):
super(RegexPathMapper, self).__init__(config)
self.pattern_raw = config['path']
self.pattern = compile(self.pattern_raw)
def _path_matches(self, path):
return path is not None and self.pattern.match(path) is not None
def to_pattern(self):
return self.pattern
def to_dict(self):
return self._extend_base_dict(path=self.pattern_raw)
MAPPER_CLASSES = [PathTypeOnlyMapper, PrefixPathMapper, GlobPathMapper, RegexPathMapper]
MAPPER_CLASS_DICT = dict(map(lambda c: (c.match_type, c), MAPPER_CLASSES))
def mappers_from_dicts(mapper_def_list):
return list(map(lambda m: _mappper_from_dict(m), mapper_def_list))
def _mappper_from_dict(mapper_dict):
if "path" in mapper_dict:
map_type = mapper_dict.get('match_type', DEFAULT_PATH_MAPPER_TYPE)
else:
map_type = 'path_type_only'
return MAPPER_CLASS_DICT[map_type](mapper_dict)
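# A minimal sketch (hypothetical paths and path types) of how mapper dictionaries,
# the kind a Galaxy job destination might supply, become concrete path mappers.
# SSH credentials for the rsync action are left unset here; they would normally be
# filled in from the client defaults (see __inject_ssh_properties above).
def _example_mappers_from_config():
    mapper_dicts = [
        # Prefix match: anything under /galaxy/data is rsync'ed to the Pulsar server.
        {"path": "/galaxy/data", "match_type": "prefix", "action": "remote_rsync_transfer"},
        # Glob match: BAM files in /scratch are copied into the staging directory.
        {"path": "/scratch/*.bam", "match_type": "glob", "action": "copy"},
    ]
    mappers = mappers_from_dicts(mapper_dicts)
    return [m.matches("/galaxy/data/dataset_1.dat", "input") for m in mappers]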
class FileLister(object):
def __init__(self, config):
self.depth = int(config.get("depth", "0"))
def to_dict(self):
return dict(
depth=self.depth
)
def unstructured_map(self, path):
depth = self.depth
if self.depth == 0:
return {path: basename(path)}
else:
while depth > 0:
path = dirname(path)
depth -= 1
return dict([(join(path, f), f) for f in directory_files(path)])
DEFAULT_FILE_LISTER = FileLister(dict(depth=0))
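# A minimal sketch (hypothetical path) of FileLister behaviour: with depth=0 a
# path maps to its own basename; a depth > 0 walks up that many directories and
# lists the files found under that directory via directory_files().
def _example_file_lister():
    lister = FileLister(dict(depth=0))
    return lister.unstructured_map("/galaxy/tools/extra/summary.txt")
    # -> {"/galaxy/tools/extra/summary.txt": "summary.txt"}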
ACTION_CLASSES: List[Type[BaseAction]] = [
NoneAction,
RewriteAction,
TransferAction,
CopyAction,
RemoteCopyAction,
RemoteTransferAction,
RemoteObjectStoreCopyAction,
RsyncTransferAction,
ScpTransferAction,
]
actions = dict([(clazz.action_type, clazz) for clazz in ACTION_CLASSES])
__all__ = (
'FileActionMapper',
'path_type',
'from_dict',
'MessageAction',
'RemoteTransferAction', # For testing
)
| natefoo/pulsar | pulsar/client/action_mapper.py | Python | apache-2.0 | 28,849 | ["Galaxy"] | ea955699b309b47bf40cd50423ad8defb95860246bfea79c105e86058a2e2e1f |
# -*- coding: utf-8 -*-
"""
Automated optimization of simulation of Basket cell
"""
__version__ = 0.1
from neuron import h
import neuron
import numpy as np
from neurotune import optimizers
from neurotune import evaluators
from neurotune import controllers
import os
import sys
class Simulation(object):
"""
    Simulation class - inspired by an example by Philipp Rautenberg.
    Objects of this class control a current clamp simulation. Example of use:
>>> cell = Cell() #some kind of NEURON section
>>> sim = Simulation(cell)
>>> sim.go()
>>> sim.show()
"""
def __init__(self, recording_section, sim_time=1000, dt=0.05, v_init=-60):
self.recording_section = recording_section
self.sim_time = sim_time
self.dt = dt
self.go_already = False
self.v_init=v_init
def set_IClamp(self, delay=5, amp=0.1, dur=1000):
"""
Initializes values for current clamp.
Default values:
delay = 5 [ms]
amp = 0.1 [nA]
dur = 1000 [ms]
"""
stim = h.IClamp(self.recording_section(0.5))
stim.delay = delay
stim.amp = amp
stim.dur = dur
self.stim = stim
def set_recording(self):
# Record Time
self.rec_t = neuron.h.Vector()
self.rec_t.record(h._ref_t)
# Record Voltage
self.rec_v = h.Vector()
self.rec_v.record(self.recording_section(0.5)._ref_v)
def show(self):
"""
        Plot the result of the simulation once it has been run (via `go()`)
"""
from matplotlib import pyplot as plt
if self.go_already:
x = np.array(self.rec_t)
y = np.array(self.rec_v)
plt.plot(x, y)
plt.title("Simulation voltage vs time")
plt.xlabel("Time [ms]")
plt.ylabel("Voltage [mV]")
else:
print("""First you have to `go()` the simulation.""")
plt.show()
def go(self, sim_time=None):
"""
        Start the simulation once it has been initialized
"""
self.set_recording()
h.dt = self.dt
h.finitialize(self.v_init)
neuron.init()
if sim_time:
neuron.run(sim_time)
else:
neuron.run(self.sim_time)
self.go_already = True
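# A minimal sketch (hypothetical section and stimulus values) of driving the
# Simulation class directly, mirroring the usage shown in its docstring: build a
# NEURON section, attach a current clamp, run, and collect the traces.
def _example_simulation():
    soma = h.Section()
    soma.insert('pas')  # passive membrane only, for illustration
    sim = Simulation(soma, sim_time=200, dt=0.05, v_init=-65.0)
    sim.set_IClamp(delay=5, amp=0.1, dur=100)
    sim.go()
    return np.array(sim.rec_t), np.array(sim.rec_v)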
class BasketCellController():
"""
    This is a canonical example of a controller class.
    It provides a run() method, which must accept at least two parameters:
    1. candidates (a list of lists of numbers)
    2. the corresponding parameter names.
"""
def __init__(self, show_plots):
self.show_plots = show_plots
def run(self,candidates,parameters):
"""
        Run a simulation for each candidate.
        This method loops through each candidate and runs the simulation
        corresponding to its parameter values. It populates a list called
        traces with the resulting voltage traces and returns it.
"""
traces = []
for candidate in candidates:
sim_var = dict(zip(parameters,candidate))
t,v = self.run_individual(sim_var)
traces.append([t,v])
return traces
def set_section_mechanism(self, sec, mech, mech_attribute, mech_value):
"""
Set the value of an attribute of a NEURON section
"""
for seg in sec:
setattr(getattr(seg, mech), mech_attribute, mech_value)
def run_individual(self,sim_var):
"""
Run an individual simulation.
The candidate data has been flattened into the sim_var dict. The
sim_var dict contains parameter:value key value pairs, which are
applied to the model before it is simulated.
The simulation itself is carried out via the instantiation of a
Simulation object (see Simulation class above).
"""
#make compartments and connect them
soma=h.Section()
axon=h.Section()
soma.connect(axon)
axon.insert('na')
axon.insert('kv')
axon.insert('kv_3')
soma.insert('na')
soma.insert('kv')
soma.insert('kv_3')
soma.diam=10
soma.L=10
axon.diam=2
axon.L=100
#soma.insert('canrgc')
#soma.insert('cad2')
self.set_section_mechanism(axon,'na','gbar',sim_var['axon_gbar_na'])
self.set_section_mechanism(axon,'kv','gbar',sim_var['axon_gbar_kv'])
self.set_section_mechanism(axon,'kv_3','gbar',sim_var['axon_gbar_kv3'])
self.set_section_mechanism(soma,'na','gbar',sim_var['soma_gbar_na'])
self.set_section_mechanism(soma,'kv','gbar',sim_var['soma_gbar_kv'])
self.set_section_mechanism(soma,'kv_3','gbar',sim_var['soma_gbar_kv3'])
for sec in h.allsec():
sec.insert('pas')
sec.Ra=300
sec.cm=0.75
self.set_section_mechanism(sec,'pas','g',1.0/30000)
self.set_section_mechanism(sec,'pas','e',-70)
h.vshift_na=-5.0
sim=Simulation(soma,sim_time=1000,v_init=-70.0)
sim.set_IClamp(150, 0.1, 750)
sim.go()
if self.show_plots:
sim.show()
return np.array(sim.rec_t), np.array(sim.rec_v)
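# A minimal sketch (hypothetical conductance values, chosen inside the bounds used
# in main() below) of evaluating a single candidate outside the optimizer loop.
# It requires the same compiled NEURON mechanisms ('na', 'kv', 'kv_3') as the
# optimization itself.
def _example_single_candidate():
    controller = BasketCellController(show_plots=False)
    sim_var = {'axon_gbar_na': 3000.0, 'axon_gbar_kv': 20.0, 'axon_gbar_kv3': 0.5,
               'soma_gbar_na': 100.0, 'soma_gbar_kv': 10.0, 'soma_gbar_kv3': 1.0}
    return controller.run_individual(sim_var)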
def main():
"""
The optimization runs in this main method
"""
show_plots = not (len(sys.argv) == 2 and sys.argv[1] == '-nogui')
#make a controller
my_controller= BasketCellController(show_plots)
#parameters to be modified in each simulation
parameters = ['axon_gbar_na',
'axon_gbar_kv',
'axon_gbar_kv3',
'soma_gbar_na',
'soma_gbar_kv',
'soma_gbar_kv3']
#above parameters will not be modified outside these bounds:
min_constraints = [0,0,0,0,0,0]
max_constraints = [10000,30,1,300,20,2]
# EXAMPLE - how to set a seed
#manual_vals=[50,50,2000,70,70,5,0.1,28.0,49.0,-73.0,23.0]
#analysis variables, these default values will do:
analysis_var={'peak_delta':0,
'baseline':0,
'dvdt_threshold':2}
weights={'average_minimum': 1.0,
'spike_frequency_adaptation': 1.0,
'trough_phase_adaptation': 1.0,
'mean_spike_frequency': 1.0,
'average_maximum': 1.0,
'trough_decay_exponent': 1.0,
'interspike_time_covar': 1.0,
'min_peak_no': 1.0,
'spike_broadening': 1.0,
'spike_width_adaptation': 1.0,
'max_peak_no': 1.0,
'first_spike_time': 1.0,
'peak_decay_exponent': 1.0,
'pptd_error':1.0}
#make an evaluator, using automatic target evaluation:
my_evaluator=evaluators.IClampEvaluator(controller=my_controller,
analysis_start_time=1,
analysis_end_time=500,
target_data_path='100pA_1a.csv',
parameters=parameters,
analysis_var=analysis_var,
weights=weights,
targets=None, # because we're using automatic
automatic=True)
#make an optimizer
my_optimizer=optimizers.CustomOptimizerA(max_constraints,min_constraints,my_evaluator,
population_size=3,
max_evaluations=100,
num_selected=3,
num_offspring=3,
num_elites=1,
seeds=None)
#run the optimizer
my_optimizer.optimize(do_plot=show_plots)
main()
| vellamike/neurotune | examples/example_1/optimization.py | Python | bsd-3-clause | 8,119 | ["NEURON"] | 619914e34a232977244e8e52c06c99e77f566773919b6a46f168e616517aef94 |