| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
import re
from constance import config
from django.views.generic import FormView, TemplateView
from haystack.views import SearchView
from recipes.models import Recipe, RecipeTag
class Home(TemplateView):
"""The first page that users see when they visit the site"""
template_name = "home.html"
def get_context_data(self, **kwargs):
return {'featured_recipes': Recipe.objects.filter_featured(
limit=config.FEATURED_RECIPE_COUNT),
'newest_recipes': Recipe.objects.filter_newest(
limit=config.NEWEST_RECIPE_COUNT),
'tags': RecipeTag.objects.filter_list(exclude_miscellaneous=False)}
class RecipeSearchView(SearchView):
"""Primary search page for recipes. Uses Haystack."""
results_per_page = config.SEARCH_RESULTS_PER_PAGE
def build_form(self, *args, **kwargs):
form = super(RecipeSearchView, self).build_form(*args, **kwargs)
user = self.request.user
form.user = user.id if user.is_authenticated() else None
return form
def get_selected_tags(self):
"""Determines which tags should show up as 'selected' in the view"""
# TODO: test
return self.form.tags
def extra_context(self):
# TODO: test
return {'is_search': True,
'tags': RecipeTag.objects.filter_list(exclude_miscellaneous=False),
'selected_tags': self.get_selected_tags(),
'order': self.request.GET.get('order')}
| kamni/nodonuts | recipes/views.py | Python | agpl-3.0 | 1,560 | ["VisIt"] | 6e30a796259d7ab4b48859b86f7bdb21edf730cd5548cb6b489c567f574008c1 |
from __future__ import division # needed for 1/2 = 0.5
import numpy as np
from accelerator import Lattice, Element, LinearElement, Quad, Drift, LieAlgebra, LieAlgElement, leapfrog, Dipole, Cavity
#from sympy.parsing.sympy_parser import parse_expr
#from sympy import *
from particleFactory import straight, scanned, randomed, gaussian, gaussianTwiss3D, envelopeFromMultipart
from plotting import plotEverything, plotEnvelope, plotPhaseSpace
from IOHandler import saveAll, loadAll, saveMultipart, loadMultipart, saveTwiss, loadTwiss, saveEnvelope, loadEnvelope, saveLattice, loadLattice, saveSummer2015Format, loadSummer2015Format, loadSummer2015Formatzasx, parseLatticeString
from scipy import constants
from relativity import betaFromE
import math
#sigma = 0.001 # the standard deviation that the user will enter
#epsilon = sqrt(sigmax**2*sigmaxp**2-sigmaxxp**2)
### Non-linear
#order = 5 # user sets this
#
#LA = LieAlgebra()
#
#quaddefocuskval = 0.1
#quaddefocuslval = 1
#quaddefocusNumFuns = LA.hamToNumFuns(quadhamdefocus, quaddefocuskval, quaddefocuslval, order)
#
##print "quaddefocusNumFuns:" + str(quaddefocusNumFuns)
#
##diffAlgRes = (quaddefocusNumFuns[0](particle1), quaddefocusNumFuns[1](particle1), quaddefocusNumFuns[2](particle1), quaddefocusNumFuns[3](particle1), quaddefocusNumFuns[4](particle1), quaddefocusNumFuns[5](particle1))
#
##sextukval =
##sextupoleNumFuns = DA.hamToNumFuns(sextupoleham, kval, lval, order)
#
### Multiparticle
#x = 0.0
#xp =1.0
#y = 0.0
#yp =1.0
#z = 0.0
#zp =0.0
#s = 1.0 # from ref 1 in section 2.3
#zvector = np.array([x, xp, y, yp, z, zp])
#particle1 = np.array([zvector, s])
#particle2 = np.array([-zvector, s]) # careful, if this is the same expression as for particle1 there will only be one object!
#multipart = np.array([particle1, particle2])
#multipart2 = copy.copy(multipart)
##print "len(np.atleast_1d(multipart))" + str(len(np.atleast_1d(multipart)))
##print "multipart[0][0:6]" + str(multipart[0][0:6])
##print "multipart[1] (s)" + str(multipart[1])
#print "multipart" + str(multipart)
##print "len(multipart) " + str(len(multipart))
#
#
## twiss comes as [alpha_x, beta_x, epsilon_rms_x, alpha_y, beta_y, epsilon_rms_y, alpha_z, beta_z, epsilon_rms_z]
#twiss = np.array([0.0, 10.3338028723, 1e-06, -3.331460652e-16, 8.85901414121, 1e-06, -3.331460652e-16, 8.85901414121, 1e-06])
#
## this new def needs twiss params perhaps? YES, since otherwise SC won't work
## envelope comes as [sigma_x**2, sigma_x*sigma_xp, sigma_xp**2, sigma_y**2, sigma_y*sigma_yp, sigma_yp**2, sigma_z**2, sigma_z*sigma_zp, sigma_zp**2]
#envelope = np.array([1, 0, 0, 1, 0, 0, 1, 0, 0])
#
#
### Lattice construction
#spaceChargeOn = 1
#drift = Drift('drift', 1, spaceChargeOn, multipart, twiss)
#
## K = sqrt(e*g/p) ,from ref E.
#quad = Quad('quad', 0.1, 1, spaceChargeOn, multipart, twiss)
#
#lattice = Lattice('lattice')
#lattice.appendElement(drift)
#lattice.appendElement(quad)
#print "latticeprint: \n" + lattice.printLattice()
##print "multipart before latt eval: " + str(multipart)
#partres, envres = lattice.evaluate(multipart,envelope) ### changes multipart and envelope!!!!!!!!!!!!!!!!!!
##print "multipart2 after latt eval: " + str(multipart2)
#
#print "partres: " + str(partres)
#print "envres: " + str(envres)
#
## Using the Lie algebra "raw"
#LieAlgRes = (quaddefocusNumFuns[0](particle1[0][0],particle1[0][1],particle1[0][2],particle1[0][3],particle1[0][4],particle1[0][5]), quaddefocusNumFuns[1](particle1[0][0],particle1[0][1],particle1[0][2],particle1[0][3],particle1[0][4],particle1[0][5]), quaddefocusNumFuns[2](particle1[0][0],particle1[0][1],particle1[0][2],particle1[0][3],particle1[0][4],particle1[0][5]), quaddefocusNumFuns[3](particle1[0][0],particle1[0][1],particle1[0][2],particle1[0][3],particle1[0][4],particle1[0][5]), quaddefocusNumFuns[4](particle1[0][0],particle1[0][1],particle1[0][2],particle1[0][3],particle1[0][4],particle1[0][5]), quaddefocusNumFuns[5](particle1[0][0],particle1[0][1],particle1[0][2],particle1[0][3],particle1[0][4],particle1[0][5]))
##print "DiffAlgRes: " + str(diffAlgRes) # MATCHES the linear calculation!!!!! :)
#
## Using the differential algebra through DiffAlgElement
#LieAlgElemQuad = LieAlgElement("LieAlgElemQuad", LA, quadhamdefocus, quaddefocuskval, quaddefocuslval, order, 0, multipart2, envelope)
#LieAlgElemQuadpartres, LieAlgElemQuadenvres = LieAlgElemQuad.evaluate(multipart2,envelope) # not the same as raw! Since the element is split! But if n = 1 in the class then there is a very good match!!!! :)
#print "LieAlgElemQuadRes: " + str(LieAlgElemQuadpartres)
### Tons of particles
print "Tons of particles..."
#nbrOfParticles = 10
#multiparttonsMat = straight(nbrOfParticles)
#multiparttonsDiffAlg = straight(nbrOfParticles)
#
##print "multiparttonsMat: \n" + str(multiparttonsMat)
#
#tonsMatPartRes, tonsMatEnvRes = lattice.evaluate(multiparttonsMat,envelope)
#tonsLieAlgPartRes, tonsLieAlgEnvRes = LieAlgElemQuad.evaluate(multiparttonsDiffAlg,envelope)
#
##print "tonsMatPartRes: \n" + str(tonsMatPartRes)
##print "tonsLieAlgPartRes: \n" + str(tonsLieAlgPartRes)
#
## Checking the others
#scannedparts = scanned(nbrOfParticles)
##print "scannedparts: \n" + str(scannedparts)
#
#randomedparts = randomed(nbrOfParticles)
##print "randomedparts: \n" + str(randomedparts)
#
#gaussianparts = gaussian(nbrOfParticles)
##print "gaussianparts: \n" + str(gaussianparts)
#
#gaussianTwiss3Dparts = gaussianTwiss3D(nbrOfParticles, twiss)
##print "gaussianTwiss3Dparts: \n" + str(gaussianTwiss3Dparts)
### Leapfrog
print "Leapfrog..."
#x_0 = 100.0
#v_0 = 0.0
#def F(x):
# return -x
#
## F should perhaps be numfuns: xpNumFun, ypNumFun, zpNumFun
## F is Force!
#
#L = 4
#n = 4*10*10000
#h = L/n
#
#x_of_i, v_of_i = leapfrog(x_0, v_0, F, h, n)
##print "Results:"
#print "x_of_i[-1]: " + str(x_of_i[-1])
#print "v_of_i[-1]: " + str(v_of_i[-1])
### IOHandling
print "IOHandling..."
## multipart
#filenameMultipart = "savedParticles"
#saveMultipart(filenameMultipart, multipart2)
#print "multipart2 stored in " + filenameMultipart
#
#loadedmultipart = loadMultipart(filenameMultipart + ".npy")
#print "loaded particles: \n" + str(loadedmultipart)
#
## twiss
#filenameTwiss = "savedTwiss"
#saveTwiss(filenameTwiss, twiss)
#print "twiss stored in " + filenameTwiss
#
#loadedtwiss = loadMultipart(filenameTwiss + ".npy")
#print "loaded twiss: \n" + str(loadedtwiss)
#
## envelope
#filenameEnvelope = "savedEnvelope"
#saveEnvelope(filenameEnvelope, envelope)
#print "envelope stored in " + filenameEnvelope
#
#loadedenvelope = loadEnvelope(filenameEnvelope + ".npy")
#print "loaded envelope: \n" + str(loadedenvelope)
#
## lattice
#filenameLattice = "savedLattice.npy"
#saveLattice(filenameLattice, lattice)
#print "lattice stored in " + filenameLattice
#
#loadedlattice = loadLattice(filenameLattice)
#print "loaded lattice: \n" + loadedlattice.printLattice()
#filename = "data/" + "saved"
#saveAll(filename, multipart2, twiss, envelope, lattice)
#multipartload, twissload, envelopeload, latticeload = loadAll(filename)
#print "multipartload: \n" + str(multipartload)
#print "twissload: \n" + str(twissload)
#print "envelopeload: \n" + str(envelopeload)
#print "latticeload: \n" + str(latticeload.printLattice())
#
### Compare with old code
print "Compare with old code/ compare space charges..."
## Load the same particles (old code comp)
datafilepart = "../data/oldformat/" + "inpart1000" + ".txt"
datafiletwiss = "../data/oldformat/" + "intwiss" + ".txt"
#multipartfromold, twissfromold = loadSummer2015Format(datafilepart, datafiletwiss)
# sets z as x from old format
multipartfromold, twissfromold = loadSummer2015Formatzasx(datafilepart, datafiletwiss)
#nbrOfParticles = 1000
#multipartfromold = gaussianTwiss3D(nbrOfParticles, twissfromold)
#print "multipartfromoldcopy: \n" + str(multipartfromoldcopy)
spaceChargeOnInComp = 0
E = 2e9*constants.e # 2GeV to joule from ref F.
freq = 704.42e6 # (Hz) from ref. F
rf_lambda = constants.c/freq # beam data needed
m = constants.m_p
beta = betaFromE(m, E)
q = constants.e
beamdata = [beta, rf_lambda, m, q, E]
#envelopeInComp = np.array([1, 0, 0, 1, 0, 0, 1, 0, 0])
envelopeInComp = envelopeFromMultipart(multipartfromold)
#print "envelopeInComp: " + str(envelopeInComp)
nbrOfSplits = 1
## the lattice will be a FODSO cell (Focusing Quad, Drift, Defocusing Quad, Sextupole, Drift)
compLattice = Lattice('compLattice', beamdata, twissfromold, multipartfromold)
cavityName = "cavity"
cavityLength = 2.0
cavityOscillations = 2
cavityAmplitudeA = 0
cavityAmplitudeB = 30 # 30 MeV / m
cavityE_0 = cavityAmplitudeB
cavitySigma = 1
cavityP = 3
cavityEzofs = [cavityOscillations, cavityAmplitudeA, cavityAmplitudeB, cavityE_0, cavitySigma, cavityP]
cavity = Cavity(cavityName, cavityLength, cavityEzofs, beamdata, nbrOfSplits) # Changes beta in beamdata!
E = cavity.getNewE() # Updates the energy
compLattice.appendElement(cavity)
print compLattice.printLattice()
fQName = "fQ"
fQuadLength = 0.4
fQuadStrength = -0.8 # this is k
fQ = Quad(fQName, fQuadStrength, fQuadLength, spaceChargeOnInComp, multipartfromold, twissfromold, beamdata, nbrOfSplits)
#compLattice.appendElement(fQ)
driftName = "drift"
driftLength = 1.0
compDrift = Drift(driftName, driftLength, spaceChargeOnInComp, multipartfromold, twissfromold, beamdata, nbrOfSplits)
#compLattice.appendElement(compDrift)
dQName = "dQ"
dQuadLength = 0.4
dQuadStrength = 0.8
dQ = Quad(dQName, dQuadStrength, dQuadLength, spaceChargeOnInComp, multipartfromold, twissfromold, beamdata, nbrOfSplits)
#compLattice.appendElement(dQ)
sextuName = "sextu"
hamToUse = "sextupoleham"
sextuLength = 0.3
sextuStrength = 0.6
compOrder = 6
compLattice.createSextupole(sextuName, sextuStrength, sextuLength, compOrder)
#sextu = LieAlgElement(sextuName, hamToUse, sextuStrength, sextuLength, compOrder, spaceChargeOnInComp, multipartfromold, twissfromold, beamdata, nbrOfSplits)
#compLattice.appendElement(sextu)
#compLattice.appendElement(compDrift)
dipoleName = "dipole"
dipoleRho = 5
dipoleAlpha = math.pi/4
dipolen = 0.5
compLattice.createDipole(dipoleName, dipoleRho, dipoleAlpha, dipolen)
#compDipole = Dipole(dipoleName, dipoleRho, dipoleAlpha, dipolen, spaceChargeOnInComp, multipartfromold, twissfromold, beamdata, nbrOfSplits)
#compLattice.appendElement(compDipole)
#compLattice.appendElement(fQ)
compLattice.createQuadrupole(fQName, fQuadStrength, fQuadLength)
#compLattice.appendElement(compDrift)
compLattice.createDrift(driftName, driftLength)
#compLattice.appendElement(dQ)
compLattice.createQuadrupole(dQName, dQuadStrength, dQuadLength)
#compLattice.appendElement(compDrift)
compLattice.createDrift(driftName, driftLength)
#compLattice.appendElement(fQ)
compLattice.createQuadrupole(fQName, fQuadStrength, fQuadLength)
#compLattice.appendElement(compDrift)
compLattice.createDrift(driftName, driftLength)
#compLattice.appendElement(dQ)
compLattice.createQuadrupole(dQName, dQuadStrength, dQuadLength)
#compLattice.appendElement(compDrift)
compLattice.createDrift(driftName, driftLength)
#compLattice.appendElement(fQ)
compLattice.createQuadrupole(fQName, fQuadStrength, fQuadLength)
#compLattice.appendElement(compDrift)
compLattice.createDrift(driftName, driftLength)
#compLattice.appendElement(dQ)
compLattice.createQuadrupole(dQName, dQuadStrength, dQuadLength)
#compLattice.appendElement(compDrift)
compLattice.createDrift(driftName, driftLength)
#compLattice.appendElement(fQ)
#compLattice.appendElement(compDrift)
#compLattice.appendElement(dQ)
#compLattice.appendElement(compDrift)
#
#compLattice.appendElement(fQ)
#compLattice.appendElement(compDrift)
#compLattice.appendElement(dQ)
#compLattice.appendElement(compDrift)
print compLattice.printLattice()
#saveLattice("../data/" + "savedlatticestringsextudipolecavity" + ".npy", compLattice)
## Calculate
partresInComp, envresInComp, twissresInComp = compLattice.evaluate(multipartfromold,envelopeInComp,twissfromold) # Does eval still change input? yes
#saveSummer2015Format("../data/" + "outpartFODSOspaceChargetesttest" + ".txt","../data/" + "outtwiss" + ".txt",partresInComp, twissfromold)
plotEverything(multipartfromold, twissfromold, partresInComp)
### Plotting
print "Plotting..."
#twissin = np.array([0.0, 10.3338028723, 1e-06, -3.331460652e-16, 8.85901414121, 1e-06, -3.331460652e-16, 8.85901414121, 1e-06])
#twissout = twissin
#envelopein = np.array([1, 0, 0, 1, 0, 0, 1, 0, 0])
#envelopeout = envelopein
#
#nbrOfParticlestest = 100
#
#multipartin = gaussianTwiss3D(nbrOfParticlestest, twissout)
#multipartout = multipartin
#
#multipartout, envelopeout = lattice.evaluate(multipartout, envelopeout)
#
##plotEverything(multipartin, twissin, multipartout)
# references
# 1. simulatingbeamswithellipsoidalsymmetry-secondedition
# A. 7.2. Space Charge Impulses in simulatingbeamswithellipsoidalsymmetry-secondedition
# B. A MODIFIED QUADSCAN TECHNIQUE FOR EMITTANCE.pdf
# C. Accelerator-Recipies.pdf by E. Laface
# D. The leapfrog method and other symplectic algorithms for integrating Newtons laws of motion Peter Young Dated April 21 2014
# E. ESS Linac simulator
# F. WEPEA 040
| OscarES/Differential-Algebra-Tracker | testfacility.py | Python | gpl-3.0 | 13,073 | ["Gaussian"] | 251d4053ab89eab89c4fbee7a7d8fd02fb642f1825a87e42bf3bbbf7b8f38c70 |
#!/usr/bin/env python
"""
Convert data from Genbank format to GFF.
Usage:
python gbk_to_gff_conv.py in.gbk > out.gff
Requirements:
BioPython:- http://biopython.org/
"""
import os, sys, re
import collections
from Bio import SeqIO
from common_util import _open_file
def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):
"""
Write the feature information
"""
for gname, ginfo in genes.items():
line = [str(chr_id),
'gbk_to_gff',
ginfo[3],
str(ginfo[0]),
str(ginfo[1]),
'.',
ginfo[2],
'.',
'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]
print '\t'.join(line)
## construct the transcript line if it is not defined in the original file
t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.']
if not transcripts:
t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))
if exons: ## get the entire transcript region from the defined feature
t_line[3] = str(exons[gname][0][0])
t_line[4] = str(exons[gname][0][-1])
elif cds:
t_line[3] = str(cds[gname][0][0])
t_line[4] = str(cds[gname][0][-1])
print '\t'.join(t_line)
if exons:
exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')
if cds:
exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')
if not exons:
exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')
else: ## transcript is defined
for idx in transcripts[gname]:
t_line[2] = idx[3]
t_line[3] = str(idx[0])
t_line[4] = str(idx[1])
t_line.append('ID='+str(idx[2])+';Parent='+str(gname))
print '\t'.join(t_line)
## feature line print call
if exons:
exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')
if cds:
exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')
if not exons:
exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')
if len(genes) == 0: ## feature entry with fragment information
line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', orient, '.']
fStart = fStop = None
for eid, ex in cds.items():
fStart = ex[0][0]
fStop = ex[0][-1]
for eid, ex in exons.items():
fStart = ex[0][0]
fStop = ex[0][-1]
if fStart or fStop:
line[2] = 'gene'
line[3] = str(fStart)
line[4] = str(fStop)
line.append('ID=Unknown_Gene_' + str(unk) + ';Name=Unknown_Gene_' + str(unk))
print "\t".join(line)
if not cds:
line[2] = 'transcript'
else:
line[2] = 'mRNA'
line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)
print "\t".join(line)
if exons:
exon_line_print(line, exons[None], 'Unknown_Transcript_' + str(unk), 'exon')
if cds:
exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')
if not exons:
exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')
unk +=1
return unk
def exon_line_print(temp_line, trx_exons, parent, ftype):
"""
Print the EXON feature line
"""
for ex in trx_exons:
temp_line[2] = ftype
temp_line[3] = str(ex[0])
temp_line[4] = str(ex[1])
temp_line[8] = 'Parent='+parent
print '\t'.join(temp_line)
def gbk_parse(fname):
"""
Extract genome annotation records from GenBank format
"""
fhand = _open_file(fname)
unk = 1
for record in SeqIO.parse(fhand, "genbank"):
gene_tags = dict()
tx_tags = collections.defaultdict(list)
exon = collections.defaultdict(list)
cds = collections.defaultdict(list)
mol_type, chr_id = None, None
for rec in record.features:
if rec.type == 'source':
mol_type = rec.qualifiers['mol_type'][0]
try:
chr_id = rec.qualifiers['chromosome'][0]
except:
chr_id = record.name
continue
strand='-'
strand='+' if rec.strand>0 else strand
fid = None
try:
fid = rec.qualifiers['gene'][0]
except:
pass
transcript_id = None
try:
transcript_id = rec.qualifiers['transcript_id'][0]
except:
pass
if re.search(r'gene', rec.type):
gene_tags[fid] = (rec.location._start.position+1,
rec.location._end.position,
strand,
rec.type,
rec.qualifiers['note'][0])
elif rec.type == 'exon':
exon[fid].append((rec.location._start.position+1,
rec.location._end.position))
elif rec.type=='CDS':
cds[fid].append((rec.location._start.position+1,
rec.location._end.position))
else:
# get all transcripts
if transcript_id:
tx_tags[fid].append((rec.location._start.position+1,
rec.location._end.position,
transcript_id,
rec.type))
# record extracted, generate feature table
unk = feature_table(chr_id, mol_type, strand, gene_tags, tx_tags, cds, exon, unk)
#break
fhand.close()
if __name__=='__main__':
try:
gbkfname = sys.argv[1]
except:
print __doc__
sys.exit(-1)
## extract gbk records
gbk_parse(gbkfname)
| ratschlab/oqtans_tools | GFFtools/0.1/gbk_to_gff_conv.py | Python | mit | 6,462 | ["Biopython"] | b4faafb4261031e92a6e09f6711c7243aec718cf26a09b7a59599fc53dd31362 |
import numpy as np
from ase.optimize import Optimizer
class MDMin(Optimizer):
def __init__(self, atoms, restart=None, logfile='-', trajectory=None,
dt=None):
Optimizer.__init__(self, atoms, restart, logfile, trajectory)
if dt is not None:
self.dt = dt
def initialize(self):
self.v = None
self.dt = 0.2
def read(self):
self.v, self.dt = self.load()
def step(self, f):
atoms = self.atoms
if self.v is None:
self.v = np.zeros((len(atoms), 3))
else:
self.v += 0.5 * self.dt * f
# Correct velocities:
vf = np.vdot(self.v, f)
if vf < 0.0:
self.v[:] = 0.0
else:
self.v[:] = f * vf / np.vdot(f, f)
self.v += 0.5 * self.dt * f
r = atoms.get_positions()
atoms.set_positions(r + self.dt * self.v)
self.dump((self.v, self.dt))
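# Illustrative usage sketch (not part of the original module). Assumes an
# Atoms object with a calculator already attached; the calls below follow the
# standard interface inherited from Optimizer:
#
#   from ase.optimize import MDMin
#   opt = MDMin(atoms, dt=0.2, trajectory='relax.traj')
#   opt.run(fmax=0.05)  # step until the maximum force drops below 0.05 eV/Angstrom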
| freephys/python_ase | ase/optimize/mdmin.py | Python | gpl-3.0 | 977 | ["ASE"] | a412cd7c6cc52f0ef9248471cdf27c2e522a0a08a12b27aee4197a0154174b8b |
from setuptools import setup, find_packages
import os
import glob
package_dir = {'': 'src'}
for pkg in [pkg for pkg in find_packages("src") if pkg.find('.') > -1]:
package_dir['rocketblast' + "." + pkg] = "src" + os.sep + pkg
datadir = os.path.join('data')
data_files = [(datadir, [f for f in glob.glob(os.path.join(datadir, '*'))])]
setup(
name = 'Rocket Blast RCON Troller',
version = '0.1.1',
# maintainer = '',
# maintainer_email = '',
author = 'Martin Danielson',
author_email = '[email protected]',
long_description = open("README.md").read(),
keywords = 'rcon battlefield game servers mods plugin socket connection',
description = 'Plugin system to write game servers modifications (Battlefield)',
license = 'GNU Affero GPL v3',
#platforms = '',
url = 'https://github.com/rocketblast/rcon-troller',
download_url = 'https://github.com/rocketblast/rcon-troller/downloads',
classifiers = '',
# package installation
package_dir = package_dir,
packages = find_packages('src'),
namespace_packages = ['rocketblast', 'rocketblast.rcon'],
install_requires = ['pygeoip', 'rocket_blast_rcon'],
dependency_links = ['https://github.com/rocketblast/rcon/archive/master.zip#egg=rocket_blast_rcon'],
# uncomment if you have share/data files
data_files = data_files,
#use_2to3 = True, # causes issue with nosetests
)
| rocketblast/rcon-troller | setup.py | Python | agpl-3.0 | 1,414 | ["BLAST"] | bd1969795ca65e924b3ee2c4940b475883570bb36f33b91812eb7e241d7c49ed |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import tempfile
import mdtraj as md
import numpy as np
from mdtraj.utils.six.moves import cPickle
from mdtraj.utils import import_
from mdtraj.testing import get_fn, eq, skipif, assert_raises
try:
from simtk.openmm import app
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
try:
import pandas as pd
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
@skipif(not HAVE_OPENMM)
def test_topology_openmm():
topology = md.load(get_fn('1bpi.pdb')).topology
# the OpenMM topology doesn't have the distinction
# between resSeq and index, so if they're out of whack
# in the OpenMM version, that can't be preserved
for residue in topology.residues:
residue.resSeq = residue.index
mm = topology.to_openmm()
assert isinstance(mm, app.Topology)
topology2 = md.Topology.from_openmm(mm)
eq(topology, topology2)
@skipif(not HAVE_OPENMM)
def test_topology_openmm_boxes():
u = import_('simtk.unit')
traj = md.load(get_fn('1vii_sustiva_water.pdb'))
mmtop = traj.topology.to_openmm(traj=traj)
box = mmtop.getUnitCellDimensions() / u.nanometer
@skipif(not HAVE_PANDAS)
def test_topology_pandas():
topology = md.load(get_fn('native.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
topology3 = md.Topology.from_dataframe(atoms) # Make sure the default argument of None works, see issue #774
@skipif(not HAVE_PANDAS)
def test_topology_pandas_TIP4PEW():
topology = md.load(get_fn('GG-tip4pew.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_numbers():
topology = md.load(get_fn('1bpi.pdb')).topology
assert len(list(topology.atoms)) == topology.n_atoms
assert len(list(topology.residues)) == topology.n_residues
assert all([topology.atom(i).index == i for i in range(topology.n_atoms)])
@skipif(not HAVE_PANDAS)
def test_topology_unique_elements_bpti():
traj = md.load(get_fn('bpti.pdb'))
top, bonds = traj.top.to_dataframe()
atoms = np.unique(["C", "O", "N", "H", "S"])
eq(atoms, np.unique(top.element.values))
def test_chain():
top = md.load(get_fn('bpti.pdb')).topology
chain = top.chain(0)
assert chain.n_residues == len(list(chain.residues))
atoms = list(chain.atoms)
assert chain.n_atoms == len(atoms)
for i in range(chain.n_atoms):
assert atoms[i] == chain.atom(i)
def test_residue():
top = md.load(get_fn('bpti.pdb')).topology
residue = top.residue(0)
assert len(list(residue.atoms)) == residue.n_atoms
atoms = list(residue.atoms)
for i in range(residue.n_atoms):
assert residue.atom(i) == atoms[i]
def test_segment_id():
top = md.load(get_fn('ala_ala_ala.pdb')).topology
assert next(top.residues).segment_id == "AAL", "Segment id is not being assigned correctly for ala_ala_ala.psf"
df = top.to_dataframe()[0]
assert len(df["segmentID"] == "AAL")==len(df), "Segment id is not being assigned correctly to topology data frame ala_ala_ala.psf"
def test_nonconsecutive_resSeq():
t = md.load(get_fn('nonconsecutive_resSeq.pdb'))
yield lambda : eq(np.array([r.resSeq for r in t.top.residues]), np.array([1, 3, 5]))
df1 = t.top.to_dataframe()
df2 = md.Topology.from_dataframe(*df1).to_dataframe()
yield lambda : eq(df1[0], df2[0])
# round-trip through a PDB load/save loop
fd, fname = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
t.save(fname)
t2 = md.load(fname)
yield lambda : eq(df1[0], t2.top.to_dataframe()[0])
os.unlink(fname)
def test_pickle():
# test pickling of topology (bug #391)
cPickle.loads(cPickle.dumps(md.load(get_fn('bpti.pdb')).topology))
def test_atoms_by_name():
top = md.load(get_fn('bpti.pdb')).topology
atoms = list(top.atoms)
for atom1, atom2 in zip(top.atoms_by_name('CA'), top.chain(0).atoms_by_name('CA')):
assert atom1 == atom2
assert atom1 in atoms
assert atom1.name == 'CA'
assert len(list(top.atoms_by_name('CA'))) == sum(1 for _ in atoms if _.name == 'CA')
assert top.residue(15).atom('CA') == [a for a in top.residue(15).atoms if a.name == 'CA'][0]
assert_raises(KeyError, lambda: top.residue(15).atom('sdfsdsdf'))
def test_select_atom_indices():
top = md.load(get_fn('native.pdb')).topology
yield lambda: eq(top.select_atom_indices('alpha'), np.array([8]))
yield lambda: eq(top.select_atom_indices('minimal'),
np.array([4, 5, 6, 8, 10, 14, 15, 16, 18]))
assert_raises(ValueError, lambda: top.select_atom_indices('sdfsdfsdf'))
@skipif(not HAVE_OPENMM)
def test_top_dataframe_openmm_roundtrip():
t = md.load(get_fn('2EQQ.pdb'))
top, bonds = t.top.to_dataframe()
t.topology = md.Topology.from_dataframe(top, bonds)
omm_top = t.top.to_openmm()
def test_n_bonds():
t = md.load(get_fn('2EQQ.pdb'))
for atom in t.top.atoms:
if atom.element.symbol == 'H':
assert atom.n_bonds == 1
elif atom.element.symbol == 'C':
assert atom.n_bonds in [3, 4]
elif atom.element.symbol == 'O':
assert atom.n_bonds in [1, 2]
def test_load_unknown_topology():
try:
md.load(get_fn('frame0.dcd'), top=get_fn('frame0.dcd'))
except IOError as e:
# we want to make sure there's a nice error message that includes
# a list of the supported topology formats.
assert all(s in str(e) for s in ('.pdb', '.psf', '.prmtop'))
else:
assert False # fail
def test_unique_pairs():
n = 10
a = np.arange(n)
b = np.arange(n, n+n)
eq(md.Topology._unique_pairs(a, a).sort(), md.Topology._unique_pairs_equal(a).sort())
eq(md.Topology._unique_pairs(a, b).sort(), md.Topology._unique_pairs_mutually_exclusive(a, b).sort())
def test_select_pairs():
traj = md.load(get_fn('tip3p_300K_1ATM.pdb'))
select_pairs = traj.top.select_pairs
assert len(select_pairs(selection1='name O', selection2='name O')) == 258 * (258 - 1) // 2
assert len(select_pairs(selection1='name H1', selection2='name O')) == 258 * 258
selections = iter([
# Equal
("(name O) or (name =~ 'H.*')", "(name O) or (name =~ 'H.*')"),
('all', 'all'),
# Exclusive
('name O', 'name H1'),
('name H1', 'name O'),
# Overlap
(range(traj.n_atoms), 'name O'),
('all', 'name O')])
for select1, select2 in selections:
select3, select4 = next(selections)
assert eq(select_pairs(selection1=select1, selection2=select2).sort(),
select_pairs(selection1=select3, selection2=select4).sort())
def test_to_fasta():
t = md.load(get_fn('2EQQ.pdb'))
assert t.topology.to_fasta(0) == "ENFSGGCVAGYMRTPDGRCKPTFYQLIT"
def test_subset():
t1 = md.load(get_fn('2EQQ.pdb')).top
t2 = t1.subset([1,2,3])
assert t2.n_residues == 1
def test_molecules():
top = md.load(get_fn('4OH9.pdb')).topology
molecules = top.find_molecules()
assert sum(len(mol) for mol in molecules) == top.n_atoms
assert sum(1 for mol in molecules if len(mol) > 1) == 2 # All but two molecules are water
| tcmoore3/mdtraj | mdtraj/tests/test_topology.py | Python | lgpl-2.1 | 8,412 | ["MDTraj", "OpenMM"] | efcc9fa460e0aa81e18c9b648dd4d59b020c174ee836e20180e6bb62d5badc7e |
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 446 $
# $Date: 2009-01-22 22:20:21 -0500 (Thu, 22 Jan 2009) $
# $Author: [email protected] $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/tests/test_message_agent.py $
import random
import sys
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
from rlglue.types import Observation
class test_message_agent(Agent):
whichEpisode=0
def agent_init(self,taskSpec):
pass
def agent_start(self,observation):
return Action()
def agent_step(self,reward, observation):
return Action()
def agent_end(self,reward):
pass
def agent_cleanup(self):
pass
def agent_message(self,inMessage):
if inMessage==None:
return "null"
if inMessage=="":
return "empty"
if inMessage=="null":
return None
if inMessage=="empty":
return ""
return inMessage
if __name__=="__main__":
AgentLoader.loadAgent(test_message_agent())
| evenmarbles/rlglued | tests/test_message_agent.py | Python | bsd-3-clause | 1,608 | ["Brian"] | e4e72f998b3a6b70e809acb0430cdf27011efd71939b08740c67c2d4994e1abe |
#!/usr/bin/env python
""" update local cfg
"""
import os
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgFile] ... DB ...' % Script.scriptName]))
Script.parseCommandLine()
args = Script.getPositionalArgs()
setupName = args[0]
# Where to store outputs
if not os.path.isdir('%s/sandboxes' % setupName):
os.makedirs('%s/sandboxes' % setupName)
# now updating the CS
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
csAPI = CSAPI()
csAPI.setOption('Systems/WorkloadManagement/Production/Services/SandboxStore/BasePath', '%s/sandboxes' % setupName)
csAPI.setOption('Systems/WorkloadManagement/Production/Services/SandboxStore/LogLevel', 'DEBUG')
# Now setting a SandboxSE as the following:
# ProductionSandboxSE
# {
# BackendType = DISET
# AccessProtocol = dips
# DIP
# {
# Host = localhost
# Port = 9196
# ProtocolName = DIP
# Protocol = dips
# Path = /scratch/workspace/%s/sandboxes % setupName
# Access = remote
# }
# }
res = csAPI.createSection('Resources/StorageElements/')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Resources/StorageElements/ProductionSandboxSE')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/BackendType', 'DISET')
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/AccessProtocol', 'dips')
res = csAPI.createSection('Resources/StorageElements/ProductionSandboxSE/DIP')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/DIP/Host', 'localhost')
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/DIP/Port', '9196')
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/DIP/ProtocolName', 'DIP')
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/DIP/Protocol', 'dips')
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/DIP/Access', 'remote')
csAPI.setOption('Resources/StorageElements/ProductionSandboxSE/DIP/Path', '%s/sandboxes' % setupName)
# Now setting a FileCatalogs section as the following:
# FileCatalogs
# {
# FileCatalog
# {
# AccessType = Read-Write
# Status = Active
# Master = True
# }
# }
res = csAPI.createSection('Resources/FileCatalogs/')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Resources/FileCatalogs/FileCatalog')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Resources/FileCatalogs/FileCatalog/AccessType', 'Read-Write')
csAPI.setOption('Resources/FileCatalogs/FileCatalog/Status', 'Active')
csAPI.setOption('Resources/FileCatalogs/FileCatalog/Master', 'True')
# Now setting up the following option:
# Resources
# {
# Sites
# {
# DIRAC
# {
# DIRAC.Jenkins.ch
# {
# CEs
# {
# jenkins.cern.ch
# {
# CEType = Test
# Queues
# {
# jenkins-queue_not_important
# {
# maxCPUTime = 200000
# SI00 = 2400
# }
# }
# }
# }
# }
# }
# }
for st in ['Resources/Sites/DIRAC/',
'Resources/Sites/DIRAC/DIRAC.Jenkins.ch',
'Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch',
'Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch/Queues',
'Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch/Queues/jenkins-queue_not_important']:
res = csAPI.createSection(st)
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/CEType', 'Test')
csAPI.setOption(
'Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/Queues/jenkins-queue_not_important/maxCPUTime',
'200000')
csAPI.setOption('Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/Queues/jenkins-queue_not_important/SI00',
'2400')
# Now setting up the following option:
# Resources
# {
# FTSEndpoints
# {
# FTS3
# {
# JENKINS-FTS3 = https://jenkins-fts3.cern.ch:8446
# }
# }
for st in ['Resources/FTSEndpoints/',
'Resources/FTSEndpoints/FTS3/']:
res = csAPI.createSection(st)
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Resources/FTSEndpoints/FTS3/JENKINS-FTS3', 'https://jenkins-fts3.cern.ch:8446')
# Now setting a RSS section as the following inside /Operations/Defaults:
#
# ResourceStatus
# {
# Policies
# {
# AlwaysActiveForResource
# {
# matchParams
# {
# element = Resource
# }
# policyType = AlwaysActive
# }
# AlwaysBannedForSE1SE2
# {
# matchParams
# {
# name = SE1,SE2
# }
# policyType = AlwaysBanned
# }
# AlwaysBannedForSite
# {
# matchParams
# {
# element = Site
# }
# policyType = AlwaysBanned
# }
# }
# }
res = csAPI.createSection('Operations/')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Operations/Defaults')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Operations/Defaults/ResourceStatus')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Operations/Defaults/ResourceStatus/Policies')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/policyType', 'AlwaysActive')
res = csAPI.createSection('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/matchParams')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/matchParams/element', 'Resource')
res = csAPI.createSection('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/policyType', 'AlwaysBanned')
res = csAPI.createSection('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/matchParams')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/matchParams/name', 'SE1,SE2')
res = csAPI.createSection('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/matchParams')
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/policyType', 'AlwaysBanned')
csAPI.setOption('Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/matchParams/element', 'Site')
# Now setting the catalog list in Operations/Defaults/Services/Catalogs/CatalogList
#
# Services
# {
# Catalogs
# {
# CatalogList = FileCatalog
# }
# }
res = csAPI.createSection('Operations/Defaults/Services')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Operations/Defaults/Services/Catalogs')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Operations/Defaults/Services/Catalogs/CatalogList')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Operations/Defaults/Services/Catalogs/CatalogList', 'FileCatalog')
# Now setting the Registry section
#
# Registry
# {
# VO
# {
# Jenkins
# {
# VOMSName = myVOMS
# }
# }
# }
res = csAPI.createSection('Registry')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Registry/VO/')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Registry/VO/Jenkins')
if not res['OK']:
print res['Message']
exit(1)
res = csAPI.createSection('Registry/VO/Jenkins/VOMSName')
if not res['OK']:
print res['Message']
exit(1)
csAPI.setOption('Registry/VO/Jenkins/VOMSName', 'myVOMS')
# Final action: commit in CS
csAPI.commit()
| arrabito/DIRAC | tests/Jenkins/dirac-cfg-update-server.py | Python | gpl-3.0 | 8,700 | ["DIRAC"] | 0b0fb2e5a96f0bb9712fd670b3b800ec21954e81e2ecf61530895716db1313ec |
#
# Copyright 2014-2015 University of Southern California
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#
"""Synapse detection in 3D microscopy images using size-specific blob detection.
The method uses several convolution kernels which have been
experimentally derived:
Low: a gaussian distribution approximating a synapse's signal
distribution
Red: a gaussian distribution to blur an optional autofluorescence
channel
Core: a kernel containing mostly central/high-intensity voxels
within synapse signal blobs
Span: a larger kernel containing the entire local region of
synapse centroids
Hollow: a difference of (Span - Core) containing mostly
peripheral/low-intensity voxels around synapse signal blobs
The 3D image convolution I*Low is trivially separated into 1D gaussian
convolutions on each axis for efficiency.
Candidate synapses are detected by finding local maxima in the I*Low
convolution result, i.e. voxels where the measured synapse core signal
is equal to the maximum within a local box centered on the same voxel.
Additional measures are computed sparsely at each candidate centroid
location:
A. I*Core
B. I*Hollow
C. I*Red
These measures use direct (non-separated) 3D convolution over the
small image region surrounding the centroid. Because the set of
candidate centroids is so small relative to the total volume size,
this is faster than convolving the entire image even with separated
kernels.
Centroid classification is based on the computed measures for each
centroid.
Currently, the Core and Span kernels are simple box kernels, but we
may vary these empirically to improve our synapse blob classification.
The sparse measurement setup allows arbitrary 3D kernels since they
do not need to be separable.
"""
| informatics-isi-edu/synspy | synspy/analyze/__init__.py | Python | bsd-3-clause | 1,816 | ["Gaussian"] | 57025956a7ec1a7fc1b0039bfca3e0f80bcf34551bfc2e42b4ec280f6686134a |
"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
============ =================================================================
``pi`` Pi
``golden`` Golden ratio
============ =================================================================
Physical constants
==================
============= =================================================================
``c`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``R`` molar gas constant
``alpha`` fine-structure constant
``N_A`` Avogadro constant
``k`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``m_p`` proton mass
``m_n`` neutron mass
============= =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2010 CODATA recommended values [CODATA2010]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Weight
------
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb`` one pound (avoirdupous) in kg
``oz`` one ounce in kg
``stone`` one stone in kg
``grain`` one grain in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcsec`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
================= ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
================= ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================= ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================= ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
C2K
K2C
F2C
C2F
F2K
K2F
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``lbf`` one pound force in newtons
``kgf`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
References
==========
.. [CODATA2010] CODATA Recommended Values of the Fundamental
Physical Constants 2010.
http://physics.nist.gov/cuu/Constants/index.html
"""
from __future__ import division, print_function, absolute_import
# Modules contributed by BasSw ([email protected])
from .codata import *
from .constants import *
from .codata import _obsolete_constants
_constant_names = [(_k.lower(), _k, _v)
for _k, _v in physical_constants.items()
if _k not in _obsolete_constants]
_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])),
_x[2][0], _x[2][1])
for _x in sorted(_constant_names)])
__doc__ = __doc__ % dict(constant_names=_constant_names)
del _constant_names
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
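# Illustrative usage sketch (not part of the original module), showing the
# physical_constants tuple format documented above:
#
#   from scipy import constants
#   constants.c                                     # speed of light in m/s
#   constants.physical_constants['electron mass']   # (value, unit, uncertainty)
#   constants.value('Avogadro constant')            # value component only
#   constants.C2K(25.0)                             # Celsius -> Kelvin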
| sargas/scipy | scipy/constants/__init__.py | Python | bsd-3-clause | 9,817 | ["Avogadro"] | 5f0f94df449d04ccfe9711477fa22569527c2b990213058f877e0aebf30f6518 |
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import zmq
from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
x = b' '
class TestPair(BaseZMQTestCase):
def test_basic(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
msg1 = b'message1'
msg2 = self.ping_pong(s1, s2, msg1)
self.assertEqual(msg1, msg2)
def test_multiple(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
for i in range(10):
msg = i*x
s1.send(msg)
for i in range(10):
msg = i*x
s2.send(msg)
for i in range(10):
msg = s1.recv()
self.assertEqual(msg, i*x)
for i in range(10):
msg = s2.recv()
self.assertEqual(msg, i*x)
def test_json(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
o = dict(a=10,b=list(range(10)))
o2 = self.ping_pong_json(s1, s2, o)
def test_pyobj(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
o = dict(a=10,b=range(10))
o2 = self.ping_pong_pyobj(s1, s2, o)
if have_gevent:
class TestReqRepGreen(GreenTest, TestPair):
pass
| IsCoolEntertainment/debpkg_python-pyzmq | zmq/tests/test_pair.py | Python | lgpl-3.0 | 1,895 | ["Brian"] | b53078ad1568e668b6c909d51d2bba901b9a2d35ec01a07652f28ecbf7d5bc84 |
#!/usr/bin/env python
import sys
sys.path.append("../")
import numpy as np
import pandas as pd
import scipy.io
from functions import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import neuroseries as nts
import sys
import scipy.ndimage.filters as filters
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from functools import reduce
from multiprocessing import Pool
import h5py as hd
from scipy.stats import zscore
from sklearn.manifold import TSNE, SpectralEmbedding
from skimage import filters
import os
from scipy.misc import imread
space = pd.read_hdf("../../figures/figures_articles_v2/figure1/space.hdf5")
burst = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
burst = burst.loc[space.index]
# autocorr = pd.read_hdf("../../figures/figures_articles_v2/figure1/autocorr.hdf5")
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
# carte38_mouse17 = imread('../../figures/mapping_to_align/paxino/paxino_38_mouse17.png')
# carte38_mouse17_2 = imread('../../figures/mapping_to_align/paxino/paxino_38_mouse17_2.png')
# bound_map_38 = (-2336/1044, 2480/1044, 0, 2663/1044)
# cut_bound_map = (-86/1044, 2480/1044, 0, 2663/1044)
carte_adrien = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_ALL-01.png')
carte_adrien2 = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_Contour-01.png')
bound_adrien = (-398/1254, 3319/1254, -(239/1254 - 20/1044), 3278/1254)
# carte_adrien2[:,:,-1][carte_adrien2[:,:,-1]<150] = 0.0
tmp = cPickle.load(open("../../figures/figures_articles_v2/figure1/shifts.pickle", 'rb'))
angles = tmp['angles']
shifts = tmp['shifts']
hd_index = space.index.values[space['hd'] == 1]
neurontoplot = [np.intersect1d(hd_index, space.index.values[space['cluster'] == 1])[0],
burst.loc[space.index.values[space['cluster'] == 0]].sort_values('sws').index[3],
burst.sort_values('sws').index.values[-20]]
# specific to mouse 17
subspace = pd.read_hdf("../../figures/figures_articles_v2/figure1/subspace_Mouse17.hdf5")
data = cPickle.load(open("../../figures/figures_articles_v2/figure1/rotated_images_Mouse17.pickle", 'rb'))
rotated_images = data['rotated_images']
new_xy_shank = data['new_xy_shank']
bound = data['bound']
data = cPickle.load(open("../../data/maps/Mouse17.pickle", 'rb'))
x = data['x']
y = data['y']*-1.0+np.max(data['y'])
headdir = data['headdir']
xy_pos = new_xy_shank.reshape(len(y), len(x), 2)
# XGB score
mean_score = pd.read_hdf('/mnt/DataGuillaume/MergedData/'+'SCORE_XGB.h5')
###############################################################################################################
###############################################################################################################
# PLOT
###############################################################################################################
def figsize(scale):
fig_width_pt = 483.69687 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
return fig_size
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
def noaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticks([])
ax.set_yticks([])
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
# mpl.use("pdf")
pdf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
# "text.usetex": True, # use LaTeX to write all text
# "font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 10, # LaTeX default is 10pt font.
"font.size": 10,
"legend.fontsize": 10, # Make the legend/label fonts a little smaller
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
"lines.markeredgewidth" : 1,
"axes.linewidth" : 2,
"ytick.major.size" : 1.5,
"xtick.major.size" : 1.5
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
colors = ['red', 'green', 'blue', 'purple', 'orange']
cmaps = ['Reds', 'Greens', 'Blues', 'Purples', 'Oranges']
markers = ['o', '^', '*', 's']
fig = figure(figsize = figsize(1.6))#, tight_layout=True)
outergs = gridspec.GridSpec(2,1, figure = fig, height_ratios = [1, 0.6])
#############################################
# D. TSNE
#############################################
gs = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec = outergs[0,:])
# axD = fig.add_subplot(outergs[2,1])
axD = Subplot(fig, gs[:,0])
fig.add_subplot(axD)
noaxis(axD)
sc = scatter(space[space['cluster'] == 0][1]*-1.0, space[space['cluster'] == 0][0]*-1, s = 110, c = burst['sws'][space['cluster'] == 0].values, edgecolor = 'none', alpha = 0.8, label = '_nolegend_')
# hd
scatter(space[space['cluster'] == 1][1]*-1.0, space[space['cluster'] == 1][0]*-1, s = 110, facecolor = 'red', edgecolor = 'none', alpha = 0.8, label = 'Cluster 1')
scatter(space[space['hd'] == 1][1]*-1.0, space[space['hd'] == 1][0]*-1, s = 30, marker = 'o', facecolor = 'white', edgecolor = 'black', linewidth = 0.5, label = 'HD neuron', alpha = 0.8)
# xlim(-90, 100)
# ylim(-300,80)
# legend
handles, labels = axD.get_legend_handles_labels()
axD.legend(handles[::-1], labels[::-1],
fancybox=False,
framealpha =0,
fontsize = 16,
loc = 'lower left',
bbox_to_anchor=(-0.14, 0.5),
handletextpad=0.05)
#axD.text(1, 0.54, "Cluster 2", transform = axD.transAxes, fontsize = 16)
# surrounding examples
scatter(space.loc[neurontoplot,1]*-1.0, space.loc[neurontoplot,0]*-1.0, s = 25, facecolor = 'none', edgecolor = 'grey', linewidths = 2)
txts = ['a', 'b', 'c']
xyoffset = [[-10, 12], [14, 12], [8, 20]]
for i, t in zip(range(3), txts):
x, y = (space.loc[neurontoplot[i],1]*-1, space.loc[neurontoplot[i],0]*-1)
annotate(t, xy=(x, y),
xytext=(x+np.sign(x)*xyoffset[i][0], y+np.sign(y)*xyoffset[i][1]),
fontsize = 16,
arrowprops=dict(facecolor='black',
arrowstyle="->",
connectionstyle="arc3")
)
#colorbar
cax = inset_axes(axD, "17%", "5%",
bbox_to_anchor=(0.3, 0.1, 1, 1),
bbox_transform=axD.transAxes,
loc = 'lower left')
cb = colorbar(sc, cax = cax, orientation = 'horizontal', ticks = [1, int(np.floor(burst['sws'].max()))])
cb.set_label('Burst index', labelpad = -40, fontsize = 16)
cb.ax.xaxis.set_tick_params(pad = 1)
cax.set_title("Cluster 2", fontsize = 16, pad = -35.0)
#############################################
# C. Exemples of autocorrelogram
#############################################
from matplotlib.patches import FancyArrowPatch, ArrowStyle, ConnectionPatch, Patch
from matplotlib.lines import Line2D
import matplotlib.colors as colors
import matplotlib.cm as cmx
# suC = fig.add_subplot(3,2,3)
gsC = gridspec.GridSpecFromSubplotSpec(3,3,subplot_spec=outergs[1,:], hspace = 0.4, wspace = 0.3)
axC = {}
labels = ['a HD', 'b Non-bursty', 'c Bursty']
titles = ['Wake', 'REM', 'NREM']
viridis = get_cmap('viridis')
cNorm = colors.Normalize(vmin=burst['sws'].min(), vmax = burst['sws'].max())
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap = viridis)
colors = ['red', scalarMap.to_rgba(burst.loc[neurontoplot[1], 'sws']), scalarMap.to_rgba(burst.loc[neurontoplot[2], 'sws'])]
for i in range(3):
for l,j in enumerate(['wake', 'rem', 'sws']):
tmp = store_autocorr[j][neurontoplot[i]]
tmp[0] = 0.0
tmp = tmp.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
tmp[0] = 0.0
tmp = tmp[-80:80]
axC[(i,l)] = subplot(gsC[i,l])
simpleaxis(axC[(i,l)])
plot(tmp, color = colors[i], label = labels[i], linewidth = 2.5)
if i in [0,1]:
axC[(i,l)].set_xticks([])
axC[(i,l)].set_xticklabels([])
if i == 0:
axC[(i,l)].set_title(titles[l], fontsize = 16)
if l == 0:
# leg = legend(fancybox = False, framealpha = 0, loc='lower left', bbox_to_anchor=(-1, 0))
# axB[(i,l)].set_ylabel(labels[i], labelpad = 2.)
axC[(i,l)].text(-0.6, 0.5, labels[i], transform = axC[(i,l)].transAxes, ha = 'center', va = 'center', fontsize = 16)
if i == 2:
axC[(i,l)].set_xlabel("Time (ms)", labelpad = 0.1)
# if i == 0 and l == 0:
# axC[(i,l)].text(-0.8, 1.12, "c", transform = axC[(i,l)].transAxes, fontsize = 16, fontweight='bold')
subplots_adjust(bottom = 0.05, top = 0.95, right = 0.98, left = 0.2, hspace = 0.4)
#savefig("../../figures/figures_articles_v3/figart_1.pdf", dpi = 900, facecolor = 'white')
savefig(r"../../../Dropbox (Peyrache Lab)/Talks/fig_talk_8.png", dpi = 300, facecolor = 'white')
#os.system("evince ../../figures/figures_articles_v3/figart_1.pdf &")
|
gviejo/ThalamusPhysio
|
python/figure_talk/main_talk_3_tsne3.py
|
Python
|
gpl-3.0
| 9,905
|
[
"Gaussian",
"NEURON"
] |
dfbc2e68f7b57bba3295b385928901644c628e14e614e5e9ddc1eda1592d3198
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import session, _
from frappe.utils import today
from erpnext.utilities.transaction_base import TransactionBase
class CustomerIssue(TransactionBase):
def validate(self):
if session['user'] != 'Guest' and not self.customer:
frappe.throw(_("Customer is required"))
if self.status=="Closed" and \
frappe.db.get_value("Customer Issue", self.name, "status")!="Closed":
self.resolution_date = today()
self.resolved_by = frappe.session.user
def on_cancel(self):
lst = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent = t1.name and t2.prevdoc_docname = %s and t1.docstatus!=2""",
(self.name))
if lst:
lst1 = ','.join([x[0] for x in lst])
frappe.throw(_("Cancel Material Visit {0} before cancelling this Customer Issue").format(lst1))
else:
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc, map_child_doc
def _update_links(source_doc, target_doc, source_parent):
target_doc.prevdoc_doctype = source_parent.doctype
target_doc.prevdoc_docname = source_parent.name
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
target_doc = get_mapped_doc("Customer Issue", source_name, {
"Customer Issue": {
"doctype": "Maintenance Visit",
"field_map": {
"complaint": "description",
"doctype": "prevdoc_doctype",
"name": "prevdoc_docname"
}
}
}, target_doc)
source_doc = frappe.get_doc("Customer Issue", source_name)
if source_doc.get("item_code"):
table_map = {
"doctype": "Maintenance Visit Purpose",
"postprocess": _update_links
}
map_child_doc(source_doc, target_doc, table_map, source_doc)
return target_doc
|
gangadharkadam/office_erp
|
erpnext/support/doctype/customer_issue/customer_issue.py
|
Python
|
agpl-3.0
| 2,229
|
[
"VisIt"
] |
739fc599e00e1c9c0f5baf13eff92f0dcc31bdd39f52f7a346154a0bd531d0c5
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import random
import unittest
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.core.periodic_table import Element
class IonTest(unittest.TestCase):
def setUp(self):
self.comp = []
self.comp.append(Ion.from_formula("Li+"))
self.comp.append(Ion.from_formula("MnO4-"))
self.comp.append(Ion.from_formula("Mn++"))
self.comp.append(Ion.from_formula("PO3-2"))
self.comp.append(Ion.from_formula("Fe(CN)6-3"))
self.comp.append(Ion.from_formula("Fe(CN)6----"))
self.comp.append(Ion.from_formula("Fe2((PO4)3(CO3)5)2-3"))
self.comp.append(Ion.from_formula("Ca[2+]"))
self.comp.append(Ion.from_formula("NaOH(aq)"))
def test_init_(self):
c = Composition({"Fe": 4, "O": 16, "P": 4})
charge = 4
self.assertEqual("Fe4 P4 O16 +4", Ion(c, charge).formula)
f = {1: 1, 8: 1}
charge = -1
self.assertEqual("H1 O1 -1", Ion(Composition(f), charge).formula)
self.assertEqual("S2 O3 -2", Ion(Composition(S=2, O=3), -2).formula)
def test_charge_from_formula(self):
self.assertEqual(Ion.from_formula("Li+").charge, 1)
self.assertEqual(Ion.from_formula("Li[+]").charge, 1)
self.assertEqual(Ion.from_formula("Ca[2+]").charge, 2)
self.assertEqual(Ion.from_formula("Ca[+2]").charge, 2)
self.assertEqual(Ion.from_formula("Ca++").charge, 2)
self.assertEqual(Ion.from_formula("Ca[++]").charge, 2)
self.assertEqual(Ion.from_formula("Ca2+").charge, 1)
self.assertEqual(Ion.from_formula("Cl-").charge, -1)
self.assertEqual(Ion.from_formula("Cl[-]").charge, -1)
self.assertEqual(Ion.from_formula("SO4[-2]").charge, -2)
self.assertEqual(Ion.from_formula("SO4-2").charge, -2)
self.assertEqual(Ion.from_formula("SO42-").charge, -1)
self.assertEqual(Ion.from_formula("SO4--").charge, -2)
self.assertEqual(Ion.from_formula("SO4[--]").charge, -2)
self.assertEqual(Ion.from_formula("Na[+-+]").charge, 1)
def test_special_formulas(self):
special_formulas = [
("Cl-", "Cl[-1]"),
("H+", "H[+1]"),
("F-", "F[-1]"),
("H4O4", "H2O2(aq)"),
("OH-", "OH[-1]"),
("CH3COO-", "CH3COO[-1]"),
("CH3COOH", "CH3COOH(aq)"),
("CH3OH", "CH3OH(aq)"),
("H4CO", "CH3OH(aq)"),
("C2H6O", "C2H5OH(aq)"),
("C3H8O", "C3H7OH(aq)"),
("C4H10O", "C4H9OH(aq)"),
("Fe(OH)4+", "FeO2.2H2O[+1]"),
("Zr(OH)4", "ZrO2.2H2O(aq)"),
]
for tup in special_formulas:
self.assertEqual(Ion.from_formula(tup[0]).reduced_formula, tup[1])
self.assertEqual(Ion.from_formula("Fe(OH)4+").get_reduced_formula_and_factor(hydrates=False), ("Fe(OH)4", 1))
self.assertEqual(Ion.from_formula("Zr(OH)4").get_reduced_formula_and_factor(hydrates=False), ("Zr(OH)4", 1))
def test_formula(self):
correct_formulas = [
"Li1 +1",
"Mn1 O4 -1",
"Mn1 +2",
"P1 O3 -2",
"Fe1 C6 N6 -3",
"Fe1 C6 N6 -4",
"Fe2 P6 C10 O54 -3",
"Ca1 +2",
"Na1 H1 O1 (aq)",
]
all_formulas = [c.formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
self.assertRaises(ValueError, Ion.from_formula, "(co2)(po4)2")
def test_mixed_valence(self):
comp = Ion(Composition({"Fe2+": 2, "Fe3+": 4, "Li+": 8}))
self.assertEqual(comp.reduced_formula, "Li4Fe3(aq)")
self.assertEqual(comp.alphabetical_formula, "Fe6 Li8 (aq)")
self.assertEqual(comp.formula, "Li8 Fe6 (aq)")
def test_alphabetical_formula(self):
correct_formulas = [
"Li1 +1",
"Mn1 O4 -1",
"Mn1 +2",
"O3 P1 -2",
"C6 Fe1 N6 -3",
"C6 Fe1 N6 -4",
"C10 Fe2 O54 P6 -3",
"Ca1 +2",
"H1 Na1 O1 (aq)",
]
all_formulas = [c.alphabetical_formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
def test_num_atoms(self):
correct_num_atoms = [1, 5, 1, 4, 13, 13, 72, 1, 3]
all_natoms = [c.num_atoms for c in self.comp]
self.assertEqual(all_natoms, correct_num_atoms)
def test_anonymized_formula(self):
expected_formulas = [
"A+1",
"AB4-1",
"A+2",
"AB3-2",
"AB6C6-3",
"AB6C6-4",
"AB3C5D27-3",
"A+2",
"ABC(aq)",
]
for i in range(len(self.comp)):
self.assertEqual(self.comp[i].anonymized_formula, expected_formulas[i])
def test_from_dict(self):
sym_dict = {"P": 1, "O": 4, "charge": -2}
self.assertEqual(
Ion.from_dict(sym_dict).reduced_formula,
"PO4[-2]",
"Creation form sym_amount dictionary failed!",
)
def test_as_dict(self):
c = Ion.from_dict({"Mn": 1, "O": 4, "charge": -1})
d = c.as_dict()
correct_dict = {"Mn": 1.0, "O": 4.0, "charge": -1.0}
self.assertEqual(d, correct_dict)
self.assertEqual(d["charge"], correct_dict["charge"])
correct_dict = {"Mn": 1.0, "O": 4.0, "charge": -1}
d = c.to_reduced_dict
self.assertEqual(d, correct_dict)
self.assertEqual(d["charge"], correct_dict["charge"])
def test_equals(self):
random_z = random.randint(1, 92)
fixed_el = Element.from_Z(random_z)
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp1 = Ion(Composition({fixed_el: 1, Element.from_Z(other_z): 0}), 1)
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp2 = Ion(Composition({fixed_el: 1, Element.from_Z(other_z): 0}), 1)
self.assertEqual(
comp1,
comp2,
"Composition equality test failed. " + f"{comp1.formula} should be equal to {comp2.formula}",
)
self.assertEqual(comp1.__hash__(), comp2.__hash__(), "Hashcode equality test failed!")
def test_equality(self):
self.assertTrue(self.comp[0] == (self.comp[0]))
self.assertFalse(self.comp[0] == (self.comp[1]))
self.assertFalse(self.comp[0] != (self.comp[0]))
self.assertTrue(self.comp[0] != (self.comp[1]))
def test_mul(self):
self.assertEqual(
(self.comp[1] * 4).formula,
"Mn4 O16 -4",
"Incorrect composition after addition!",
)
def test_len(self):
self.assertEqual(len(self.comp[1]), 2, "Lengths are not equal!")
def test_to_latex_string(self):
correct_latex = [
"Li$^{+1}$",
"MnO$_{4}$$^{-1}$",
"Mn$^{+2}$",
"PO$_{3}$$^{-2}$",
"Fe(CN)$_{6}$$^{-3}$",
"Fe(CN)$_{6}$$^{-4}$",
"FeP$_{3}$C$_{5}$O$_{27}$$^{-3}$",
"Ca$^{+2}$",
"NaOH",
]
all_latex = [c.to_latex_string() for c in self.comp]
self.assertEqual(all_latex, correct_latex)
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/core/tests/test_ion.py
|
Python
|
mit
| 7,462
|
[
"pymatgen"
] |
6c8d6e958256f0de90502117b0582e89ae1ee297bb6ae88d5875164867687108
|
"""Handlers for GROMACS trajectories xtc and trr formats
"""
from .base import IOHandler
import numpy as np
from ...libs.pyxdr import XTCReader
class BlockedArray(np.ndarray):
def __init__(self, *args, **kwargs):
super(BlockedArray, self).__init__(*args, **kwargs)
self.block_size = 1000 # It preallocates 1000
self._last_i = self.shape[0] - 1
def append(self, item):
assert item.shape == self.shape[1:]
self._last_i += 1
        if self._last_i == self.shape[0]:
            self._enlarge()
        # write the new item whether or not the buffer had to grow
        nslices = len(self.shape) - 1
        slices = (slice(None, None, None),)*nslices
        self[(self._last_i,)+slices] = item
def _enlarge(self):
newshape = list(self.shape)
newshape[0] += self.block_size
self.resize(newshape, refcheck=False)
def trim(self):
self.resize((self._last_i,) + self.shape[1:] , refcheck=False)
import itertools
class XtcIO(IOHandler):
'''Reader for GROMACS XTC trajectories.
**Features**
.. method:: read("trajectory")
Read the frames from the file and returns the trajectory as an
array of times and an array of atomic positions::
>>> times, positions = datafile('traj.xtc').read('trajectory')
[t1, t2, t3], [pos1, pos2, ...]
positions is a *list* of ``np.ndarray(n_atoms, 3)``.
.. method:: read("boxes")
After reading the "trajectory" feature you can call
`read("boxes")` that will return a list of *box_vectors*
    corresponding to each frame.
'''
can_read = ['trajectory', 'boxes']
can_write = []
def read(self, feature, **kwargs):
import time
t0 = time.time()
if feature == 'trajectory':
skipframes = kwargs.get("skip", None)
times = []
xtcreader = XTCReader(self.fd.name)
cursize = 0
            # placeholder array; resized to the real (n_frames, n_atoms, 3) shape as frames arrive
frames = np.empty((0, 10, 3), dtype='float32')
self._boxes = []
for i, frame in enumerate(xtcreader):
if skipframes is None or i%skipframes == 0:
cursize += 1
# Enarge if necessary
if cursize > frames.shape[0]:
frames.resize((cursize * 2, ) + frame.coords.shape)
frames[cursize - 1, :] = frame.coords
times.append(frame.time)
self._boxes.append(frame.box)
# Shrink if necessary
if frames.shape[0] != cursize:
frames.resize((cursize, frames.shape[1], frames.shape[2]))
return np.array(times), frames
if feature == 'boxes':
return self._boxes
|
swails/chemlab
|
chemlab/io/handlers/xtctrr.py
|
Python
|
gpl-3.0
| 2,906
|
[
"Gromacs"
] |
09dc5cdfb5c50e1d3294e2223b98059d54fe68e0418cf48b06dff5458d87cc49
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import re
import os
import subprocess
import hashlib
import json
import random
from pathlib import Path
from subprocess import check_output, check_call
from socket import gethostname, getfqdn
from shlex import split
from subprocess import CalledProcessError
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core import host
from charms.reactive import endpoint_from_flag, is_state
from time import sleep
db = unitdata.kv()
kubeclientconfig_path = '/root/.kube/config'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
certs_dir = Path('/root/cdk')
ca_crt_path = certs_dir / 'ca.crt'
server_crt_path = certs_dir / 'server.crt'
server_key_path = certs_dir / 'server.key'
client_crt_path = certs_dir / 'client.crt'
client_key_path = certs_dir / 'client.key'
def get_version(bin_name):
"""Get the version of an installed Kubernetes binary.
:param str bin_name: Name of binary
:return: 3-tuple version (maj, min, patch)
Example::
    >>> get_version('kubelet')
(1, 6, 0)
"""
cmd = '{} --version'.format(bin_name).split()
version_string = subprocess.check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
def retry(times, delay_secs):
""" Decorator for retrying a method call.
Args:
times: How many times should we retry before giving up
delay_secs: Delay in secs
Returns: A callable that would return the last call outcome
"""
def retry_decorator(func):
""" Decorator to wrap the function provided.
Args:
            func: Provided function should return either True or False
Returns: A callable that would return the last call outcome
"""
def _wrapped(*args, **kwargs):
res = func(*args, **kwargs)
attempt = 0
while not res and attempt < times:
sleep(delay_secs)
res = func(*args, **kwargs)
if res:
break
attempt += 1
return res
return _wrapped
return retry_decorator
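# Illustrative usage sketch for the retry decorator above (the wrapped
# function name is hypothetical, not part of this charm). The decorated
# callable must return a truthy value on success and a falsy one on failure;
# it is re-invoked up to `times` extra attempts with `delay_secs` between
# attempts, and the last outcome is returned.
#
#     @retry(times=3, delay_secs=5)
#     def _flaky_service_ready():
#         return host.service_running('snap.kubelet.daemon')
#
#     ready = _flaky_service_ready()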
def calculate_resource_checksum(resource):
''' Calculate a checksum for a resource '''
md5 = hashlib.md5()
path = hookenv.resource_get(resource)
if path:
with open(path, 'rb') as f:
data = f.read()
md5.update(data)
return md5.hexdigest()
def get_resource_checksum_db_key(checksum_prefix, resource):
''' Convert a resource name to a resource checksum database key. '''
return checksum_prefix + resource
def migrate_resource_checksums(checksum_prefix, snap_resources):
''' Migrate resource checksums from the old schema to the new one '''
for resource in snap_resources:
new_key = get_resource_checksum_db_key(checksum_prefix, resource)
if not db.get(new_key):
path = hookenv.resource_get(resource)
if path:
# old key from charms.reactive.helpers.any_file_changed
old_key = 'reactive.files_changed.' + path
old_checksum = db.get(old_key)
db.set(new_key, old_checksum)
else:
# No resource is attached. Previously, this meant no checksum
# would be calculated and stored. But now we calculate it as if
# it is a 0-byte resource, so let's go ahead and do that.
zero_checksum = hashlib.md5().hexdigest()
db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
hookenv.status_set('maintenance', 'Checking resources')
for resource in snap_resources:
key = get_resource_checksum_db_key(checksum_prefix, resource)
old_checksum = db.get(key)
new_checksum = calculate_resource_checksum(resource)
if new_checksum != old_checksum:
return True
return False
def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
for resource in snap_resources:
key = get_resource_checksum_db_key(checksum_prefix, resource)
checksum = calculate_resource_checksum(resource)
db.set(key, checksum)
def get_ingress_address(endpoint_name):
try:
network_info = hookenv.network_get(endpoint_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
def service_restart(service_name):
hookenv.status_set('maintenance', 'Restarting {0} service'.format(
service_name))
host.service_restart(service_name)
def service_start(service_name):
hookenv.log('Starting {0} service.'.format(service_name))
    host.service_start(service_name)
def service_stop(service_name):
hookenv.log('Stopping {0} service.'.format(service_name))
host.service_stop(service_name)
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def get_service_ip(service, namespace="kube-system", errors_fatal=True):
cmd = "kubectl get service --namespace {} {} --output json".format(
namespace, service)
if errors_fatal:
output = check_output(cmd, shell=True).decode()
else:
try:
output = check_output(cmd, shell=True).decode()
except CalledProcessError:
return None
svc = json.loads(output)
return svc['spec']['clusterIP']
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
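# Usage sketch for kubectl_manifest (hedged example; the manifest path is
# hypothetical). A 'create' first checks whether the resources already exist
# and returns True without re-applying; a 'delete' always passes --now so the
# resources are removed immediately.
#
#     applied = kubectl_manifest('create', '/root/cdk/addons/example.yaml')
#     removed = kubectl_manifest('delete', '/root/cdk/addons/example.yaml')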
def get_node_name():
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if is_state('endpoint.aws.ready'):
cloud_provider = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_provider = 'gce'
elif is_state('endpoint.openstack.ready'):
cloud_provider = 'openstack'
elif is_state('endpoint.vsphere.ready'):
cloud_provider = 'vsphere'
elif is_state('endpoint.azure.ready'):
cloud_provider = 'azure'
if cloud_provider == 'aws':
return getfqdn().lower()
else:
return gethostname().lower()
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None,
keystone=False):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
if keystone:
# create keystone user
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials keystone-user'.format(kubeconfig)
check_call(split(cmd))
# create keystone context
cmd = 'kubectl config --kubeconfig={0} ' \
'set-context --cluster={1} ' \
'--user=keystone-user keystone'.format(kubeconfig, cluster)
check_call(split(cmd))
# use keystone context
cmd = 'kubectl config --kubeconfig={0} ' \
'use-context keystone'.format(kubeconfig)
check_call(split(cmd))
# manually add exec command until kubectl can do it for us
with open(kubeconfig, "r") as f:
content = f.read()
content = content.replace("""- name: keystone-user
user: {}""", """- name: keystone-user
user:
exec:
command: "/snap/bin/client-keystone-auth"
apiVersion: "client.authentication.k8s.io/v1beta1"
""")
with open(kubeconfig, "w") as f:
f.write(content)
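# Usage sketch for create_kubeconfig (hedged example; the server address and
# token are hypothetical). Exactly one authentication mechanism should be
# supplied -- key+certificate, password, or token; passing both token and
# password raises ValueError as noted above.
#
#     create_kubeconfig(kubeclientconfig_path,
#                       server='https://10.0.0.10:6443',
#                       ca=str(ca_crt_path),
#                       token='example-admin-token',
#                       user='admin')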
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
def configure_kubernetes_service(key, service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = key + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
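# Illustration of the command assembled above (values are hypothetical): with
# base_args {'v': '0'} and 'proxy-extra-args' set to 'cloud-provider=gce',
# the call becomes roughly
#     snap set kube-proxy v=0 cloud-provider=gce
# while any key from the previous invocation that is no longer present stays
# reset to <key>=null.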
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _cloud_endpoint_ca_path(component):
return _snap_common_path(component) / 'cloud-endpoint-ca.crt'
def encryption_config_path():
apiserver_snap_common_path = _snap_common_path('kube-apiserver')
encryption_conf_dir = apiserver_snap_common_path / 'encryption'
return encryption_conf_dir / 'encryption_config.yaml'
def write_gcp_snap_config(component):
# gcp requires additional credentials setup
gcp = endpoint_from_flag('endpoint.gcp.ready')
creds_path = _gcp_creds_path(component)
with creds_path.open('w') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(gcp.credentials)
# create a cloud-config file that sets token-url to nil to make the
# services use the creds env var instead of the metadata server, as
# well as making the cluster multizone
comp_cloud_config_path = cloud_config_path(component)
comp_cloud_config_path.write_text('[Global]\n'
'token-url = nil\n'
'multizone = true\n')
daemon_env_path = _daemon_env_path(component)
if daemon_env_path.exists():
daemon_env = daemon_env_path.read_text()
if not daemon_env.endswith('\n'):
daemon_env += '\n'
else:
daemon_env = ''
if gcp_creds_env_key not in daemon_env:
daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
daemon_env_path.write_text(daemon_env)
def write_openstack_snap_config(component):
# openstack requires additional credentials setup
openstack = endpoint_from_flag('endpoint.openstack.ready')
lines = [
'[Global]',
'auth-url = {}'.format(openstack.auth_url),
'region = {}'.format(openstack.region),
'username = {}'.format(openstack.username),
'password = {}'.format(openstack.password),
'tenant-name = {}'.format(openstack.project_name),
'domain-name = {}'.format(openstack.user_domain_name),
]
if openstack.endpoint_tls_ca:
cloud_endpoint_ca_path = _cloud_endpoint_ca_path(component)
cloud_endpoint_ca_path.write_text(base64.b64decode(
openstack.endpoint_tls_ca
).decode('utf-8'))
lines.append('ca-file = {}'.format(str(cloud_endpoint_ca_path)))
if any([openstack.subnet_id,
openstack.floating_network_id,
openstack.lb_method,
openstack.manage_security_groups]):
lines.append('')
lines.append('[LoadBalancer]')
if openstack.subnet_id:
lines.append('subnet-id = {}'.format(openstack.subnet_id))
if openstack.floating_network_id:
lines.append('floating-network-id = {}'.format(
openstack.floating_network_id))
if openstack.lb_method:
lines.append('lb-method = {}'.format(
openstack.lb_method))
if openstack.manage_security_groups:
lines.append('manage-security-groups = {}'.format(
openstack.manage_security_groups))
if any([openstack.bs_version,
openstack.trust_device_path,
openstack.ignore_volume_az]):
lines.append('')
lines.append('[BlockStorage]')
if openstack.bs_version is not None:
lines.append('bs-version = {}'.format(openstack.bs_version))
if openstack.trust_device_path is not None:
lines.append('trust-device-path = {}'.format(
openstack.trust_device_path))
if openstack.ignore_volume_az is not None:
lines.append('ignore-volume-az = {}'.format(
openstack.ignore_volume_az))
comp_cloud_config_path = cloud_config_path(component)
comp_cloud_config_path.write_text(''.join('{}\n'.format(l) for l in lines))
def write_azure_snap_config(component):
azure = endpoint_from_flag('endpoint.azure.ready')
comp_cloud_config_path = cloud_config_path(component)
comp_cloud_config_path.write_text(json.dumps({
'useInstanceMetadata': True,
'useManagedIdentityExtension': True,
'subscriptionId': azure.subscription_id,
'resourceGroup': azure.resource_group,
'location': azure.resource_group_location,
'vnetName': azure.vnet_name,
'vnetResourceGroup': azure.vnet_resource_group,
'subnetName': azure.subnet_name,
'securityGroupName': azure.security_group_name,
}))
def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
num_apis = len(api_servers)
kube_proxy_opts['master'] = api_servers[get_unit_number() % num_apis]
kube_proxy_opts['hostname-override'] = get_node_name()
if host.is_container():
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service(configure_prefix, 'kube-proxy',
kube_proxy_opts, 'proxy-extra-args')
def get_unit_number():
return int(hookenv.local_unit().split('/')[1])
|
juju-solutions/kubernetes
|
cluster/juju/layers/kubernetes-common/lib/charms/layer/kubernetes_common.py
|
Python
|
apache-2.0
| 18,539
|
[
"CDK"
] |
48ed84ffae6a3c9be5882d7c63811f1494eef98ab839084ad2ab284f3638a20f
|
'''@package docstring
@author: Jyh-Miin Lin (Jimmy), Cambridge University
@address: [email protected]
Created on 2013/1/21
================================================================================
This file is part of pynufft.
pynufft is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pynufft is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pynufft. If not, see <http://www.gnu.org/licenses/>.
================================================================================
Remark
    pynufft is a fast program that aims to do constrained inversion
    of irregularly sampled data.
Among them, nufft.py was translated from NUFFT in MATLAB of
Jeffrey A Fessler et al, University of Michigan
which was a BSD-licensed work.
    However, there are several important modifications, in
    particular the scaling factor and the adjoint NUFFT;
    only the Kaiser-Bessel window is implemented.
Please cite J A Fessler, Bradley P Sutton.
Nonuniform fast Fourier transforms using min-max interpolation.
IEEE Trans. Sig. Proc., 51(2):560-74, Feb. 2003.
and
"CS-PROPELLER MRI with Parallel Coils Using NUFFT and Split-Bregman Method"(in progress 2013)
Jyh-Miin Lin, Andrew Patterson, Hing-Chiu Chang, Tzu-Chao Chuang, Martin J. Graves,
which is planned to be published soon.
2. Note the "better" results by min-max interpolator of J.A. Fessler et al
3. Other relevant works:
*c-version: http://www-user.tu-chemnitz.de/~potts/nfft/
is a c-library with gaussian interpolator
*fortran version: http://www.cims.nyu.edu/cmcl/nufft/nufft.html
alpha/beta stage
* MEX-version http://www.mathworks.com/matlabcentral/fileexchange/25135-nufft-nufft-usffft
'''
import numpy
import scipy.sparse
from scipy.sparse.csgraph import _validation # for cx_freeze debug
# import sys
import scipy.fftpack
try:
import pyfftw
except:
pass
try:
from numba import jit
except:
pass
# def mydot(A,B):
# return numpy.dot(A,B)
# def mysinc(A):
# return numpy.sinc(A)
# print('no pyfftw, use slow fft')
dtype = numpy.complex64
# try:
# from numba import autojit
# except:
# pass
# print('numba not supported')
# @jit
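# pipe_density below runs a fixed number of Pipe/Menon-style density
# compensation iterations: with interpolation matrix V and weights W it
# repeatedly forms E = V (V^H W) and updates W <- W * E / E**2 (roughly W/E;
# the 1.0e-17 terms only guard against division by zero), so that the
# re-gridded density V^H W is driven towards a flat profile. Ten iterations
# are hard-coded.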
def pipe_density(V,W):
V1=V.conj().T
# E = V.dot( V1.dot( W ) )
# W = W*(E+1.0e-17)/(E*E+1.0e-17)
for pppj in xrange(0,10):
# W[W>1.0]=1.0
# print(pppj)
E = V.dot( V1.dot( W ) )
W = W*(E+1.0e-17)/(E**2+1.0e-17)
return W
def checker(input_var,desire_size):
if input_var is None:
print('input_variable does not exist!')
if desire_size is None:
print('desire_size does not exist!')
dd=numpy.size(desire_size)
dims = numpy.shape(input_var)
# print('dd=',dd,'dims=',dims)
if numpy.isnan(numpy.sum(input_var[:])):
print('input has NaN')
if numpy.ndim(input_var) < dd:
print('input signal has too few dimensions')
if dd > 1:
if dims[0:dd] != desire_size[0:dd]:
print(dims[0:dd])
print(desire_size)
print('input signal has wrong size1')
elif dd == 1:
if dims[0] != desire_size:
print(dims[0])
print(desire_size)
print('input signal has wrong size2')
if numpy.mod(numpy.prod(dims),numpy.prod(desire_size)) != 0:
print('input signal shape is not multiples of desired size!')
def outer_sum(xx,yy):
nx=numpy.size(xx)
ny=numpy.size(yy)
arg1 = numpy.tile(xx,(ny,1)).T
arg2 = numpy.tile(yy,(nx,1))
#cc = arg1 + arg2
return arg1 + arg2
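# Shape note for outer_sum (worked example): for xx of length nx and yy of
# length ny it returns an [nx, ny] array whose (i, j) entry is xx[i] + yy[j],
# e.g. outer_sum(numpy.array([0., 10.]), numpy.array([1., 2., 3.])) gives
# [[ 1.,  2.,  3.], [11., 12., 13.]].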
def nufft_offset(om, J, K):
'''
    For every om point (generally off the regular grid), find the offset of
    the nearest set of neighbouring points on the oversampled Kd grid
'''
gam = 2.0*numpy.pi/(K*1.0);
k0 = numpy.floor(1.0*om / gam - 1.0*J/2.0) # new way
return k0
def nufft_alpha_kb_fit(N, J, K):
'''
find out parameters alpha and beta
of scaling factor st['sn']
Note, when J = 1 , alpha is hardwired as [1,0,0...]
(uniform scaling factor)
'''
beta=1
#chat=0
Nmid=(N-1.0)/2.0
if N > 40:
#empirical L
L=13
else:
L=numpy.ceil(N/3)
nlist = numpy.arange(0,N)*1.0-Nmid
# print(nlist)
(kb_a,kb_m)=kaiser_bessel('string', J, 'best', 0, K/N)
# print(kb_a,kb_m)
if J > 1:
sn_kaiser = 1 / kaiser_bessel_ft(nlist/K, J, kb_a, kb_m, 1.0)
elif J ==1: # cases on grids
sn_kaiser = numpy.ones((1,N),dtype=dtype)
# print(sn_kaiser)
gam = 2*numpy.pi/K;
X_ant =beta*gam*nlist.reshape((N,1),order='F')
X_post= numpy.arange(0,L+1)
X_post=X_post.reshape((1,L+1),order='F')
X=numpy.dot(X_ant, X_post) # [N,L]
X=numpy.cos(X)
sn_kaiser=sn_kaiser.reshape((N,1),order='F').conj()
# print(numpy.shape(X),numpy.shape(sn_kaiser))
# print(X)
#sn_kaiser=sn_kaiser.reshape(N,1)
X=numpy.array(X,dtype=dtype)
sn_kaiser=numpy.array(sn_kaiser,dtype=dtype)
coef = numpy.linalg.lstsq(X,sn_kaiser)[0] #(X \ sn_kaiser.H);
# print('coef',coef)
#alphas=[]
alphas=coef
if J > 1:
alphas[0]=alphas[0]
alphas[1:]=alphas[1:]/2.0
elif J ==1: # cases on grids
alphas[0]=1.0
alphas[1:]=0.0
alphas=numpy.real(alphas)
return (alphas, beta)
def kaiser_bessel(x, J, alpha, kb_m, K_N):
    if K_N != 2:
kb_m = 0
alpha = 2.34 * J
else:
        kb_m = 0 # hard-coded in Fessler's code, because it was claimed to be the best
jlist_bestzn={2: 2.5,
3: 2.27,
4: 2.31,
5: 2.34,
6: 2.32,
7: 2.32,
8: 2.35,
9: 2.34,
10: 2.34,
11: 2.35,
12: 2.34,
13: 2.35,
14: 2.35,
15: 2.35,
16: 2.33 }
if J in jlist_bestzn:
# print('demo key',jlist_bestzn[J])
alpha = J*jlist_bestzn[J]
#for jj in tmp_key:
#tmp_key=abs(tmp_key-J*numpy.ones(len(tmp_key)))
# print('alpha',alpha)
else:
#sml_idx=numpy.argmin(J-numpy.arange(2,17))
tmp_key=(jlist_bestzn.keys())
min_ind=numpy.argmin(abs(tmp_key-J*numpy.ones(len(tmp_key))))
p_J=tmp_key[min_ind]
alpha = J * jlist_bestzn[p_J]
print('well, this is not the best though',alpha)
kb_a=alpha
return (kb_a, kb_m)
def kaiser_bessel_ft(u, J, alpha, kb_m, d):
'''
interpolation weight for given J/alpha/kb-m
'''
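    # Closed form evaluated below:
    #   z  = sqrt((2*pi*(J/2)*u)**2 - alpha**2)
    #   nu = d/2 + kb_m
    #   y  = (2*pi)**(d/2) * (J/2)**d * alpha**kb_m / I_{kb_m}(alpha)
    #        * J_nu(z) / z**nu
    # where I is the modified Bessel function (scipy.special.iv) and J_nu the
    # Bessel function of the first kind (scipy.special.jv).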
# import types
# scipy.special.jv (besselj in matlab) only accept complex
# if u is not types.ComplexType:
# u=numpy.array(u,dtype=numpy.complex64)
u = u*(1.0+0.0j)
import scipy.special
z = numpy.sqrt( (2*numpy.pi*(J/2)*u)**2 - alpha**2 );
nu = d/2 + kb_m;
y = ((2*numpy.pi)**(d/2))* ((J/2)**d) * (alpha**kb_m) / scipy.special.iv(kb_m, alpha) * scipy.special.jv(nu, z) / (z**nu)
y = numpy.real(y);
return y
def nufft_scale1(N, K, alpha, beta, Nmid):
'''
calculate image space scaling factor
'''
# import types
# if alpha is types.ComplexType:
alpha=numpy.real(alpha)
# print('complex alpha may not work, but I just let it as')
L = len(alpha) - 1
if L > 0:
sn = numpy.zeros((N,1))
n = numpy.arange(0,N).reshape((N,1),order='F')
i_gam_n_n0 = 1j * (2*numpy.pi/K)*( n- Nmid)* beta
for l1 in xrange(-L,L+1):
alf = alpha[abs(l1)];
if l1 < 0:
alf = numpy.conj(alf)
sn = sn + alf*numpy.exp(i_gam_n_n0 * l1)
else:
sn = numpy.dot(alpha , numpy.ones((N,1),dtype=numpy.float32))
return sn
def nufft_scale(Nd, Kd, alpha, beta):
dd=numpy.size(Nd)
Nmid = (Nd-1)/2.0
if dd == 1:
sn = nufft_scale1(Nd, Kd, alpha, beta, Nmid);
# else:
# sn = 1
# for dimid in numpy.arange(0,dd):
# tmp = nufft_scale1(Nd[dimid], Kd[dimid], alpha[dimid], beta[dimid], Nmid[dimid])
# sn = numpy.dot(list(sn), tmp.H)
return sn
def nufft_T(N, J, K, tol, alpha, beta):
'''
    equations (29) and (26) of Fessler's paper:
    the pseudo-inverse of T
'''
import scipy.linalg
L = numpy.size(alpha) - 1
cssc = numpy.zeros((J,J));
[j1, j2] = numpy.mgrid[1:J+1, 1:J+1]
for l1 in xrange(-L,L+1):
for l2 in xrange(-L,L+1):
alf1 = alpha[abs(l1)]
if l1 < 0: alf1 = numpy.conj(alf1)
alf2 = alpha[abs(l2)]
if l2 < 0: alf2 = numpy.conj(alf2)
tmp = j2 - j1 + beta * (l1 - l2)
tmp = numpy.sinc(1.0*tmp/(1.0*K/N)) # the interpolator
cssc = cssc + alf1 * numpy.conj(alf2) * tmp;
#print([l1, l2, tmp ])
u_svd, s_svd, v_svd= scipy.linalg.svd(cssc)
smin=numpy.min(s_svd)
if smin < tol:
tol=tol
        print('Poor conditioning %g => pinverse' % smin)
else:
tol= 0.0
for jj in xrange(0,J):
if s_svd[jj] < tol/10:
s_svd[jj]=0
else:
s_svd[jj]=1/s_svd[jj]
s_svd= scipy.linalg.diagsvd(s_svd,len(u_svd),len(v_svd))
cssc = numpy.dot( numpy.dot(v_svd.conj().T,s_svd), u_svd.conj().T)
return cssc
def nufft_r(om, N, J, K, alpha, beta):
'''
equation (30) of Fessler's paper
'''
M = numpy.size(om) # 1D size
gam = 2.0*numpy.pi / (K*1.0)
nufft_offset0 = nufft_offset(om, J, K) # om/gam - nufft_offset , [M,1]
dk = 1.0*om/gam - nufft_offset0 # om/gam - nufft_offset , [M,1]
arg = outer_sum( -numpy.arange(1,J+1)*1.0, dk)
L = numpy.size(alpha) - 1
if L > 0:
rr = numpy.zeros((J,M))
# if J > 1:
for l1 in xrange(-L,L+1):
alf = alpha[abs(l1)]*1.0
if l1 < 0: alf = numpy.conj(alf)
r1 = numpy.sinc(1.0*(arg+1.0*l1*beta)/(1.0*K/N))
rr = 1.0*rr + alf * r1; # [J,M]
# elif J ==1:
# rr=rr+1.0
else: #L==0
        rr = alpha[0] * numpy.sinc(1.0*arg/(1.0*K/N)) # L == 0: single term, no l1 harmonic
return (rr,arg)
def block_outer_prod(x1, x2):
'''
multiply interpolators of different dimensions
'''
(J1,M)=x1.shape
(J2,M)=x2.shape
# print(J1,J2,M)
xx1 = x1.reshape((J1,1,M),order='F') #[J1 1 M] from [J1 M]
xx1 = numpy.tile(xx1,(1,J2,1)) #[J1 J2 M], emulating ndgrid
xx2 = x2.reshape((1,J2,M),order='F') # [1 J2 M] from [J2 M]
xx2 = numpy.tile(xx2,(J1,1,1)) # [J1 J2 M], emulating ndgrid
# ang_xx1=xx1/numpy.abs(xx1)
# ang_xx2=xx2/numpy.abs(xx2)
y= xx1* xx2
# y= ang_xx1*ang_xx2*numpy.sqrt(xx1*xx1.conj() + xx2*xx2.conj())
# RMS
return y # [J1 J2 M]
def block_outer_sum(x1, x2):
(J1,M)=x1.shape
(J2,M)=x2.shape
# print(J1,J2,M)
xx1 = x1.reshape((J1,1,M),order='F') #[J1 1 M] from [J1 M]
xx1 = numpy.tile(xx1,(1,J2,1)) #[J1 J2 M], emulating ndgrid
xx2 = x2.reshape((1,J2,M),order='F') # [1 J2 M] from [J2 M]
xx2 = numpy.tile(xx2,(J1,1,1)) # [J1 J2 M], emulating ndgrid
y= xx1+ xx2
return y # [J1 J2 M]
def crop_slice_ind(Nd):
return [slice(0, Nd[_ss]) for _ss in xrange(0,len(Nd))]
class nufft:
'''
    pyNufft is ported to Python and refined
by Jyh-Miin Lin at Cambridge University
DETAILS:
__init__(self,om, Nd, Kd,Jd): Create the class with om/Nd/Kd/Jd
om: the k-space points either on grids or outside
grids
Nd: dimension of images, e.g. (256,256) for 2D
Kd: Normally you should use Kd=2*Nd,e.g. (512,512)
of above example. However, used Kd=Nd when Jd=1
Jd: number of adjacents grids on kspace to do
interpolation
self.st: the structure storing information
self.st['Nd']: image dimension
self.st['Kd']: Kspace dimension
self.st['M']: number of data points(on k-space)
self.st['p']: interpolation kernel in
self.st['sn']: scaling in image space
self.st['w']: precomputed Cartesian Density
(the weighting in k-space)
X=self.forward(x): transforming the image x to X(points not on kspace
grids)
pseudo-code: X= st['p']FFT{x*st['sn'],Kd}/sqrt(prod(KD))
x2=self.backward(self,X):adjoint (conjugated operator) of forward
also known as (regridding)
pseudo-code: x = st['sn']*IFFT{X*st['p'].conj() ,Kd}*sqrt(prod(KD))
        Note: major modifications compared with the MATLAB version:
        1. modified coefficient:
            The coefficients in J. Fessler's version may be problematic:
            his forward projection is not scaled, while the backward
            projection is scaled up by prod(Kd) -- which is wrong for
            iterative reconstruction, because the result would be
            scaled up by prod(Kd)
The above coefficient is right in the sense of "adjoint"
operator, but it is wrong for iterative reconstruction!!
2. Efficient backwardforward():
see pyNufft_fast
3. Slice over higher dimension
The extraordinary property of pyNufft is the following:
x = x[[slice(0, Nd[_ss]) for _ss in range(0,numpy.size(Nd))]]
            This construct is specific to Python, and it can slice
            arrays of arbitrary dimension.
        4. Support points on grids with Jd == 1:
when Jd = (1,1), the scaling factor st['sn'] = 1
REFERENCES
        I did not reinvent the program: it was translated from
the NUFFT in MATLAB of Jeffrey A Fessler, University of Michigan.
However, several important modifications are listed above. In
        particular, the scaling factor st['sn'].
        Only the Kaiser-Bessel window was implemented.
Please refer to
"Nonuniform fast Fourier transforms using min-max interpolation."
IEEE Trans. Sig. Proc., 51(2):560-74, Feb. 2003.
'''
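    # Hedged usage sketch (shapes are illustrative, not executed here):
    #
    #     om = numpy.random.uniform(-numpy.pi, numpy.pi, (2048, 2))  # k-space samples
    #     NufftObj = nufft(om, Nd=(256, 256), Kd=(512, 512), Jd=(6, 6),
    #                      n_shift=(128, 128))
    #     X = NufftObj.forward(x)    # image (256, 256) -> data [M, 1]
    #     x2 = NufftObj.backward(X)  # data [M, 1] -> image (256, 256, 1)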
def __init__(self,om, Nd, Kd,Jd,n_shift):
'''
constructor of pyNufft
'''
'''
Constructor: Start from here
'''
self.debug = 0 # debug
Nd=tuple(Nd) # convert Nd to tuple for consistent structure
Jd=tuple(Jd) # convert Jd to tuple for consistent structure
Kd=tuple(Kd) # convert Kd to tuple for consistent structure
# n_shift: the fftshift position, it must be at center
# n_shift=tuple(numpy.array(Nd)/2)
# dimensionality of input space (usually 2 or 3)
dd=numpy.size(Nd)
#=====================================================================
# check input errors
#=====================================================================
st={}
ud={}
kd={}
st['sense']=0 # default sense control flag
st['sensemap']=[] # default sensemap is null
st['n_shift']=n_shift
#=======================================================================
# First, get alpha and beta: the weighting and freq
# of formula (28) of Fessler's paper
# in order to create slow-varying image space scaling
#=======================================================================
for dimid in xrange(0,dd):
(tmp_alpha,tmp_beta)=nufft_alpha_kb_fit(Nd[dimid], Jd[dimid], Kd[dimid])
st.setdefault('alpha', []).append(tmp_alpha)
st.setdefault('beta', []).append(tmp_beta)
st['tol'] = 0
st['Jd'] = Jd
st['Nd'] = Nd
st['Kd'] = Kd
M = om.shape[0]
st['M'] = M
st['om'] = om
st['sn'] = numpy.array(1.0+0.0j)
dimid_cnt=1
#=======================================================================
# create scaling factors st['sn'] given alpha/beta
# higher dimension implementation
#=======================================================================
for dimid in xrange(0,dd):
tmp = nufft_scale(Nd[dimid], Kd[dimid], st['alpha'][dimid], st['beta'][dimid])
dimid_cnt=Nd[dimid]*dimid_cnt
#=======================================================================
# higher dimension implementation: multiply over all dimension
#=======================================================================
# rms1= numpy.dot(st['sn'], tmp.T**0)
# rms2 = numpy.dot(st['sn']**0, tmp.T)
# ang_rms1 = rms1/numpy.abs(rms1)
# ang_rms2 = rms2/numpy.abs(rms2)
# st['sn'] = ang_rms1*ang_rms2* numpy.sqrt( rms1*rms1.conj() +rms2*rms2.conj() )
# # RMS
# st['sn'] = numpy.dot(st['sn'] , tmp.T )
# st['sn'] = numpy.reshape(st['sn'],(dimid_cnt,1),order='F')
# if True: # do not apply scaling
# st['sn']= numpy.ones((dimid_cnt,1),dtype=numpy.complex64)
# else:
st['sn'] = numpy.dot(st['sn'] , tmp.T )
st['sn'] = numpy.reshape(st['sn'],(dimid_cnt,1),order='F')**0.0 # JML do not apply scaling
#=======================================================================
# if numpy.size(Nd) > 1:
#=======================================================================
# high dimension, reshape for consistent out put
# order = 'F' is for fortran order otherwise it is C-type array
st['sn'] = st['sn'].reshape(Nd,order='F') # [(Nd)]
#=======================================================================
# else:
# st['sn'] = numpy.array(st['sn'],order='F')
# #=======================================================================
st['sn']=numpy.real(st['sn']) # only real scaling is relevant
# [J? M] interpolation coefficient vectors. will need kron of these later
for dimid in xrange(0,dd): # loop over dimensions
N = Nd[dimid]
J = Jd[dimid]
K = Kd[dimid]
alpha = st['alpha'][dimid]
beta = st['beta'][dimid]
#===================================================================
# formula 29 , 26 of Fessler's paper
#===================================================================
T = nufft_T(N, J, K, st['tol'], alpha, beta) # [J? J?]
#==================================================================
# formula 30 of Fessler's paper
#==================================================================
if self.debug==0:
pass
else:
print('dd',dd)
print('dimid',dimid)
(r,arg)= nufft_r(om[:,dimid], N, J, K, alpha, beta) # [J? M]
#==================================================================
# formula 25 of Fessler's paper
#==================================================================
c=numpy.dot(T,r)
#
# print('size of c, r',numpy.shape(c), numpy.shape(T),numpy.shape(r))
# import matplotlib.pyplot
# matplotlib.pyplot.plot(r[:,0:])
# matplotlib.pyplot.show()
#===================================================================
# grid intervals in radius
#===================================================================
gam = 2.0*numpy.pi/(K*1.0);
phase_scale = 1.0j * gam * (N-1.0)/2.0
phase = numpy.exp(phase_scale * arg) # [J? M] linear phase
ud[dimid] = phase * c
# indices into oversampled FFT components
# FORMULA 7
koff=nufft_offset(om[:,dimid], J, K)
# FORMULA 9
kd[dimid]= numpy.mod(outer_sum( numpy.arange(1,J+1)*1.0, koff),K)
if dimid > 0: # trick: pre-convert these indices into offsets!
# ('trick: pre-convert these indices into offsets!')
kd[dimid] = kd[dimid]*numpy.prod(Kd[0:dimid])-1
kk = kd[0] # [J1 M]
uu = ud[0] # [J1 M]
for dimid in xrange(1,dd):
Jprod = numpy.prod(Jd[:dimid+1])
kk = block_outer_sum(kk, kd[dimid])+1 # outer sum of indices
kk = kk.reshape((Jprod, M),order='F')
uu = block_outer_prod(uu, ud[dimid]) # outer product of coefficients
uu = uu.reshape((Jprod, M),order='F')
#now kk and uu are [*Jd M]
# % apply phase shift
# % pre-do Hermitian transpose of interpolation coefficients
phase = numpy.exp( 1.0j* numpy.dot(om, 1.0*numpy.array(n_shift,order='F'))).T # [1 M]
uu = uu.conj()*numpy.tile(phase,[numpy.prod(Jd),1]) #[*Jd M]
mm = numpy.arange(0,M)
mm = numpy.tile(mm,[numpy.prod(Jd),1]) # [Jd, M]
# print('shpae uu',uu[:])
# print('shpae kk',kk[:])
# print('shpae mm',mm[:])
# sn_mask=numpy.ones(st['Nd'],dtype=numpy.float16)
#########################################now remove the corners of sn############
# n_dims= numpy.size(st['Nd'])
#
# sp_rat =0.0
# for di in xrange(0,n_dims):
# sp_rat = sp_rat + (st['Nd'][di]/2)**2
#
# sp_rat = sp_rat**0.5
# x = numpy.ogrid[[slice(0, st['Nd'][_ss]) for _ss in xrange(0,n_dims)]]
#
# tmp = 0
# for di in xrange(0,n_dims):
# tmp = tmp + ( (x[di] - st['Nd'][di]/2.0)/(st['Nd'][di]/2.0) )**2
#
# tmp = (1.0*tmp)**0.5
#
# indx = tmp >=1.0
#
#
# st['sn'][indx] = numpy.mean(st['sn'][...])
#########################################now remove the corners of sn############
# st['sn']=st['sn']*sn_mask
st['p'] = scipy.sparse.csc_matrix(
(numpy.reshape(uu,(numpy.size(uu),)),
(numpy.reshape(mm,(numpy.size(mm),)), numpy.reshape(kk,(numpy.size(kk),)))),
(M,numpy.prod(Kd))
)
## Now doing the density compensation of Jackson ##
W=numpy.ones((st['M'],1))
# w=numpy.ones((numpy.prod(st['Kd']),1))
# for pppj in xrange(0,100):
# # W[W>1.0]=1.0
# # print(pppj)
# E = st['p'].dot( st['p'].conj().T.dot( W ) )
#
# W = W*(E+1.0e-17)/(E**2+1.0e-17)
W = pipe_density(st['p'],W)
# import matplotlib.pyplot
# matplotlib.pyplot.subplot(2,1,1)
# matplotlib.pyplot.plot(numpy.abs(E))
# matplotlib.pyplot.subplot(2,1,2)
# matplotlib.pyplot.plot(numpy.abs(W))
# # matplotlib.pyplot.show()
# matplotlib.pyplot.plot(numpy.abs(W))
# matplotlib.pyplot.show()
st['W'] = W
# st['w'] =
## Finish the density compensation of Jackson ##
# st['q'] = st['p']
# st['T'] = st['p'].conj().T.dot(st['p']) # huge memory leak>5G
# p_herm = st['p'].conj().T.dot(st['W'])
# print('W',numpy.shape(W))
# print('p',numpy.shape(st['p']))
# temp_w = numpy.tile(W,[1,numpy.prod(st['Kd'])])
# print('temp_w',numpy.shape(temp_w))
# st['q'] = st['p'].conj().multiply(st['p'])
# st['q'] = st['p'].conj().T.dot(p_herm.conj().T).diagonal()
# st['q'] = scipy.sparse.diags(W[:,0],offsets=0).dot(st['q'])
# st['q'] = st['q'].sum(0)
#
# st['q'] = numpy.array(st['q'] )
# # for pp in range(0,M):
# # st['q'][pp,:]=st['q'][pp,:]*W[pp,0]
#
# st['q']=numpy.reshape(st['q'],(numpy.prod(st['Kd']),1),order='F').real
st['w'] = numpy.abs(( st['p'].conj().T.dot(numpy.ones(st['W'].shape,dtype = numpy.float32))))#**2) ))
# st['q']=numpy.max(st['w'])*st['q']/numpy.max(st['q'])
import matplotlib.pyplot
# matplotlib.pyplot.imshow(numpy.reshape(st['w'],st['Kd']),
# cmap=matplotlib.cm.gray,
# norm=matplotlib.colors.Normalize(vmin=0.0, vmax=3.0))
# matplotlib.pyplot.plot(numpy.reshape(st['q'],st['Kd'])[:, 0])#,
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=3.0))
# # matplotlib.pyplot.imshow(st['sn'],
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0))
# matplotlib.pyplot.show()
# matplotlib.pyplot.plot(numpy.reshape(st['w'],st['Kd'])[:, 0])#,
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=3.0))
# # matplotlib.pyplot.imshow(st['sn'],
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0))
# matplotlib.pyplot.show()
self.st=st
if self.debug==0:
pass
else:
print('st sn shape',st['sn'].shape)
self.gpu_flag=0
self.__initialize_gpu()
# self.__initialize_gpu2()
self.pyfftw_flag =self.__initialize_pyfftw()
# import multiprocessing
self.threads=1#multiprocessing.cpu_count()
self.st=st
# print( 'optimize sn and p' )
# temp_c = self.Nd2Kd(st['sn'],0)
# self.st['w'] = w
def __initialize_pyfftw(self):
pyfftw_flag = 0
try:
import pyfftw
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(60) # keep live 60 seconds
pyfftw_flag = 1
print('use pyfftw')
except:
print('no pyfftw, use slow fft')
pyfftw_flag = 0
return pyfftw_flag
def __initialize_gpu(self):
try:
import reikna.cluda as cluda
from reikna.fft import FFT
# dtype = dtype#numpy.complex64
data = numpy.zeros( self.st['Kd'],dtype=numpy.complex64)
# data2 = numpy.empty_like(data)
api = cluda.ocl_api()
self.thr = api.Thread.create(async=True)
self.data_dev = self.thr.to_device(data)
# self.data_rec = self.thr.to_device(data2)
axes=range(0,numpy.size(self.st['Kd']))
myfft= FFT( data, axes=axes)
self.myfft = myfft.compile(self.thr,fast_math=True)
self.gpu_flag=1
print('create gpu fft?',self.gpu_flag)
print('line 642')
W= self.st['w'][...,0]
print('line 645')
self.W = numpy.reshape(W, self.st['Kd'],order='C')
print('line 647')
# self.thr2 = api.Thread.create()
print('line 649')
self.W_dev = self.thr.to_device(self.W.astype(dtype))
self.gpu_flag=1
print('line 652')
except:
self.gpu_flag=0
print('get error, using cpu')
# def __initialize_gpu2(self):
# try:
# # import reikna.cluda as cluda
# # from reikna.fft import FFT
# from pycuda.sparse.packeted import PacketedSpMV
# spmv = PacketedSpMV(self.st['p'], options.is_symmetric, numpy.complex64)
# # dtype = dtype#numpy.complex64
# data = numpy.zeros( self.st['Kd'],dtype=numpy.complex64)
# # data2 = numpy.empty_like(data)
# api = cluda.ocl_api()
# self.thr = api.Thread.create(async=True)
# self.data_dev = self.thr.to_device(data)
# # self.data_rec = self.thr.to_device(data2)
# axes=range(0,numpy.size(self.st['Kd']))
# myfft= FFT( data, axes=axes)
# self.myfft = myfft.compile(self.thr,fast_math=True)
#
# self.gpu_flag=1
# print('create gpu fft?',self.gpu_flag)
# print('line 642')
# W= self.st['w'][...,0]
# print('line 645')
# self.W = numpy.reshape(W, self.st['Kd'],order='C')
# print('line 647')
# # self.thr2 = api.Thread.create()
# print('line 649')
# self.W_dev = self.thr.to_device(self.W.astype(dtype))
# self.gpu_flag=1
# print('line 652')
# except:
# self.gpu_flag=0
# print('get error, using cpu')
def forward(self,x):
'''
        forward(x): method of class pyNufft
Compute dd-dimensional Non-uniform transform of signal/image x
where d is the dimension of the data x.
INPUT:
case 1: x: ndarray, [Nd[0], Nd[1], ... , Kd[dd-1] ]
case 2: x: ndarray, [Nd[0], Nd[1], ... , Kd[dd-1], Lprod]
OUTPUT:
X: ndarray, [M, Lprod] (Lprod=1 in case 1)
where M =st['M']
'''
st=self.st
Nd = st['Nd']
Kd = st['Kd']
dims = numpy.shape(x)
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# exceptions
if self.debug==0:
pass
else:
checker(x,Nd)
if numpy.ndim(x) == dd:
Lprod = 1
elif numpy.ndim(x) > dd: # multi-channel data
Lprod = numpy.size(x)/numpy.prod(Nd)
'''
        Now transform Nd grids to Kd grids (not reshaped)
'''
Xk=self.Nd2Kd(x, 1)
# interpolate using precomputed sparse matrix
if Lprod > 1:
X = numpy.reshape(st['p'].dot(Xk),(st['M'],)+( Lprod,),order='F')
else:
X = numpy.reshape(st['p'].dot(Xk),(st['M'],1),order='F')
if self.debug==0:
pass
else:
checker(X,st['M']) # check output
return X
def backward(self,X):
'''
backward(x): method of class pyNufft
        from an [M x Lprod] shaped input, compute the adjoint (conjugate) of the
        Non-uniform Fourier transform
INPUT:
X: ndarray, [M, Lprod] (Lprod=1 in case 1)
where M =st['M']
OUTPUT:
x: ndarray, [Nd[0], Nd[1], ... , Kd[dd-1], Lprod]
'''
# extract attributes from structure
st=self.st
Nd = st['Nd']
Kd = st['Kd']
if self.debug==0:
pass
else:
checker(X,st['M']) # check X of correct shape
dims = numpy.shape(X)
Lprod= numpy.prod(dims[1:])
# how many channel * slices
if numpy.size(dims) == 1:
Lprod = 1
else:
Lprod = dims[1]
# print('Xshape',X.shape)
# print('stp.shape',st['p'].shape)
Xk_all = st['p'].getH().dot(X)
# Multiply X with interpolator st['p'] [prod(Kd) Lprod]
'''
        Now transform Kd grids to Nd grids (not reshaped)
'''
x = self.Kd2Nd(Xk_all, 1)
if self.debug==0:
pass
else:
checker(x,Nd) # check output
return x
def Nd2Kd(self,x, weight_flag):
'''
        Now transform Nd grids to Kd grids (not reshaped)
'''
#print('661 x.shape',x.shape)
st=self.st
Nd = st['Nd']
Kd = st['Kd']
dims = numpy.shape(x)
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# checker
if self.debug==0:
pass
else:
checker(x,Nd)
# if numpy.ndim(x) == dd:
# Lprod = 1
# elif numpy.ndim(x) > dd: # multi-channel data
# Lprod = numpy.size(x)/numpy.prod(Nd)
if numpy.ndim(x) == dd:
if weight_flag == 1:
x = x * st['sn']
else:
pass
Xk = self.emb_fftn(x, Kd,range(0,dd))
Xk = numpy.reshape(Xk, (numpy.prod(Kd),),order='F')
else:# otherwise, collapse all excess dimensions into just one
xx = numpy.reshape(x, [numpy.prod(Nd), numpy.prod(dims[(dd):])],order='F') # [*Nd *L]
L = numpy.shape(xx)[1]
# print('L=',L)
# print('Lprod',Lprod)
Xk = numpy.zeros( (numpy.prod(Kd), L),dtype=dtype) # [*Kd *L]
for ll in xrange(0,L):
xl = numpy.reshape(xx[:,ll], Nd,order='F') # l'th signal
if weight_flag == 1:
xl = xl * st['sn'] # scaling factors
else:
pass
Xk[:,ll] = numpy.reshape(self.emb_fftn(xl, Kd,range(0,dd)),
(numpy.prod(Kd),),order='F')
if self.debug==0:
pass
else:
checker(Xk,numpy.prod(Kd))
return Xk
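# Editor's note (hedged): for a single Nd-shaped input with weight_flag == 1,
# Nd2Kd above is conceptually equivalent to the plain numpy expression
#
#     Xk = numpy.reshape(numpy.fft.fftn(x * st['sn'], s=Kd),
#                        (numpy.prod(Kd),), order='F')
#
# i.e. scaling by st['sn'] followed by a zero-padded (oversampled) FFT onto
# the Kd grid; emb_fftn implements the zero-padding explicitly.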
def Kd2Nd(self,Xk_all,weight_flag):
st=self.st
Nd = st['Nd']
Kd = st['Kd']
dd = len(Nd)
if self.debug==0:
pass
else:
checker(Xk_all,numpy.prod(Kd)) # check X of correct shape
dims = numpy.shape(Xk_all)
Lprod= numpy.prod(dims[1:]) # how many channel * slices
if numpy.size(dims) == 1:
Lprod = 1
else:
Lprod = dims[1]
x=numpy.zeros(Kd+(Lprod,),dtype=dtype) # [*Kd *L]
# if Lprod > 1:
Xk = numpy.reshape(Xk_all, Kd+(Lprod,) , order='F')
for ll in xrange(0,Lprod): # ll = 0, 1,... Lprod-1
x[...,ll] = self.emb_ifftn(Xk[...,ll],Kd,range(0,dd))#.flatten(order='F'))
x = x[crop_slice_ind(Nd)]
if weight_flag == 0:
pass
else: #weight_flag =1 scaling factors
snc = st['sn'].conj()
for ll in xrange(0,Lprod): # ll = 0, 1,... Lprod-1
x[...,ll] = x[...,ll]*snc #% scaling factors
if self.debug==0:
pass # turn off checker
else:
checker(x,Nd) # checking size of x divisible by Nd
return x
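# Editor's note (hedged): per channel, Kd2Nd above amounts to
#
#     x_l = numpy.fft.ifftn(numpy.reshape(Xk_l, Kd, order='F'))
#     x_l = x_l[crop_slice_ind(Nd)] * st['sn'].conj()   # crop to Nd + conjugate scaling
#
# i.e. an inverse FFT on the Kd grid, a crop back to the Nd grid and the
# conjugate scaling factors, mirroring the scale-then-oversampled-FFT
# step performed in Nd2Kd (up to the FFT normalisation convention).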
def gpufftn(self, data_dev):
'''
gpufftn: interface to an external GPU fftn.
Not working to date; awaiting more reliable GPU code.
'''
# self.data_dev = self.thr.to_device(output_x.astype(dtype))
self.myfft( data_dev, data_dev)
return data_dev#.get()
def gpuifftn(self, data_dev):
'''
gpuifftn: interface to an external GPU inverse fftn.
Not working to date; awaiting more reliable GPU code.
'''
# self.data_dev = self.thr.to_device(output_x.astype(dtype))
self.myfft( data_dev, data_dev, inverse=1)
return data_dev#.get()
def emb_fftn(self, input_x, output_dim, act_axes):
'''
embedded fftn: zero-pad input_x to output_dim and FFT over act_axes.
Backend abstraction for future GPU computing: tries the GPU path first,
then the pyfftw interface, and finally falls back to scipy.fftpack.
'''
output_x=numpy.zeros(output_dim, dtype=dtype)
#print('output_dim',input_dim,output_dim,range(0,numpy.size(input_dim)))
# output_x[[slice(0, input_x.shape[_ss]) for _ss in range(0,len(input_x.shape))]] = input_x
output_x[crop_slice_ind(input_x.shape)] = input_x
# print('GPU flag',self.gpu_flag)
# print('pyfftw flag',self.pyfftw_flag)
# if self.gpu_flag == 1:
try:
# print('using GPU')
# print('using GPU interface')
# self.data_dev = self.ctx.to_device(output_x.astype(dtype))
# self.myfft(self.res_dev, self.data_dev, -1)
# output_x=self.res_dev.get()
# self.data_dev =
self.thr.to_device(output_x.astype(dtype), dest=self.data_dev)
output_x=self.gpufftn(self.data_dev).get()
except:
# elif self.gpu_flag ==0:
# elif self.pyfftw_flag == 1:
try:
# print('using pyfftw interface')
# print('threads=',self.threads)
output_x=pyfftw.interfaces.scipy_fftpack.fftn(output_x, output_dim, act_axes,
threads=self.threads,overwrite_x=True)
except:
# else:
# print('using OLD interface')
output_x=scipy.fftpack.fftn(output_x, output_dim, act_axes)
return output_x
# def emb_ifftn(self, input_x, output_dim, act_axes):
# '''
# embedded ifftn: abstraction of ifft for future gpu computing
# '''
#
# # output_x=input_x
# output_x=self.emb_fftn(input_x.conj(), output_dim, act_axes).conj()/numpy.prod(output_dim)
#
# return output_x
def emb_ifftn(self, input_x, output_dim, act_axes):
'''
embedded ifftn: zero-pad input_x to output_dim and inverse FFT over
act_axes, with the same GPU -> pyfftw -> scipy.fftpack fallback chain
as emb_fftn.
'''
output_x=numpy.zeros(output_dim, dtype=dtype)
#print('output_dim',input_dim,output_dim,range(0,numpy.size(input_dim)))
# output_x[[slice(0, input_x.shape[_ss]) for _ss in range(0,len(input_x.shape))]] = input_x
output_x[crop_slice_ind(input_x.shape)] = input_x
# print('GPU flag',self.gpu_flag)
# print('pyfftw flag',self.pyfftw_flag)
# if self.gpu_flag == 1:
try:
# print('using GPU')
# print('using GPU interface')
# self.data_dev = self.ctx.to_device(output_x.astype(dtype))
# self.myfft(self.res_dev, self.data_dev, -1)
# output_x=self.res_dev.get()
# self.data_dev =
self.thr.to_device(output_x.astype(dtype), dest=self.data_dev)
output_x=self.gpuifftn(self.data_dev).get()
except:
# elif self.pyfftw_flag == 1:
try:
# print('using pyfftw interface')
# print('threads=',self.threads)
output_x=pyfftw.interfaces.scipy_fftpack.ifftn(output_x, output_dim, act_axes,
threads=self.threads,overwrite_x=True)
except:
# else:
# print('using OLD interface')
output_x=scipy.fftpack.ifftn(output_x, output_dim, act_axes)
return output_x
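# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file; names prefixed demo_ are
# illustrative assumptions). emb_fftn/emb_ifftn above realise an oversampled
# FFT by zero-padding an Nd-shaped array into a Kd-shaped one before
# transforming. The self-contained check below reproduces that padding
# convention with plain numpy, independently of the class above.
if __name__ == "__main__":
    import numpy
    demo_Nd, demo_Kd = (4, 4), (8, 8)
    demo_x = numpy.random.randn(*demo_Nd) + 1j * numpy.random.randn(*demo_Nd)
    # explicit zero-padding, as emb_fftn does via crop_slice_ind
    demo_padded = numpy.zeros(demo_Kd, dtype=numpy.complex128)
    demo_padded[:demo_Nd[0], :demo_Nd[1]] = demo_x
    # numpy's s= argument zero-pads the same way, so the two transforms agree
    assert numpy.allclose(numpy.fft.fftn(demo_padded),
                          numpy.fft.fftn(demo_x, s=demo_Kd))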
|
jyhmiinlin/cineFSE
|
CsTransform/fessler_nufft.py
|
Python
|
gpl-3.0
| 39,917
|
[
"Gaussian"
] |
fb24bd66be55f1cf2ae1a50bb3219dfc9c4b5ff5540b1ac46ab2d3fa01eca0e4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# arcresources - list arc resources and queues
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Display the ARC queues accessible for submission by this server"""
import os
import time
import shared.returnvalues as returnvalues
from shared.base import client_id_dir
from shared.functional import validate_input_and_cert
from shared.init import initialize_main_variables, find_entry
try:
import shared.arcwrapper as arc
except Exception, exc:
# Ignore errors and let it crash if ARC is enabled without the lib
pass
def signature():
"""Signature of the main function"""
defaults = {'benchmark': 'false'}
return ['html_form', defaults]
# shared functions to name things:
def q_anchor(q):
return ('__'.join([q.name] + q.cluster.hostname.split(".")))
def q_displayname(q):
return ('%s on %s' % (q.name, q.cluster.alias))
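# Editor's note (hedged example): for a queue named "short" on a cluster with
# hostname "grid.example.org" and alias "example" (illustrative values only),
# q_anchor(q) yields "short__grid__example__org" and q_displayname(q) yields
# "short on example".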
# HARDCODED STRING to name the zero-install ARC runtime environment
# We already use a hardcoded string in jobscriptgenerator. Merge/configure?
zero_install_arc = 'ENV/ZERO-INSTALL'
def display_arc_queue(queue):
"""Format and print detailed information about an ARC queue.
"""
html = '<div id=%(n)s class="queue"><a name="%(n)s"></a>\n' % \
{'n':q_anchor(queue)}
html += \
'<table class=resources><tr class=title><td colspan=2>' + \
'<h3>%s</h3></td>' % q_displayname(queue)
html += '<td>Status: %s</td></tr>\n' % queue.status
# figure out the real CPU characteristics...
# The information "cores per node" is provided per-cluster in ARC,
# through the field cpu_distribution (a mapping of
# number_of_cores -> number_of_nodes). Here we only use the first
# of possibly several values.
d = dict(queue.cluster.cpu_distribution)
if d.keys():
cores = d.keys()[0]
else:
cores = 1
def row(col1, col2=None, col3=None):
if col2 and col3:
return ('<tr><td>%s<td>%s<td>%s</tr>\n' % (col1, col2, col3))
elif col2:
return ('<tr><td>%s<td colspan=2>%s</tr>\n' % (col1, col2))
else:
return ('<tr><td colspan=3>%s</tr>\n' % (col1))
html += \
row('Architecture: %s' % queue.cluster.architecture,
'Max. runnable jobs: %s' % queue.max_running,
'Running Jobs: %s' % queue.grid_running)
if (queue.total_cpus == -1):
cpus = queue.cluster.total_cpus
else:
cpus = queue.total_cpus
html += \
row('Total Cores: %s (%s cores/node)' % (cpus, cores),
'Max. time per job: %s sec.' % queue.max_wall_time,
'Queued Jobs: %s' % queue.grid_queued)
html += \
row('%s' % queue.node_cpu,
' ', '(%s)' % queue.mds_validfrom)
if zero_install_arc in map(str, queue.cluster.runtime_environments):
html += row('Node Memory: %s' % queue.node_memory,
'Provides Zero-Install runtime environment')
else:
html += row('Node Memory: %s' % queue.node_memory)
html += '</table></div>'
return html
def queue_resource(queue):
"""Return a 'resource' dictionary for an ARC queue.
Information mapping is straightforward, and most of it is
independent of other parts. Exception: the name has to follow the
format <queue.name>:<queue.cluster.hostname> to match submit page
and mrsltoxrsl translation"""
resource = {'object_type' :'resource',
'name' : queue.name + ':' + queue.cluster.hostname,
'PUBLICNAME' : 'ARC: ' + \
queue.name + ' on ' + queue.cluster.alias,
'MEMORY' : queue.node_memory,
# information not available for queues, and
# queue.cluster.session_dir_total is astronomic!
# '%.3f' % (float(queue.cluster.session_dir_total)/2**30),
'DISK' : '',
# this would actually need a precise mapping between
# ARC and MiG, as done for the translation
'ARCHITECTURE': queue.cluster.architecture,
# indicating whether the queue active/inactive
'STATUS' : queue.status
}
# instead of a view link, we indicate "ARC"
resource['viewreslink'] = {'object_type': 'link',
'destination': '#%s' % q_anchor(queue),
'class': 'infolink arclink '
+ queue.cluster.alias, # HACK for sorting
'title': 'Show queue details',
'text': '(details)'}
# 'NODECOUNT' : queue.total_cpus is sometimes -1.
# ... we use another provided value, queue.cluster.total_cpus,
# even though this one is not always correct either (not all CPUs
# might be assigned to the queue)
if (queue.total_cpus == -1):
resource['NODECOUNT'] = queue.cluster.total_cpus
else:
resource['NODECOUNT'] = queue.total_cpus
# ARC does not provide this readily, only through cpu_distribution
# (a mapping of number_of_cores -> number_of_nodes). Since there
# is no way to reserve cores on the same node, we set it to 1.
resource['CPUCOUNT'] = 1
resource['RUNTIMEENVIRONMENT'] = []
z_i = 'ENV/ZERO-INSTALL' # hard-wired name, same as in jobscriptgenerator
if z_i in map(str, queue.cluster.runtime_environments):
resource['RUNTIMEENVIRONMENT'] = ['ZERO-INSTALL (ARC)']
return resource
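# Editor's note (hedged example): for the same illustrative queue as above,
# queue_resource(q) yields 'name': 'short:grid.example.org' and
# 'PUBLICNAME': 'ARC: short on example'; this is the
# <queue.name>:<hostname> format that the docstring says must match the
# submit page and the mrsltoxrsl translation.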
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
user_dir = os.path.join(configuration.user_home,
client_id_dir(client_id))
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = 'ARC Queues'
output_objects.append({'object_type': 'header', 'text'
: 'Available ARC queues'})
if not configuration.site_enable_griddk:
output_objects.append({'object_type': 'text', 'text':
'''Grid.dk features are disabled on this site.
Please contact the Grid admins %s if you think they should be enabled.
''' % configuration.admin_email})
return (output_objects, returnvalues.OK)
# could factor out from here, to be usable from outside
if not configuration.arc_clusters:
output_objects.append({'object_type': 'error_text', 'text':
'No ARC support!'})
return (output_objects, returnvalues.ERROR)
try:
session = arc.Ui(user_dir)
queues = session.getQueues()
except arc.NoProxyError, err:
output_objects.append({'object_type': 'error_text', 'text'
: 'Error while retrieving: %s' % err.what()
})
output_objects += arc.askProxy()
return (output_objects, returnvalues.ERROR)
except Exception, err:
logger.error('Exception while retrieving ARC resources\n%s' % err)
output_objects.append({'object_type':'warning', 'text'
:'Could not retrieve information: %s' % err})
return(output_objects, returnvalues.ERROR)
res_list = {'object_type': 'resource_list', 'resources':[]}
for q in queues:
res_list['resources'].append(queue_resource(q))
output_objects.append(res_list)
output_objects.append({'object_type': 'sectionheader', 'text'
: 'Queue details'})
# queue details (current usage and some machine information)
for q in queues:
output_objects.append({'object_type': 'html_form', 'text'
: display_arc_queue(q) })
# end of "factoring out"
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/arcresources.py
|
Python
|
gpl-2.0
| 9,096
|
[
"Brian"
] |
f278ecfade3c89d0b4f0f7c97f1123adedf7b8c976029676b2defdc721d14360
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def metric_accessors():
cars = h2o.import_file(path=tests.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
# regression
response_col = "economy"
distribution = "gaussian"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# r2
r21 = gbm.r2(train=True, valid=False, xval=False)
assert isinstance(r21, float)
r22 = gbm.r2(train=False, valid=True, xval=False)
assert isinstance(r22, float)
r23 = gbm.r2(train=False, valid=False, xval=True)
assert isinstance(r23, float)
r2 = gbm.r2(train=True, valid=True, xval=False)
assert "train" in r2.keys() and "valid" in r2.keys(), "expected training and validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["valid"]))
assert r2["valid"] == r22
r2 = gbm.r2(train=True, valid=False, xval=True)
assert "train" in r2.keys() and "xval" in r2.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["xval"]))
assert r2["xval"] == r23
r2 = gbm.r2(train=True, valid=True, xval=True)
assert "train" in r2.keys() and "valid" in r2.keys() and "xval" in r2.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(r2["train"]), type(r2["valid"]), type(r2["xval"]))
r2 = gbm.r2(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(r2, float)
assert r2 == r21
r2 = gbm.r2(train=False, valid=True, xval=True)
assert "valid" in r2.keys() and "xval" in r2.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["valid"]), type(r2["xval"]))
# mean_residual_deviance
mean_residual_deviance1 = gbm.mean_residual_deviance(train=True, valid=False, xval=False)
assert isinstance(mean_residual_deviance1, float)
mean_residual_deviance2 = gbm.mean_residual_deviance(train=False, valid=True, xval=False)
assert isinstance(mean_residual_deviance2, float)
mean_residual_deviance3 = gbm.mean_residual_deviance(train=False, valid=False, xval=True)
assert isinstance(mean_residual_deviance3, float)
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=False)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys(), "expected training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]))
assert mean_residual_deviance["valid"] == mean_residual_deviance2
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=False, xval=True)
assert "train" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["xval"]))
assert mean_residual_deviance["xval"] == mean_residual_deviance3
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=True)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mean_residual_deviance, float)
assert mean_residual_deviance == mean_residual_deviance1
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=True, xval=True)
assert "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
# binomial
cars = h2o.import_file(path=tests.locate("smalldata/junit/cars_20mpg.csv"))
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
distribution = "bernoulli"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col], x=train[predictors], validation_y=valid[response_col], validation_x=valid[predictors], nfolds=3, distribution=distribution, fold_assignment="Random")
# auc
auc1 = gbm.auc(train=True, valid=False, xval=False)
assert isinstance(auc1, float)
auc2 = gbm.auc(train=False, valid=True, xval=False)
assert isinstance(auc2, float)
auc3 = gbm.auc(train=False, valid=False, xval=True)
assert isinstance(auc3, float)
auc = gbm.auc(train=True, valid=True, xval=False)
assert "train" in auc.keys() and "valid" in auc.keys(), "expected training and validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["valid"]))
assert auc["valid"] == auc2
auc = gbm.auc(train=True, valid=False, xval=True)
assert "train" in auc.keys() and "xval" in auc.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["xval"]))
assert auc["xval"] == auc3
auc = gbm.auc(train=True, valid=True, xval=True)
assert "train" in auc.keys() and "valid" in auc.keys() and "xval" in auc.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(auc["train"]), type(auc["valid"]), type(auc["xval"]))
auc = gbm.auc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(auc, float)
assert auc == auc1
auc = gbm.auc(train=False, valid=True, xval=True)
assert "valid" in auc.keys() and "xval" in auc.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["valid"]), type(auc["xval"]))
# roc
(fprs1, tprs1) = gbm.roc(train=True, valid=False, xval=False)
assert isinstance(fprs1, list)
assert isinstance(tprs1, list)
(fprs2, tprs2) = gbm.roc(train=False, valid=True, xval=False)
assert isinstance(fprs2, list)
assert isinstance(tprs2, list)
(fprs3, tprs3) = gbm.roc(train=False, valid=False, xval=True)
assert isinstance(fprs3, list)
assert isinstance(tprs3, list)
roc = gbm.roc(train=True, valid=True, xval=False)
assert "train" in roc.keys() and "valid" in roc.keys(), "expected training and validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple), "expected training and validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["valid"]))
assert roc["valid"][0] == fprs2
assert roc["valid"][1] == tprs2
roc = gbm.roc(train=True, valid=False, xval=True)
assert "train" in roc.keys() and "xval" in roc.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["xval"], tuple), "expected training and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["xval"]))
assert roc["xval"][0] == fprs3
assert roc["xval"][1] == tprs3
roc = gbm.roc(train=True, valid=True, xval=True)
assert "train" in roc.keys() and "valid" in roc.keys() and "xval" in roc.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "expected training, validation, and cross validation metrics to be tuples, but got {0}, {1}, and {2}".format(type(roc["train"]), type(roc["valid"]), type(roc["xval"]))
(fprs, tprs) = gbm.roc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(fprs, list)
assert isinstance(tprs, list)
assert fprs == fprs1
assert tprs == tprs1
roc = gbm.roc(train=False, valid=True, xval=True)
assert "valid" in roc.keys() and "xval" in roc.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "validation and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["valid"]), type(roc["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# giniCoef
giniCoef1 = gbm.giniCoef(train=True, valid=False, xval=False)
assert isinstance(giniCoef1, float)
giniCoef2 = gbm.giniCoef(train=False, valid=True, xval=False)
assert isinstance(giniCoef2, float)
giniCoef3 = gbm.giniCoef(train=False, valid=False, xval=True)
assert isinstance(giniCoef3, float)
giniCoef = gbm.giniCoef(train=True, valid=True, xval=False)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys(), "expected training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["valid"]))
assert giniCoef["valid"] == giniCoef2
giniCoef = gbm.giniCoef(train=True, valid=False, xval=True)
assert "train" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["xval"]))
assert giniCoef["xval"] == giniCoef3
giniCoef = gbm.giniCoef(train=True, valid=True, xval=True)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(giniCoef["train"]), type(giniCoef["valid"]), type(giniCoef["xval"]))
giniCoef = gbm.giniCoef(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(giniCoef, float)
assert giniCoef == giniCoef1
giniCoef = gbm.giniCoef(train=False, valid=True, xval=True)
assert "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["valid"]), type(giniCoef["xval"]))
# F1
F11 = gbm.F1(train=True, valid=False, xval=False)
F12 = gbm.F1(train=False, valid=True, xval=False)
F13 = gbm.F1(train=False, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=False)
F1 = gbm.F1(train=True, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=True)
F1 = gbm.F1(train=False, valid=False, xval=False) # default: return training metrics
F1 = gbm.F1(train=False, valid=True, xval=True)
# F0point5
F0point51 = gbm.F0point5(train=True, valid=False, xval=False)
F0point52 = gbm.F0point5(train=False, valid=True, xval=False)
F0point53 = gbm.F0point5(train=False, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=False)
F0point5 = gbm.F0point5(train=True, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=True)
F0point5 = gbm.F0point5(train=False, valid=False, xval=False) # default: return training metrics
F0point5 = gbm.F0point5(train=False, valid=True, xval=True)
# F2
F21 = gbm.F2(train=True, valid=False, xval=False)
F22 = gbm.F2(train=False, valid=True, xval=False)
F23 = gbm.F2(train=False, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=False)
F2 = gbm.F2(train=True, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=True)
F2 = gbm.F2(train=False, valid=False, xval=False) # default: return training metrics
F2 = gbm.F2(train=False, valid=True, xval=True)
# accuracy
accuracy1 = gbm.accuracy(train=True, valid=False, xval=False)
accuracy2 = gbm.accuracy(train=False, valid=True, xval=False)
accuracy3 = gbm.accuracy(train=False, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=False)
accuracy = gbm.accuracy(train=True, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=True)
accuracy = gbm.accuracy(train=False, valid=False, xval=False) # default: return training metrics
accuracy = gbm.accuracy(train=False, valid=True, xval=True)
# error
error1 = gbm.error(train=True, valid=False, xval=False)
error2 = gbm.error(train=False, valid=True, xval=False)
error3 = gbm.error(train=False, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=False)
error = gbm.error(train=True, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=True)
error = gbm.error(train=False, valid=False, xval=False) # default: return training metrics
error = gbm.error(train=False, valid=True, xval=True)
# precision
precision1 = gbm.precision(train=True, valid=False, xval=False)
precision2 = gbm.precision(train=False, valid=True, xval=False)
precision3 = gbm.precision(train=False, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=False)
precision = gbm.precision(train=True, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=True)
precision = gbm.precision(train=False, valid=False, xval=False) # default: return training metrics
precision = gbm.precision(train=False, valid=True, xval=True)
# mcc
mcc1 = gbm.mcc(train=True, valid=False, xval=False)
mcc2 = gbm.mcc(train=False, valid=True, xval=False)
mcc3 = gbm.mcc(train=False, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=False)
mcc = gbm.mcc(train=True, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=True)
mcc = gbm.mcc(train=False, valid=False, xval=False) # default: return training metrics
mcc = gbm.mcc(train=False, valid=True, xval=True)
# max_per_class_error
max_per_class_error1 = gbm.max_per_class_error(train=True, valid=False, xval=False)
max_per_class_error2 = gbm.max_per_class_error(train=False, valid=True, xval=False)
max_per_class_error3 = gbm.max_per_class_error(train=False, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=False)
max_per_class_error = gbm.max_per_class_error(train=True, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=True)
max_per_class_error = gbm.max_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
max_per_class_error = gbm.max_per_class_error(train=False, valid=True, xval=True)
# confusion_matrix
confusion_matrix1 = gbm.confusion_matrix(train=True, valid=False, xval=False)
confusion_matrix2 = gbm.confusion_matrix(train=False, valid=True, xval=False)
confusion_matrix3 = gbm.confusion_matrix(train=False, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=False)
confusion_matrix = gbm.confusion_matrix(train=True, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=True)
confusion_matrix = gbm.confusion_matrix(train=False, valid=False, xval=False) # default: return training metrics
confusion_matrix = gbm.confusion_matrix(train=False, valid=True, xval=True)
# # plot
# plot1 = gbm.plot(train=True, valid=False, xval=False)
# plot2 = gbm.plot(train=False, valid=True, xval=False)
# plot3 = gbm.plot(train=False, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=False)
# plot = gbm.plot(train=True, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=True)
# plot = gbm.plot(train=False, valid=False, xval=False) # default: return training metrics
# plot = gbm.plot(train=False, valid=True, xval=True)
# # tpr
# tpr1 = gbm.tpr(train=True, valid=False, xval=False)
# tpr2 = gbm.tpr(train=False, valid=True, xval=False)
# tpr3 = gbm.tpr(train=False, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=False)
# tpr = gbm.tpr(train=True, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=True)
# tpr = gbm.tpr(train=False, valid=False, xval=False) # default: return training metrics
# tpr = gbm.tpr(train=False, valid=True, xval=True)
#
# # tnr
# tnr1 = gbm.tnr(train=True, valid=False, xval=False)
# tnr2 = gbm.tnr(train=False, valid=True, xval=False)
# tnr3 = gbm.tnr(train=False, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=False)
# tnr = gbm.tnr(train=True, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=True)
# tnr = gbm.tnr(train=False, valid=False, xval=False) # default: return training metrics
# tnr = gbm.tnr(train=False, valid=True, xval=True)
#
# # fnr
# fnr1 = gbm.fnr(train=True, valid=False, xval=False)
# fnr2 = gbm.fnr(train=False, valid=True, xval=False)
# fnr3 = gbm.fnr(train=False, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=False)
# fnr = gbm.fnr(train=True, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=True)
# fnr = gbm.fnr(train=False, valid=False, xval=False) # default: return training metrics
# fnr = gbm.fnr(train=False, valid=True, xval=True)
#
# # fpr
# fpr1 = gbm.fpr(train=True, valid=False, xval=False)
# fpr2 = gbm.fpr(train=False, valid=True, xval=False)
# fpr3 = gbm.fpr(train=False, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=False)
# fpr = gbm.fpr(train=True, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=True)
# fpr = gbm.fpr(train=False, valid=False, xval=False) # default: return training metrics
# fpr = gbm.fpr(train=False, valid=True, xval=True)
# multinomial
cars = h2o.import_file(path=tests.locate("smalldata/junit/cars_20mpg.csv"))
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
distribution = "multinomial"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# hit_ratio_table
hit_ratio_table1 = gbm.hit_ratio_table(train=True, valid=False, xval=False)
hit_ratio_table2 = gbm.hit_ratio_table(train=False, valid=True, xval=False)
hit_ratio_table3 = gbm.hit_ratio_table(train=False, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=False)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=False, xval=False) # default: return training metrics
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=True, xval=True)
# clustering
iris = h2o.import_file(path=tests.locate("smalldata/iris/iris.csv"))
km = h2o.kmeans(x=iris[0:4],
nfolds=3,
k=3)
# betweenss
betweenss1 = km.betweenss(train=True, valid=False, xval=False)
assert isinstance(betweenss1, float)
betweenss3 = km.betweenss(train=False, valid=False, xval=True)
assert isinstance(betweenss3, float)
betweenss = km.betweenss(train=True, valid=False, xval=True)
assert "train" in betweenss.keys() and "xval" in betweenss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert len(betweenss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert isinstance(betweenss["train"], float) and isinstance(betweenss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(betweenss["train"]), type(betweenss["xval"]))
assert betweenss["xval"] == betweenss3
betweenss = km.betweenss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(betweenss, float)
assert betweenss == betweenss1
# totss
totss1 = km.totss(train=True, valid=False, xval=False)
assert isinstance(totss1, float)
totss3 = km.totss(train=False, valid=False, xval=True)
assert isinstance(totss3, float)
totss = km.totss(train=True, valid=False, xval=True)
assert "train" in totss.keys() and "xval" in totss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert len(totss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert isinstance(totss["train"], float) and isinstance(totss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(totss["train"]), type(totss["xval"]))
assert totss["xval"] == totss3
totss = km.totss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(totss, float)
assert totss == totss1
# tot_withinss
tot_withinss1 = km.tot_withinss(train=True, valid=False, xval=False)
assert isinstance(tot_withinss1, float)
tot_withinss3 = km.tot_withinss(train=False, valid=False, xval=True)
assert isinstance(tot_withinss3, float)
tot_withinss = km.tot_withinss(train=True, valid=False, xval=True)
assert "train" in tot_withinss.keys() and "xval" in tot_withinss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert len(tot_withinss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert isinstance(tot_withinss["train"], float) and isinstance(tot_withinss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(tot_withinss["train"]), type(tot_withinss["xval"]))
assert tot_withinss["xval"] == tot_withinss3
tot_withinss = km.tot_withinss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(tot_withinss, float)
assert tot_withinss == tot_withinss1
# withinss
withinss1 = km.withinss(train=True, valid=False, xval=False)
withinss3 = km.withinss(train=False, valid=False, xval=True)
withinss = km.withinss(train=True, valid=False, xval=True)
withinss = km.withinss(train=False, valid=False, xval=False) # default: return training metrics
# centroid_stats
centroid_stats1 = km.centroid_stats(train=True, valid=False, xval=False)
centroid_stats3 = km.centroid_stats(train=False, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=True, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=False, valid=False, xval=False) # default: return training metrics
# size
size1 = km.size(train=True, valid=False, xval=False)
size3 = km.size(train=False, valid=False, xval=True)
size = km.size(train=True, valid=False, xval=True)
size = km.size(train=False, valid=False, xval=False) # default: return training metrics
if __name__ == "__main__":
tests.run_test(sys.argv, metric_accessors)
|
kyoren/https-github.com-h2oai-h2o-3
|
h2o-py/tests/testdir_misc/pyunit_metric_accessors.py
|
Python
|
apache-2.0
| 42,219
|
[
"Gaussian"
] |
7bfb26fb734e7b6b7a5a3f1b6044071d3ddb6ef6851c14a12dedf438633f463a
|
#!/usr/bin/env python
#
# This file is part of g_mmpbsa.
#
# Author: Rajendra Kumar
#
# Copyright (C) 2013-2015 Rashmi Kumari and Andrew Lynn
#
# g_mmpbsa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# g_mmpbsa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with g_mmpbsa. If not, see <http://www.gnu.org/licenses/>.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#
import os, sys, shlex, subprocess
from subprocess import Popen, PIPE
import re
import argparse
from modeller import *
from modeller.optimizers import molecular_dynamics, conjugate_gradients
from modeller.automodel import autosched
def main():
parser = ParseOptions()
args = CheckInput(parser)
residues2mutate = get_residue_list_for_scan(args)
wildtype_path = os.path.abspath(args.dirWildType)
mutation_path = os.path.abspath(args.dirMutations)
file_list = [ os.path.join(wildtype_path, f) for f in os.listdir(wildtype_path) if os.path.isfile(os.path.join(wildtype_path, f)) ]
file_list.sort()
mt = Mutations(file_list)
for residue in residues2mutate:
sys.stdout.write("\n----------------------------------------------------")
sys.stdout.write("\nMutating residue no. %s of chain %s to %s " % (residue.pos, residue.chain, residue.name))
sys.stdout.write("\n----------------------------------------------------\n")
sys.stdout.flush()
# Add residue into object
mt.add_mutation(residue)
# Mutate all frames using modeller
mt.mutate_frames(mutation_path, args)
# Generate trajectory, topology and tpr file of mutated protein complex
mt.generate_traj(mutation_path, args)
# Remove residue from mutations
mt.del_mutations()
sys.stdout.write("+++++++++++++++++++ FINISHED +++++++++++++++++++++++\n")
sys.stdout.flush()
class resid2mutate:
def __init__(self, name, pos, chain):
self.name = name
self.pos = pos
self.chain = chain
class Mutations:
def __init__(self, file_list):
self.file_list = file_list
self.residues=[]
def add_mutation(self, residues):
self.residues.append(residues)
def del_mutations(self):
for i in range(len(self.residues)):
self.residues.pop()
def dirname(self):
dirname=""
for residue in self.residues:
dirname = dirname + residue.name + residue.pos + residue.chain + "-"
return dirname[:-1]
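# Editor's note (hedged example): with two queued mutations, e.g. ALA at
# position 15 of chain A and ALA at position 20 of chain B, dirname()
# returns "ALA15A-ALA20B", which names the per-mutant output directory
# used by mutate_frames() and generate_traj().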
def mutate_frames(self, mutation_path, args):
try:
os.makedirs(mutation_path)
except OSError:
if not os.path.isdir(mutation_path):
raise
dest_path = os.path.join(mutation_path, self.dirname())
try:
os.makedirs(dest_path)
except OSError:
if not os.path.isdir(dest_path):
raise
cd = ChDir(dest_path)
n = 1
for a_file in self.file_list:
sys.stdout.write("\r ...Mutating Frame: %d/%d" % (n, len(self.file_list)))
sys.stdout.flush()
# Change of atomname CD to CD1 of ILE, modeller does not detect CD atomname
CD_2_CD1_for_ILE(a_file, "modeller_in.pdb")
# Mutations of selected residue by modeller
mutate_a_frame("modeller_in.pdb", os.path.basename(a_file), self.residues)
os.remove("modeller_in.pdb")
n=n+1
sys.stdout.write("\n ...Finished\n")
sys.stdout.flush()
del cd
def generate_traj(self, mutation_path, args):
try:
os.path.exists(mutation_path)
except OSError:
if os.path.isdir(mutation_path):
raise
dest_path = os.path.join(mutation_path, self.dirname())
try:
os.path.exists(dest_path)
except OSError:
if os.path.isdir(dest_path):
raise
cd = ChDir(dest_path)
#Generate gro and topology file for alanine scanning
gen_top_gro_for_alascan(args, self.residues, self.file_list)
#Energy minimization
n = 1
for a_file in self.file_list:
sys.stdout.write("\r ...Energy Minimization Frame: %d/%d" % (n, len(self.file_list)))
sys.stdout.flush()
energy_minimization(args, a_file)
n=n+1
sys.stdout.write("\n ...Finished\n")
sys.stdout.flush()
sys.stdout.write("\r ...Generating trajectory and tpr file")
sys.stdout.flush()
# Generate trajectory and tpr from energy minimized mutated frames
gen_traj_tpr(args)
sys.stdout.write("\n ...Finished\n")
sys.stdout.flush()
del cd
def gen_traj_tpr(args):
files = os.listdir(os.getcwd())
frames = []
    # Collect energy-minimized frames (*_em.gro)
for a_file in files:
if a_file.endswith("em.gro"):
frames.append(a_file)
frames.sort()
# Running trjcat
command = 'trjcat -f '
for frame in frames:
command = command + frame + ' '
command = '{0} -cat -o {1}' .format(command, 'traj_temp.xtc')
p = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout, stderr
sys.exit(1)
# Generating tpr file
    gen_tpr(args, frames[0])
# Running trjconv to set time in trajectory
command = 'trjconv -f traj_temp.xtc -o trajout.xtc -t0 {0} -timestep {1}' .format(args.time0, args.timestep)
p = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout, stderr
sys.exit(1)
for i in range(1, len(frames)):
os.remove(frames[i])
os.remove('traj_temp.xtc')
def gen_top_gro_for_alascan(args, residues, file_list):
# Update protonation states of the selected residues
n = 0
for aFile in file_list:
n=n+1
sys.stdout.write("\r ...Updating protonation state for frame %d/%d" % (n, len(file_list)) )
sys.stdout.flush()
InFile = os.path.basename(aFile)
update_prot_state(args, InFile)
sys.stdout.write("\n ...Finished\n")
sys.stdout.flush()
# Generating pdb file using pdb2gmx, recovering hydrogen atoms and generating periodic box
n=0
for aFile in file_list:
n=n+1
sys.stdout.write("\r ...Generating pdb file using pdb2gmx, recovering hydrogen atoms and generating periodic box for frame %d/%d" % (n, len(file_list)) )
sys.stdout.flush()
InFile = os.path.basename(aFile)
OutFile1 = '{0}_nobox.pdb' .format(os.path.splitext(InFile)[0])
OutFile2 = '{0}.gro' .format(os.path.splitext(InFile)[0])
# Running pdb2gmx
command = 'pdb2gmx -f {0} -water tip3p -o {1}' .format(InFile, OutFile1)
p = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.stdin.write('{0}\n' .format(args.force_field))
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout, stderr
sys.exit(1)
# Recover coordinates of hydrogen atoms
if(not args.no_orig_h_pos):
recover_hydrogen_coords(residues, aFile, OutFile1)
# Generating dodecahedron box
command = 'editconf -f {0} -bt dodecahedron -d 1 -c -o {1}' .format(OutFile1, OutFile2)
p = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.stdin.write('0\n0\n')
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout, stderr
sys.exit(1)
files = os.listdir(os.getcwd())
#Deleting top and itp files
for top in files:
if top.endswith(".top") or top.endswith(".itp"):
os.remove(top)
os.remove(OutFile1)
sys.stdout.write("\n ...Finished\n")
sys.stdout.flush()
#Generate topolgy using pdb2gmx
sys.stdout.write("\r ...Generating topology files using pdb2gmx...")
sys.stdout.flush()
InFile = os.path.basename(file_list[0])
command = ['pdb2gmx', '-f', InFile, '-water', 'tip3p', '-o', 'temp.gro']
p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.stdin.write('{0}\n' .format(args.force_field))
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout, stderr
sys.exit(1)
os.remove('temp.gro')
for aFile in file_list:
InFile = os.path.basename(aFile)
os.remove(InFile)
sys.stdout.write("\n ...Finished")
sys.stdout.flush()
def energy_minimization(args, filename):
pdbfile = os.path.basename(filename)
InFile = '{0}.gro' .format(os.path.splitext(pdbfile)[0])
OutFile = '{0}_em.gro' .format(os.path.splitext(pdbfile)[0])
# Generating tpr file
gen_tpr(args, InFile)
# Running mdrun for minimization
command = 'mdrun -s topol.tpr -c {0}' .format(OutFile)
p = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout, stderr
sys.exit(1)
#Deleting files generated during minimization
files = os.listdir(os.getcwd())
for tfile in files:
if tfile.endswith(".trr") or tfile.endswith(".tpr") or tfile.endswith(".edr") or tfile.endswith(".log") or tfile.endswith(".mdp"):
os.remove(tfile)
os.remove(InFile)
def gen_tpr(args, infile):
try:
fmdp = open("em.mdp", 'w')
except IOError:
print "\nCould not write em.mdp\n"
raise
fmdp.write("constraints = none\n")
fmdp.write("integrator = steep\n")
fmdp.write("nsteps = {0}\n" .format(args.em_nsteps))
fmdp.write("emtol = {0}\n" .format(args.emtol))
fmdp.write("emstep = {0}\n" .format(args.emstep))
fmdp.write("nstxout = 1\n")
fmdp.write("nstcgsteep = 10\n")
fmdp.write("energygrps = system\n")
fmdp.write("nstcomm = 1\n")
fmdp.write("ns_type = {0}\n" .format(args.em_ns_type))
fmdp.write("rlist = {0}\n" .format(args.em_cutoff))
fmdp.write("rcoulomb = {0}\n" .format(args.em_cutoff))
fmdp.write("rvdw = {0}\n" .format(args.em_cutoff))
fmdp.write("Tcoupl = no\n")
fmdp.write("Pcoupl = no\n")
fmdp.write("gen_vel = no\n")
fmdp.close()
command = 'grompp -f em.mdp -c {0} -p topol.top -o topol.tpr' .format(infile)
p = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout, stderr
sys.exit(1)
def CD_2_CD1_for_ILE(infile, outfile):
#Opening input pdb file
try:
finPDB = open(infile, 'r')
except IOError:
print '\nCould not open File: {0}\n' .format(infile)
raise
PDB = []
for line in finPDB:
PDB.append(line)
finPDB.close()
for i in range(len(PDB)):
tmp = re.split('\s+', PDB[i])
if (tmp[0]=='ATOM') and (tmp[3]=='ILE') and (tmp[2] == 'CD'):
PDB[i] = PDB[i].replace('CD ', 'CD1')
#Writing output PDB file
try:
fout = open(outfile, 'w')
except IOError:
        print '\nCould not open File: {0}\n' .format(outfile)
raise
for line in PDB:
fout.write(line)
fout.close()
def recover_hydrogen_coords(residues, infile, outfile):
#Opening input pdb file
try:
finPDB = open(infile, 'r')
except IOError:
print '\nCould not open File: {0}\n' .format(infile)
raise
inPDB = []
for line in finPDB:
inPDB.append(line)
finPDB.close()
#Opening output pdb file
try:
foutPDB = open(outfile, 'r')
except IOError:
print '\nCould not open File: {0}\n' .format(outfile)
raise
outPDB = []
for line in foutPDB:
outPDB.append(line)
foutPDB.close()
os.remove(outfile)
mutateIN = False
mutateOUT = False
for i in range(len(inPDB)):
tmpIN = re.split('\s+', inPDB[i])
if (tmpIN[0] != 'ATOM'):
continue
#Checking for mutated residues
for residue in residues:
if (residue.pos == tmpIN[5]) and (residue.chain == tmpIN[4]):
mutateIN = True
break
if mutateIN:
mutateIN = False
continue
## Maximum atom difference between mutated and wild-type
natoms=len(residues)*20
if (i<=natoms):
back = 0
else:
back = i-natoms
####
for j in range(back, len(outPDB)):
tmpOUT = re.split('\s+', outPDB[j])
if (tmpOUT[0] !='ATOM'):
continue
#Checking for mutated residues
for residue in residues:
if (residue.pos == tmpOUT[5]) and (residue.chain == tmpOUT[4]):
mutateOUT = True
break
if mutateOUT:
mutateOUT = False
continue
#Replacing coordinates of hydrogen atoms
if ( (re.search(r"^H", tmpOUT[2]) != None) or (re.search(r"^\dH", tmpOUT[2]) != None)) and (tmpOUT[2] == tmpIN[2]) and (tmpOUT[4] == tmpIN[4]) and (tmpOUT[5] == tmpIN[5]):
outPDB[j] = inPDB[i]
break
#Writing output PDB file
try:
fout = open(outfile, 'w')
except IOError:
print '\nCould not open File: {0}\n' .format(outfile)
raise
for line in outPDB:
fout.write(line)
fout.close()
def CheckInput(parser):
args = parser.parse_args()
if args.residue_range==None:
print "ERROR: Enter -rr or --residue_range!!!\n\n"
parser.print_help()
sys.exit(1)
return args
def get_residue_list_for_scan(args):
line = args.residue_range
residues2mutate = []
chains = line.split(';')
for value in chains:
rlist = []
chain, all_resids = value.split(':')
resids = all_resids.split(',')
for resid in resids:
if re.search(r"-", resid) != None:
rstart, rend = resid.split('-')
rlist = rlist + range(int(rstart), int(rend)+1)
else:
rlist.append(resid)
for i in range(len(rlist)):
residues2mutate.append(resid2mutate(args.residue_name, str(rlist[i]), chain))
#for i in range(len(residues2mutate)):
#print residues2mutate[i].name, residues2mutate[i].pos, residues2mutate[i].chain
#sys.exit(0)
return residues2mutate
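# Illustrative example (not part of the original script): with
#   -rr 'A:27,45,67-70' -rnm ALA
# get_residue_list_for_scan() returns resid2mutate objects equivalent to
#   ALA 27 A, ALA 45 A, ALA 67 A, ALA 68 A, ALA 69 A, ALA 70 A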
def update_prot_state(args, filename):
#Get residue list from command prompt
def get_resid_list(line):
resid, chain = [], []
csv = line.split(';')
for csv_value in csv:
ch, rr = csv_value.split(':')
rlist = rr.split(',')
for i in range(len(rlist)):
chain.append(ch)
resid.append(rlist[i])
return resid, chain
#Replace specific residue
def replace_residue(PDB, resid, chain, old, new):
for r in range(len(resid)):
for i in range(len(PDB)):
tmp = re.split('\s+', PDB[i])
if (tmp[0]=='ATOM') and (resid[r]==tmp[5]) and (chain[r]==tmp[4]):
PDB[i] = PDB[i].replace(old, new)
return PDB
#Replace HIS with HIE
def replace_HIS_to_HIE(PDB):
for i in range(len(PDB)):
tmp = re.split('\s+', PDB[i])
if (tmp[0]=='ATOM') and (tmp[3]=='HIS'):
PDB[i] = PDB[i].replace('HIS', 'HIE')
return PDB
#Opening input pdb file
try:
finPDB = open(filename, 'r')
except IOError:
        print '\nCould not open File: {0}\n' .format(filename)
raise
PDB = []
for line in finPDB:
PDB.append(line)
finPDB.close()
os.remove(filename)
#Replacing HIS with HIP
if (args.hip != None):
resid, chain = get_resid_list(args.hip)
PDB = replace_residue(PDB, resid, chain, 'HIS', 'HIP')
#Replacing HIS with HID
if (args.hid != None):
resid, chain = get_resid_list(args.hid)
PDB = replace_residue(PDB, resid, chain, 'HIS', 'HID')
#Replacing HIS with HIE
replace_HIS_to_HIE(PDB)
#Replacing GLU with GLH
if (args.glh != None):
resid, chain = get_resid_list(args.glh)
PDB = replace_residue(PDB, resid, chain, 'GLU', 'GLH')
#Replacing ASP with ASH
if (args.ash != None):
resid, chain = get_resid_list(args.ash)
PDB = replace_residue(PDB, resid, chain, 'ASP', 'ASH')
#Replacing LYS with LYN
if (args.lyn != None):
resid, chain = get_resid_list(args.lyn)
PDB = replace_residue(PDB, resid, chain, 'LYS', 'LYN')
#Replacing CYS with CYM
if (args.cym != None):
resid, chain = get_resid_list(args.cym)
PDB = replace_residue(PDB, resid, chain, 'CYS', 'CYM')
#Writing output PDB file
try:
fout = open(filename, 'w')
except IOError:
print '\nCould not open File: {0}\n' .format(filename)
raise
for line in PDB:
fout.write(line)
fout.close()
def ParseOptions():
parser = argparse.ArgumentParser()
parser.add_argument("-drWT", "--dirWildType", help='Directory containing wild-type frames from MD trajectory', action="store", default='wildtype', metavar='wildtype')
parser.add_argument("-drMT", "--dirMutations", help='Directory containing mutated frames from MD trajectory', action="store", default='mutations', metavar='mutations')
    parser.add_argument("-ff", "--force_field", help='Force-field number to choose in pdb2gmx. Default value is 6, which corresponds to AMBER99SB-ILDN force-field in standard GROMACS package', action="store", default='6', metavar='6')
parser.add_argument("-rr", "--residue_range", help='Input residue range and/or list with respective chain for mutating residues to ALA. e.g. -rr \'A:27,45,67-70;B:40,43,78-80\'', action="store")
parser.add_argument("-rnm", "--residue_name", help='Target residue name for mutation. e.g. for Alanine scanning: -rnm \'ALA\'', action="store", default="ALA", metavar="ALA")
parser.add_argument("-nrch", "--no_orig_h_pos", help='Do not recover original coordinates of hydrogen atoms.', action="store_true", default=False)
parser.add_argument("-hip", "--hip", help='HIS residues need to be protonated as HIP. Usage: e.g. -hip \'A:23,34,56;B:32,56\'', action="store", default=None, metavar=None)
    parser.add_argument("-hid", "--hid", help='HIS residues need to be set as HID. Usage: e.g. -hid \'A:23,34,56;B:32,56\'', action="store", default=None, metavar=None)
parser.add_argument("-glh", "--glh", help='GLU residues need to be protonated as GLH. Usage: e.g. -glh \'A:23,34,56;B:32,56\'', action="store", default=None, metavar=None)
parser.add_argument("-ash", "--ash", help='ASP residues need to be protonated as ASH. Usage: e.g. -ash \'A:23,34,56;B:32,56\'', action="store", default=None, metavar=None)
parser.add_argument("-lyn", "--lyn", help='LYS residues need to be neutral as LYN. Usage: e.g. -lyn \'A:23,34,56;B:32,56\'', action="store", default=None, metavar=None)
parser.add_argument("-cym", "--cym", help='CYS residues need to be deprotonated as CYM. Usage: e.g. -cym \'A:23,34,56;B:32,56\'', action="store", default=None, metavar=None)
parser.add_argument("-emnsteps", "--em_nsteps", help='Number of minimization steps (nsteps)', action="store", default=10000, type=int, metavar=10000)
parser.add_argument("-emct", "--em_cutoff", help='Cut-off in nm during energy minimization', action="store", default=1.4, type=float, metavar=1.4)
parser.add_argument("-emtol", "--emtol", help='Tolerance value (emtol) for energy minimization', action="store", default=10, type=float, metavar=10)
parser.add_argument("-emstep", "--emstep", help='Minimization step (emstep) for energy minimization', action="store", default=0.01, type=float, metavar=0.01)
parser.add_argument("-emnstyp", "--em_ns_type", help='Cut-off scheme during energy minimization', action="store", default="grid", metavar="grid")
parser.add_argument("-t0", "--time0", help='Starting time (ps) of mutated trajectory', action="store", default=0, type=float, metavar=0)
parser.add_argument("-tstep", "--timestep", help='Time step (ps) between frames of mutated trajectory', action="store", default=500, type=float, metavar=500)
return parser
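# Example invocation (illustrative only; the directory and option values are hypothetical):
#   python mutate_traj_tpr.py -drWT wildtype -drMT mutations \
#       -rr 'A:27,45,67-70;B:40' -rnm ALA -ff 6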
#def mutate(modelname, respos, restyp, chain):
def mutate_a_frame(modelname, outfile, residues):
def optimize(atmsel, sched):
#conjugate gradient
for step in sched:
step.optimize(atmsel, max_iterations=200, min_atom_shift=0.001)
#md
refine(atmsel)
cg = conjugate_gradients()
cg.optimize(atmsel, max_iterations=200, min_atom_shift=0.001)
#molecular dynamics
def refine(atmsel):
# at T=1000, max_atom_shift for 4fs is cca 0.15 A.
md = molecular_dynamics(cap_atom_shift=0.39, md_time_step=4.0, md_return='FINAL')
init_vel = True
for (its, equil, temps) in ((200, 20, (150.0, 250.0, 400.0, 700.0, 1000.0)), (200, 600, (1000.0, 800.0, 600.0, 500.0, 400.0, 300.0))):
for temp in temps:
md.optimize(atmsel, init_velocities=init_vel, temperature=temp, max_iterations=its, equilibrate=equil)
init_vel = False
#use homologs and dihedral library for dihedral angle restraints
def make_restraints(mdl1, aln):
rsr = mdl1.restraints
rsr.clear()
s = selection(mdl1)
for typ in ('stereo', 'phi-psi_binormal'):
rsr.make(s, restraint_type=typ, aln=aln, spline_on_site=True)
for typ in ('omega', 'chi1', 'chi2', 'chi3', 'chi4'):
rsr.make(s, restraint_type=typ+'_dihedral', spline_range=4.0,spline_dx=0.3, spline_min_points = 5, aln=aln, spline_on_site=True)
log.level(output=0, warnings=0, errors=1)
#log.verbose()
# Set a different value for rand_seed to get a different final model
env = environ(rand_seed=-49837)
env.io.hetatm = True
#soft sphere potential
env.edat.dynamic_sphere=False
#lennard-jones potential (more accurate)
env.edat.dynamic_lennard=True
env.edat.contact_shell = 4.0
env.edat.update_dynamic = 0.39
# Read customized topology file with phosphoserines (or standard one)
env.libs.topology.read(file='$(LIB)/top_heav.lib')
# Read customized CHARMM parameter library with phosphoserines (or standard one)
env.libs.parameters.read(file='$(LIB)/par.lib')
# Read the original PDB file and copy its sequence to the alignment array:
mdl1 = model(env, file=modelname)
ali = alignment(env)
ali.append_model(mdl1, atom_files=modelname, align_codes=modelname)
for residue in residues:
#set up the mutate residue selection segment
#s = selection(mdl1.chains[chain].residues[respos])
s = selection(mdl1.chains[residue.chain].residues[residue.pos])
#perform the mutate residue operation
#s.mutate(residue_type=restyp)
s.mutate(residue_type=residue.name)
#get two copies of the sequence. A modeller trick to get things set up
ali.append_model(mdl1, align_codes=modelname)
# Generate molecular topology for mutant
mdl1.clear_topology()
mdl1.generate_topology(ali[-1])
# Transfer all the coordinates you can from the template native structure
# to the mutant (this works even if the order of atoms in the native PDB
# file is not standard):
#here we are generating the model by reading the template coordinates
mdl1.transfer_xyz(ali)
# Build the remaining unknown coordinates
mdl1.build(initialize_xyz=False, build_method='INTERNAL_COORDINATES')
#yes model2 is the same file as model1. It's a modeller trick.
mdl2 = model(env, file=modelname)
#required to do a transfer_res_numb
#ali.append_model(mdl2, atom_files=modelname, align_codes=modelname)
#transfers from "model 2" to "model 1"
mdl1.res_num_from(mdl2,ali)
#It is usually necessary to write the mutated sequence out and read it in
#before proceeding, because not all sequence related information about MODEL
#is changed by this command (e.g., internal coordinates, charges, and atom
#types and radii are not updated).
mdl1.write(file=outfile+'.tmp')
mdl1.read(file=outfile+'.tmp')
#set up restraints before computing energy
#we do this a second time because the model has been written out and read in,
#clearing the previously set restraints
make_restraints(mdl1, ali)
#a non-bonded pair has to have at least as many selected atoms
mdl1.env.edat.nonbonded_sel_atoms=1
sched = autosched.loop.make_for_model(mdl1)
#only optimize the selected residue (in first pass, just atoms in selected
#residue, in second pass, include nonbonded neighboring atoms)
#set up the mutate residue selection segment
for residue in residues:
#s = selection(mdl1.chains[chain].residues[respos])
s = selection(mdl1.chains[residue.chain].residues[residue.pos])
mdl1.restraints.unpick_all()
mdl1.restraints.pick(s)
s.energy()
s.randomize_xyz(deviation=4.0)
mdl1.env.edat.nonbonded_sel_atoms=2
optimize(s, sched)
#feels environment (energy computed on pairs that have at least one member
#in the selected)
mdl1.env.edat.nonbonded_sel_atoms=1
optimize(s, sched)
s.energy()
#give a proper name
mdl1.write(file=outfile)
#delete the temporary file
os.remove(outfile+'.tmp')
class ChDir:
def __init__(self, newPath):
self.savedPath = os.getcwd()
os.chdir(newPath)
def __del__(self):
os.chdir(self.savedPath)
if __name__=="__main__":
main()
|
RashmiKumari/g_mmpbsa
|
tools/mutate_traj_tpr.py
|
Python
|
gpl-3.0
| 25,256
|
[
"CHARMM",
"Gromacs"
] |
203a47b423c565b46883687cc85c92af5bd0ce48e49f6182732224ae01e4fde7
|
#-------------------------------------------------------------------------------
# Cloud-COPASI
# Copyright (c) 2013 Edward Kent.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#-------------------------------------------------------------------------------
from cloud_copasi.web_interface.task_plugins.base import BaseTask, BaseTaskForm
from cloud_copasi.web_interface.models import Task, CondorJob, CondorPool
from cloud_copasi.web_interface.models import Subtask
from django.forms import Form
from django import forms
from cloud_copasi import settings
from copasi_model import PRCopasiModel # Use the task-specific copasi model in this directory
import os, math
import logging
from django.http.response import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from cloud_copasi.condor import condor_spec
from string import Template
from cloud_copasi.web_interface.task_plugins import load_balancing
import re
from django.utils.timezone import now
log = logging.getLogger(__name__)
os.environ['HOME'] = settings.STORAGE_DIR #This needs to be set to a writable directory
import matplotlib
matplotlib.use('Agg') #Use this so matplotlib can be used on a headless server. Otherwise requires DISPLAY env variable to be set.
import matplotlib.pyplot as plt
from matplotlib.pyplot import annotate
internal_type = ('parameter_estimation_repeat', 'Parameter estimation repeat')
class TaskForm(BaseTaskForm):
#Any extra fields for the task submission form
parameter_estimation_data = forms.FileField(required=True, help_text='Select either a single data file, or if more than one data file is required, upload a .zip file containing multiple data files')
repeats = forms.IntegerField(required=True, min_value=1, help_text='The number of repeats to perform')
custom_report = forms.BooleanField(required=False, label='Use a custom report', help_text='Select this to use a custom report instead of the automatically generated one. If you select this, Condor-COPASI may not be able to process the output data, and the job will fail. However, you will still be able download the unprocessed results for manual processing. For output processing to work, you must create a report with custom fields added before the fields that would otherwise be automatically generated (Best Parameters, Best Value, CPU Time and Function Evaluations).')
skip_load_balancing_step = forms.BooleanField(required=False, help_text='Select this to skip the automatic load balancing step, and make the run time of each parallel job as short as possible. <span class="bold">Use with caution! This has the potential to overload the Condor system with huge numbers of parallel jobs.</span> Not applicable for some job types - see documentation for further details.')
class TaskPlugin(BaseTask):
subtasks = 2
def __init__(self, task):
self.use_load_balancing = not task.get_custom_field('skip_load_balancing_step')
self.data_files = task.get_custom_field('data_files')
if self.use_load_balancing:
self.subtasks = 4
else:
self.subtasks = 3
task.set_custom_field('repeats_per_job', 1)
super(TaskPlugin, self).__init__(task)
self.copasi_model = PRCopasiModel(os.path.join(self.task.directory, self.task.original_model))
self.repeats = self.task.get_custom_field('repeats')
self.custom_report = self.task.get_custom_field('custom_report')
repeats = self.repeats
def validate(self):
#TODO:Abstract this to a new COPASI class in this plugin package
return self.copasi_model.is_valid('PR')
def initialize_subtasks(self):
#Create new subtask objects, and save them
if self.use_load_balancing:
#Create the load balancing module
self.create_new_subtask('lb')
#The main module
self.create_new_subtask('main')
#And a subtask to process any results
self.create_new_subtask('process', local=True)
self.create_new_subtask('file')
def prepare_subtask(self, index):
"""Prepare the indexed subtask"""
if index == 1:
if self.use_load_balancing:
return self.process_lb_subtask()
else:
return self.process_main_subtask()
elif index == 2:
if self.use_load_balancing:
return self.process_main_subtask()
else:
return self.process_results_subtask()
elif index == 3:
if self.use_load_balancing:
return self.process_results_subtask()
else:
return self.create_optimal_file()
elif index == 4:
if not self.use_load_balancing:
raise Exception('No subtasks remaining')
else:
return self.create_optimal_file()
def process_lb_subtask(self):
#Prepare the necessary files to run the load balancing task on condor
filenames = self.copasi_model.prepare_pr_load_balancing()
#Construct the model files for this task
timeout = str(settings.IDEAL_JOB_TIME * 60)
if self.task.get_custom_field('rank'):
rank = str(self.task.get_custom_field('rank'))
else:
rank = ''
#model_filename = self.task.original_model
copasi_binary_dir, copasi_binary = os.path.split(settings.COPASI_LOCAL_BINARY)
#write the load balancing script
        load_balancing_script_template = Template(load_balancing.load_balancing_string)
        load_balancing_script_string = load_balancing_script_template.substitute(timeout=timeout,
copasi_binary='./' + copasi_binary,
copasi_file_1 = ('load_balancing_1.cps'),
copasi_file_10 = ('load_balancing_10.cps'),
copasi_file_100 = ('load_balancing_100.cps'),
copasi_file_1000 = ('load_balancing_1000.cps'),
)
load_balancing_script_filename = 'load_balance.sh'
load_balancing_file = open(os.path.join(self.task.directory, load_balancing_script_filename), 'w')
load_balancing_file.write(load_balancing_script_string)
load_balancing_file.close()
copasi_files_string = ''
for repeat in [1, 10, 100, 1000]:
copasi_files_string += 'load_balancing_%d.cps, ' % repeat
for data_file in self.data_files:
copasi_files_string += data_file + ', '
copasi_files_string = copasi_files_string.rstrip(', ') #Remove final comma
load_balancing_condor_template = Template(condor_spec.condor_string_header + condor_spec.load_balancing_spec_string)
load_balancing_condor_string = load_balancing_condor_template.substitute(pool_type=self.task.condor_pool.pool_type,
pool_address = self.task.condor_pool.address,
script = load_balancing_script_filename,
copasi_binary=settings.COPASI_LOCAL_BINARY,
arguments = str(timeout),
rank=rank,
copasi_files=copasi_files_string,
)
#write to the condor file
condor_file = open(os.path.join(self.task.directory, 'load_balancing.job'), 'w')
condor_file.write(load_balancing_condor_string)
condor_file.close()
subtask=self.get_subtask(1)
subtask.spec_file = 'load_balancing.job'
subtask.status = 'waiting'
subtask.set_custom_field('std_output_file', 'load_balancing.out')
subtask.set_custom_field('std_err_file', 'load_balancing.err')
subtask.set_custom_field('log_file', 'load_balancing.log')
subtask.set_custom_field('job_output', '')
subtask.set_custom_field('copasi_model', 'load_balancing.cps')
subtask.save()
return subtask
def process_main_subtask(self):
#Get the correct subtask
if self.use_load_balancing:
subtask = self.get_subtask(2)
lb_job = CondorJob.objects.get(subtask=self.get_subtask(1))
#Read the load_balancing.out file
output = open(os.path.join(subtask.task.directory, lb_job.std_output_file), 'r')
for line in output.readlines():
line = line.rstrip('\n')
if line != '':
repeats_str, time_str = line.split(' ')
try:
lb_repeats = int(repeats_str)
time = float(time_str)
except Exception, e:
log.exception(e)
lb_repeats = 1
time = settings.IDEAL_JOB_TIME
time_per_run = time / lb_repeats
#Work out the number of repeats per job. If this is more than the original number of repeats specified, then just use the original number
repeats_per_job = min(int(round(settings.IDEAL_JOB_TIME * 60 / time_per_run)), self.repeats)
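            # Illustrative arithmetic (assumed numbers): with IDEAL_JOB_TIME = 30 min and a
            # measured time_per_run of 12 s, repeats_per_job = min(round(30 * 60 / 12),
            # self.repeats) = min(150, self.repeats).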
if repeats_per_job < 1:
repeats_per_job = 1
else:
subtask = self.get_subtask(1)
repeats_per_job = 1
#If no load balancing step required:
model_files = self.copasi_model.prepare_pr_jobs(self.repeats, repeats_per_job, subtask.index, self.custom_report)
condor_pool = self.task.condor_pool
condor_job_file = self.copasi_model.prepare_pr_condor_job(condor_pool.pool_type,
condor_pool.address,
len(model_files),
subtask.index,
self.data_files,
rank='')
log.debug('Prepared copasi files %s'%model_files)
log.debug('Prepared condor job %s' %condor_job_file)
model_count = len(model_files)
self.task.set_custom_field('model_count', model_count)
subtask.spec_file = condor_job_file
subtask.status = 'ready'
subtask.save()
return subtask
def process_results_subtask(self):
if self.use_load_balancing:
main_subtask = self.get_subtask(2)
subtask = self.get_subtask(3)
else:
main_subtask = self.get_subtask(1)
subtask = self.get_subtask(2)
assert isinstance(subtask, Subtask)
subtask.start_time = now()
#Go through and collate the results
#This is reasonably computationally simple, so we run locally
directory = self.task.directory
main_jobs = CondorJob.objects.filter(subtask=main_subtask)
results_files = [job.job_output for job in main_jobs]
success = self.copasi_model.process_pr_results(results_files, self.custom_report)
if not success:
self.task.results_view = False
self.task.results_download = False
#Delete the final subtask
if self.use_load_balancing:
final_subtask_index = 4
else:
final_subtask_index = 3
final_subtask = Subtask.objects.filter(task=self.task).get(index=final_subtask_index)
log.debug('deleting model creation subtask since no results could be identified in output')
final_subtask.delete()
else:
self.task.results_view = True
self.task.results_download = True
self.task.save()
subtask.status = 'finished'
subtask.finish_time = now()
subtask.set_run_time(time_delta=subtask.finish_time - subtask.start_time)
subtask.save()
return subtask
def create_optimal_file(self):
"""Create a copasi file containing the best values
"""
if self.use_load_balancing:
subtask = self.get_subtask(4)
else:
subtask = self.get_subtask(3)
optimal_model = self.copasi_model.create_pr_best_value_model(subtask.index, custom_report=self.custom_report)
condor_pool = self.task.condor_pool
optimal_condor_job_file = self.copasi_model.prepare_pr_optimal_model_condor_job(condor_pool.pool_type,
condor_pool.address,
1,
subtask.index,
self.data_files,
rank='')
subtask.status = 'ready'
subtask.spec_file = optimal_condor_job_file
subtask.set_custom_field('job_output', '')
subtask.save()
return subtask
#===========================================================================
# Results view code, including a form to update the plot
#===========================================================================
def get_results_view_template_name(self, request):
"""Return a string with the HTML code to be used in the task results view page
"""
#Get the name of the page we're displaying. If not specified, assume main
page_name = request.GET.get('name', 'main')
if page_name == 'main':
return self.get_template_name('results_view')
else: return ''
def get_results_view_data(self, request):
#Get the name of the page we're displaying. If not specified, assume main
page_name = request.GET.get('name', 'main')
if page_name == 'main':
model = self.copasi_model
results = model.get_or_best_value()
best_value = results[0][1]
best_params = results[1:]
output = {'best_value' : best_value,
'best_params' : best_params,
}
return output
def get_results_download_data(self, request):
page_name = request.GET.get('name', 'main')
if page_name == 'main':
#Return the file results.txt
filename = os.path.join(self.task.directory, 'results.txt')
if not os.path.isfile(filename):
request.session['errors'] = [('Cannot Return Output', 'There was an internal error processing the results file')]
return HttpResponseRedirect(reverse_lazy('task_details', kwargs={'task_id':self.task.id}))
result_file = open(filename, 'r')
response = HttpResponse(result_file, content_type='text/tab-separated-values')
response['Content-Disposition'] = 'attachment; filename=%s_results.txt' % (self.task.name.replace(' ', '_'))
response['Content-Length'] = os.path.getsize(filename)
return response
elif page_name == 'raw_results':
filename = os.path.join(self.task.directory, 'raw_results.txt')
if not os.path.isfile(filename):
request.session['errors'] = [('Cannot Return Output', 'There was an internal error processing the results file')]
return HttpResponseRedirect(reverse_lazy('task_details', kwargs={'task_id':self.task.id}))
result_file = open(filename, 'r')
response = HttpResponse(result_file, content_type='text/tab-separated-values')
response['Content-Disposition'] = 'attachment; filename=%s_raw_results.txt' % (self.task.name.replace(' ', '_'))
response['Content-Length'] = os.path.getsize(filename)
return response
elif page_name == 'optimal_model':
subtask_count = Subtask.objects.filter(task=self.task).count()
optimal_filename = 'run_auto_copasi_%d.0.cps' % subtask_count
filename = os.path.join(self.task.directory, optimal_filename)
if not os.path.isfile(filename):
request.session['errors'] = [('Cannot Return Output', 'There was an internal error processing the results file')]
return HttpResponseRedirect(reverse_lazy('task_details', kwargs={'task_id':self.task.id}))
result_file = open(filename, 'r')
response = HttpResponse(result_file, content_type='application/xml')
response['Content-Disposition'] = 'attachment; filename=%s_optimal_model.cps' % (self.task.name.replace(' ', '_'))
response['Content-Length'] = os.path.getsize(filename)
return response
|
edkent/cloud-copasi
|
cloud_copasi/web_interface/task_plugins/plugins/parameter_estimation_repeat/plugin.py
|
Python
|
gpl-3.0
| 18,223
|
[
"COPASI"
] |
3cecd1f91eaa14974bf91d3f57b8b775590ee1312af95836805b3405a91cc6cf
|
import six
from fastidious.expressions import (Not, ZeroOrMoreExpr, LabeledExpr, SeqExpr,
ChoiceExpr, MaybeExpr, OneOrMoreExpr,
ExprProxi)
from fastidious.compiler.astutils import Visitor
from fastidious.compiler.action.base import Action
class LinkError(Exception):
pass
class ParserGraphVisitor(Visitor):
def __init__(self, vertical=False):
self.content = six.StringIO()
rankdir = "TB" if vertical else "LR"
self.content.write("""digraph astgraph {
node [fontsize=12, fontname="Courier", height=.1];
ranksep=.3;
rankdir=%s;
edge [arrowsize=.5, fontname="Courier"]
""" % rankdir)
self.current_id = 0
self.nodes = {}
self.bypasses = {}
self.missing_bypasses = set()
self.links = set()
def node_name(self, node):
if node not in self.nodes:
self.current_id += 1
self.nodes[node] = self.current_id
return "node_%s" % self.nodes[node]
def cluster_name(self, node):
if node not in self.nodes:
self.current_id += 1
self.nodes[node] = self.current_id
return "cluster_%s" % self.nodes[node]
def link(self, node1, node2, label=None):
if isinstance(node1, ExprProxi):
node1 = node1.proxied
return self.link(node1, node2, label)
elif isinstance(node2, ExprProxi):
node2 = node2.expr
return self.link(node1, node2, label)
elif isinstance(node1, (LabeledExpr, Not, OneOrMoreExpr)):
node1 = node1.expr
return self.link(node1, node2, label)
elif isinstance(node2, (LabeledExpr, Not, OneOrMoreExpr)):
node2 = node2.expr
return self.link(node1, node2, label)
elif isinstance(node1, ZeroOrMoreExpr):
node1 = node1.expr
try:
self.link(self.bypasses[node1], node2)
except KeyError:
self.missing_bypasses.add((node1, node2, None))
return self.link(node1, node2, label)
elif isinstance(node1, MaybeExpr):
node1 = node1.expr
try:
self.link(self.bypasses[node1], node2, label="?")
except KeyError:
self.missing_bypasses.add((node1, node2, "?"))
return self.link(node1, node2, label)
elif isinstance(node2, (MaybeExpr, ZeroOrMoreExpr)):
node2 = node2.expr
self.bypasses[node2] = node1
return self.link(node1, node2, label)
elif isinstance(node1, SeqExpr):
node1 = node1.exprs[-1]
return self.link(node1, node2, label)
elif isinstance(node2, SeqExpr):
node2 = node2.exprs[0]
return self.link(node1, node2, label)
elif isinstance(node1, ChoiceExpr):
for n in node1.exprs:
self.link(n, node2, label)
return
elif isinstance(node2, ChoiceExpr):
for n in node2.exprs:
self.link(node1, n, label)
return
if node1 not in self.nodes:
raise LinkError(node1)
if node2 not in self.nodes:
raise LinkError(node2)
if (node1, node2, label) in self.links:
return
if label is not None:
labelstr = ' [label="%s"]' % label
else:
labelstr = ""
self.content.write(
" %s -> %s%s\n" % (self.node_name(node1),
self.node_name(node2), labelstr))
self.links.add((node1, node2, label))
def visit_rule(self, node):
s = ' %s [label="%s", shape="rect", style=bold]\n' % (
self.node_name(node), node.name)
self.content.write(s)
self.visit(node.expr)
self.link(node, node.expr)
if isinstance(node.action, Action):
label = node.action.__string__()
else:
label = node.action if node.action else " "
dummy = object()
s = ' %s [label="%s", shape="box"]\n' % (self.node_name(dummy), label)
self.content.write(s)
self.link(node.expr, dummy)
for n1, n2, label in self.missing_bypasses:
self.link(self.bypasses[n1], n2, label)
self.missing_bypasses = set()
def visit_seqexpr(self, node):
lastnode = None
for expr in node.exprs:
self.visit(expr)
if lastnode is not None:
self.link(lastnode, expr)
lastnode = expr
def visit_choiceexpr(self, node):
for e in node.exprs:
self.visit(e)
def visit_labeledexpr(self, node):
self.content.write(""" subgraph %s {
label="%s";
color=grey;
""" % (self.cluster_name(node), node.name))
self.visit(node.expr)
self.content.write(" }\n")
def visit_oneormoreexpr(self, node):
self.visit(node.expr)
self.link(node.expr, node.expr, "+")
def visit_zeroormoreexpr(self, node):
self.visit(node.expr)
self.link(node.expr, node.expr, "*")
def generic_visit(self, node):
label = node.as_grammar()
label = label.replace("\\'", "'")
label = label.replace("\\", "\\\\")
label = label.replace('"', '\\"')
s = ' %s [label="%s"]\n' % (self.node_name(node), label)
self.content.write(s)
def visit_not(self, node):
self.content.write(""" subgraph %s {
label="!";
style="dashed";
""" % (self.cluster_name(node), ))
self.visit(node.expr)
self.content.write(" }\n")
def visit_maybeexpr(self, node):
self.visit(node.expr)
def generate_dot(self, nodes):
for node in nodes[::-1]:
self.visit(node)
self.content.write("}\n")
return self.content.getvalue()
class ParserGraphVisitorExpander(ParserGraphVisitor):
def __init__(self, start_node):
super(ParserGraphVisitorExpander, self).__init__(vertical=True)
self.start_node = start_node
def gendot(nodes, expand_nodes=False, start_node=None):
if not expand_nodes:
v = ParserGraphVisitor()
return v.generate_dot(nodes)
else:
return ParserGraphVisitorExpander(start_node).generate_dot(nodes)
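# Illustrative usage of the output (not part of this module; 'rules' is a hypothetical
# list of parser rule nodes): the returned string is Graphviz dot source, so it can be
# written to a file and rendered with the Graphviz CLI, e.g.
#   with open("grammar.dot", "w") as f:
#       f.write(gendot(rules))
#   # then: dot -Tsvg grammar.dot -o grammar.svg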
|
lisael/fastidious
|
fastidious/compilers/gendot.py
|
Python
|
gpl-3.0
| 6,338
|
[
"VisIt"
] |
c97c38418e0ea783d604d2d875bc3a19c59244c03fa8a047a6013383edaf5d89
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Connection and Authentication classes.
Information about setting up your Google OAUTH2 credentials:
For libcloud, there are two basic methods for authenticating to Google using
OAUTH2: Service Accounts and Client IDs for Installed Applications.
Both are initially set up from the Cloud Console -
https://cloud.google.com/console
Setting up Service Account authentication (note that you need the PyCrypto
package installed to use this):
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Click on "Create New Client ID..."
- Select "Service account" and click on "Create Client ID"
- Download the Private Key (should happen automatically). The key you download
is in JSON format.
- Move the .json file to a safe location.
- Optionally, you may choose to Generate a PKCS12 key from the Console.
It needs to be converted to the PEM format. Please note, the PKCS12 format
is deprecated and may be removed in a future release.
- Convert the key using OpenSSL (the default password is 'notasecret').
- Move the .pem file to a safe location.
- To Authenticate, you will need to pass the Service Account's "Email
address" in as the user_id and the path to the .pem file as the key.
Setting up Installed Application authentication:
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Select "Installed application" and "Other" then click on
"Create Client ID"
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
secret" as the key
- The first time that you do this, libcloud will give you a URL to
visit. Copy and paste the URL into a browser.
- When you go to the URL it will ask you to log in (if you aren't already)
and ask you if you want to allow the project access to your account.
- Click on Accept and you will be given a code.
- Paste that code at the prompt given to you by the Google libcloud
connection.
- At that point, a token & refresh token will be stored in your home
directory and will be used for authentication.
Please remember to secure your keys and access tokens.
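Illustrative example (not from the original documentation; the email address and
key path below are placeholders) of a Service Account connection built from the
classes defined in this module::
    conn = GoogleBaseConnection(
        'my-sa@my-project.iam.gserviceaccount.com',
        '/path/to/key.json',
        auth_type=GoogleAuthType.SA,
        scopes=['https://www.googleapis.com/auth/compute'])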
"""
from __future__ import with_statement
try:
import simplejson as json
except ImportError:
import json
import base64
import errno
import time
import datetime
import logging
import os
import socket
import sys
from libcloud.utils.connection import get_response_object
from libcloud.utils.py3 import b, httplib, urlencode, urlparse, PY3
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
PollingConnection)
from libcloud.common.types import (ProviderError,
LibcloudError)
try:
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import Crypto.Random
Crypto.Random.atfork()
except ImportError:
# The pycrypto library is unavailable
SHA256 = None
RSA = None
PKCS1_v1_5 = None
UTC_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def _utcnow():
"""
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
return datetime.datetime.utcnow()
def _utc_timestamp(datetime_obj):
return datetime_obj.strftime(UTC_TIMESTAMP_FORMAT)
def _from_utc_timestamp(timestamp):
return datetime.datetime.strptime(timestamp, UTC_TIMESTAMP_FORMAT)
def _get_gce_metadata(path=''):
try:
url = 'http://metadata/computeMetadata/v1/' + path.lstrip('/')
headers = {'Metadata-Flavor': 'Google'}
response = get_response_object(url, headers=headers)
return response.status, '', response.body
except Exception as e:
return -1, str(e), None
class GoogleAuthError(LibcloudError):
"""Generic Error class for various authentication errors."""
def __init__(self, value):
self.value = value
def __repr__(self):
return repr(self.value)
class GoogleBaseError(ProviderError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
super(GoogleBaseError, self).__init__(value, http_code, driver)
class InvalidRequestError(GoogleBaseError):
pass
class JsonParseError(GoogleBaseError):
pass
class ResourceNotFoundError(GoogleBaseError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
if isinstance(value, dict) and 'message' in value and \
value['message'].count('/') == 1 and \
value['message'].count('projects/') == 1:
value['message'] = value['message'] + ". A missing project " \
"error may be an authentication issue. " \
"Please ensure your auth credentials match " \
"your project. "
super(GoogleBaseError, self).__init__(value, http_code, driver)
class QuotaExceededError(GoogleBaseError):
pass
class ResourceExistsError(GoogleBaseError):
pass
class ResourceInUseError(GoogleBaseError):
pass
class GoogleResponse(JsonResponse):
"""
Google Base Response class.
"""
def success(self):
"""
Determine if the request was successful.
For the Google response class, tag all responses as successful and
raise appropriate Exceptions from parse_body.
:return: C{True}
"""
return True
def _get_error(self, body):
"""
Get the error code and message from a JSON response.
Return just the first error if there are multiple errors.
:param body: The body of the JSON response dictionary
:type body: ``dict``
:return: Tuple containing error code and message
:rtype: ``tuple`` of ``str`` or ``int``
"""
if 'errors' in body['error']:
err = body['error']['errors'][0]
else:
err = body['error']
if 'code' in err:
code = err.get('code')
message = err.get('message')
else:
code = err.get('reason', None)
message = body.get('error_description', err)
return (code, message)
def parse_body(self):
"""
Parse the JSON response body, or raise exceptions as appropriate.
:return: JSON dictionary
:rtype: ``dict``
"""
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
json_error = False
try:
body = json.loads(self.body)
except:
# If there is both a JSON parsing error and an unsuccessful http
# response (like a 404), we want to raise the http error and not
# the JSON one, so don't raise JsonParseError here.
body = self.body
json_error = True
valid_http_codes = [
httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.CONFLICT,
]
if self.status in valid_http_codes:
if json_error:
raise JsonParseError(body, self.status, None)
elif 'error' in body:
(code, message) = self._get_error(body)
if code == 'QUOTA_EXCEEDED':
raise QuotaExceededError(message, self.status, code)
elif code == 'RESOURCE_ALREADY_EXISTS':
raise ResourceExistsError(message, self.status, code)
elif code == 'alreadyExists':
raise ResourceExistsError(message, self.status, code)
elif code.startswith('RESOURCE_IN_USE'):
raise ResourceInUseError(message, self.status, code)
else:
raise GoogleBaseError(message, self.status, code)
else:
return body
elif self.status == httplib.NOT_FOUND:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise ResourceNotFoundError(message, self.status, code)
elif self.status == httplib.BAD_REQUEST:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise InvalidRequestError(message, self.status, code)
else:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise GoogleBaseError(message, self.status, code)
class GoogleBaseDriver(object):
name = "Google API"
class GoogleBaseAuthConnection(ConnectionUserAndKey):
"""
Base class for Google Authentication. Should be subclassed for specific
types of authentication.
"""
driver = GoogleBaseDriver
responseCls = GoogleResponse
name = 'Google Auth'
host = 'accounts.google.com'
auth_path = '/o/oauth2/auth'
def __init__(self, user_id, key=None, scopes=None,
redirect_uri='urn:ietf:wg:oauth:2.0:oob',
login_hint=None, **kwargs):
"""
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:param scopes: A list of urls defining the scope of authentication
to grant.
:type scopes: ``list``
:keyword redirect_uri: The Redirect URI for the authentication
request. See Google OAUTH2 documentation for
more info.
:type redirect_uri: ``str``
:keyword login_hint: Login hint for authentication request. Useful
for Installed Application authentication.
:type login_hint: ``str``
"""
scopes = scopes or []
self.scopes = " ".join(scopes)
self.redirect_uri = redirect_uri
self.login_hint = login_hint
super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)
def add_default_headers(self, headers):
headers['Content-Type'] = "application/x-www-form-urlencoded"
headers['Host'] = self.host
return headers
def _token_request(self, request_body):
"""
Return an updated token from a token request body.
:param request_body: A dictionary of values to send in the body of the
token request.
:type request_body: ``dict``
:return: A dictionary with updated token information
:rtype: ``dict``
"""
data = urlencode(request_body)
try:
response = self.request('/o/oauth2/token', method='POST',
data=data)
except AttributeError:
raise GoogleAuthError('Invalid authorization response, please '
'check your credentials and time drift.')
token_info = response.object
if 'expires_in' in token_info:
expire_time = _utcnow() + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = _utc_timestamp(expire_time)
return token_info
def refresh_token(self, token_info):
"""
Refresh the current token.
Fetch an updated refresh token from internal metadata service.
:param token_info: Dictionary containing token information.
(Not used, but here for compatibility)
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
return self.get_new_token()
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
"""Authentication connection for "Installed Application" authentication."""
def get_code(self):
"""
Give the user a URL that they can visit to authenticate and obtain a
code. This method will ask for that code that the user can paste in.
Mocked in libcloud.test.common.google.GoogleTestCase.
:return: Code supplied by the user after authenticating
:rtype: ``str``
"""
auth_params = {'response_type': 'code',
'client_id': self.user_id,
'redirect_uri': self.redirect_uri,
'scope': self.scopes,
'state': 'Libcloud Request'}
if self.login_hint:
auth_params['login_hint'] = self.login_hint
data = urlencode(auth_params)
url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
print('\nPlease Go to the following URL and sign in:')
print(url)
if PY3:
code = input('Enter Code: ')
else:
code = raw_input('Enter Code: ')
return code
def get_new_token(self):
"""
Get a new token. Generally used when no previous token exists or there
is no refresh token
:return: Dictionary containing token information
:rtype: ``dict``
"""
# Ask the user for a code
code = self.get_code()
token_request = {'code': code,
'client_id': self.user_id,
'client_secret': self.key,
'redirect_uri': self.redirect_uri,
'grant_type': 'authorization_code'}
return self._token_request(token_request)
def refresh_token(self, token_info):
"""
Use the refresh token supplied in the token info to get a new token.
:param token_info: Dictionary containing current token information
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
if 'refresh_token' not in token_info:
return self.get_new_token()
refresh_request = {'refresh_token': token_info['refresh_token'],
'client_id': self.user_id,
'client_secret': self.key,
'grant_type': 'refresh_token'}
new_token = self._token_request(refresh_request)
if 'refresh_token' not in new_token:
new_token['refresh_token'] = token_info['refresh_token']
return new_token
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for "Service Account" authentication."""
def __init__(self, user_id, key, *args, **kwargs):
"""
Check to see if PyCrypto is available, and convert key file path into a
key string if the key is in a file.
:param user_id: Email address to be used for Service Account
authentication.
:type user_id: ``str``
:param key: The RSA Key or path to file containing the key.
:type key: ``str``
"""
if SHA256 is None:
raise GoogleAuthError('PyCrypto library required for '
'Service Account Authentication.')
# Check to see if 'key' is a file and read the file if it is.
if key.find("PRIVATE KEY---") == -1:
# key is a file
keypath = os.path.expanduser(key)
is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
if not is_file_path:
raise ValueError("Missing (or not readable) key "
"file: '%s'" % key)
with open(keypath, 'r') as f:
contents = f.read()
try:
key = json.loads(contents)
key = key['private_key']
except ValueError:
key = contents
logger = logging.getLogger(__name__)
logger.warn('%s not in JSON format. This format is '
'deprecated. Please download a JSON key '
'from the Cloud Console.' % keypath)
super(GoogleServiceAcctAuthConnection, self).__init__(
user_id, key, *args, **kwargs)
def get_new_token(self):
"""
Get a new token using the email address and RSA Key.
:return: Dictionary containing token information
:rtype: ``dict``
"""
# The header is always the same
header = {'alg': 'RS256', 'typ': 'JWT'}
header_enc = base64.urlsafe_b64encode(b(json.dumps(header)))
# Construct a claim set
claim_set = {'iss': self.user_id,
'scope': self.scopes,
'aud': 'https://accounts.google.com/o/oauth2/token',
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
claim_set_enc = base64.urlsafe_b64encode(b(json.dumps(claim_set)))
# The message contains both the header and claim set
message = b'.'.join((header_enc, claim_set_enc))
# Then the message is signed using the key supplied
key = RSA.importKey(self.key)
hash_func = SHA256.new(message)
signer = PKCS1_v1_5.new(key)
signature = base64.urlsafe_b64encode(signer.sign(hash_func))
# Finally the message and signature are sent to get a token
jwt = b'.'.join((message, signature))
request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': jwt}
return self._token_request(request)
class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for self-authentication when used with a GCE
instance that supports serviceAccounts.
"""
def get_new_token(self):
"""
Get a new token from the internal metadata service.
:return: Dictionary containing token information
:rtype: ``dict``
"""
path = '/instance/service-accounts/default/token'
http_code, http_reason, token_info = _get_gce_metadata(path)
if http_code == httplib.NOT_FOUND:
raise ValueError("Service Accounts are not enabled for this "
"GCE instance.")
if http_code != httplib.OK:
raise ValueError("Internal GCE Authorization failed: "
"'%s'" % str(http_reason))
token_info = json.loads(token_info)
if 'expires_in' in token_info:
expire_time = _utcnow() + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = _utc_timestamp(expire_time)
return token_info
class GoogleAuthType(object):
"""
SA (Service Account),
IA (Installed Application),
GCE (Auth from a GCE instance with service account enabled)
GCS_S3 (Cloud Storage S3 interoperability authentication)
"""
SA = 'SA'
IA = 'IA'
GCE = 'GCE'
GCS_S3 = 'GCS_S3'
ALL_TYPES = [SA, IA, GCE, GCS_S3]
OAUTH2_TYPES = [SA, IA, GCE]
@classmethod
def guess_type(cls, user_id):
if cls._is_sa(user_id):
return cls.SA
elif cls._is_gce():
return cls.GCE
elif cls._is_gcs_s3(user_id):
return cls.GCS_S3
else:
return cls.IA
@classmethod
def is_oauth2(cls, auth_type):
return auth_type in cls.OAUTH2_TYPES
@staticmethod
def _is_gce():
"""
Checks if we can access the GCE metadata server.
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
http_code, http_reason, body = _get_gce_metadata()
if http_code == httplib.OK and body:
return True
return False
@staticmethod
def _is_gcs_s3(user_id):
"""
Checks S3 key format: 20 alphanumeric chars starting with GOOG.
"""
return len(user_id) == 20 and user_id.startswith('GOOG')
@staticmethod
def _is_sa(user_id):
return user_id.endswith('.gserviceaccount.com')
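# Illustrative behaviour of GoogleAuthType.guess_type() (the identifiers are made up,
# and the GCS_S3/IA cases assume the code is not running on a GCE instance):
#   'robot@my-project.iam.gserviceaccount.com' -> SA     (ends with .gserviceaccount.com)
#   'GOOGABCDEFGHIJKLMNOP'                     -> GCS_S3 (20 chars starting with 'GOOG')
#   '1234.apps.googleusercontent.com'          -> IA     (fallback)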
class GoogleOAuth2Credential(object):
default_credential_file = '~/.google_libcloud_auth'
def __init__(self, user_id, key, auth_type=None, credential_file=None,
scopes=None, **kwargs):
self.auth_type = auth_type or GoogleAuthType.guess_type(user_id)
if self.auth_type not in GoogleAuthType.ALL_TYPES:
raise GoogleAuthError('Invalid auth type: %s' % self.auth_type)
if not GoogleAuthType.is_oauth2(self.auth_type):
raise GoogleAuthError(('Auth type %s cannot be used with OAuth2' %
self.auth_type))
self.user_id = user_id
self.key = key
default_credential_file = '.'.join([self.default_credential_file,
user_id])
self.credential_file = credential_file or default_credential_file
# Default scopes to read/write for compute, storage, and dns.
self.scopes = scopes or [
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
]
self.token = self._get_token_from_file()
if self.auth_type == GoogleAuthType.GCE:
self.oauth2_conn = GoogleGCEServiceAcctAuthConnection(
self.user_id, self.scopes, **kwargs)
elif self.auth_type == GoogleAuthType.SA:
self.oauth2_conn = GoogleServiceAcctAuthConnection(
self.user_id, self.key, self.scopes, **kwargs)
elif self.auth_type == GoogleAuthType.IA:
self.oauth2_conn = GoogleInstalledAppAuthConnection(
self.user_id, self.key, self.scopes, **kwargs)
else:
raise GoogleAuthError('Invalid auth_type: %s' %
str(self.auth_type))
if self.token is None:
self.token = self.oauth2_conn.get_new_token()
self._write_token_to_file()
@property
def access_token(self):
if self.token_expire_utc_datetime < _utcnow():
self._refresh_token()
return self.token['access_token']
@property
def token_expire_utc_datetime(self):
return _from_utc_timestamp(self.token['expire_time'])
def _refresh_token(self):
self.token = self.oauth2_conn.refresh_token(self.token)
self._write_token_to_file()
def _get_token_from_file(self):
"""
Read credential file and return token information.
Mocked in libcloud.test.common.google.GoogleTestCase.
:return: Token information dictionary, or None
:rtype: ``dict`` or ``None``
"""
token = None
filename = os.path.realpath(os.path.expanduser(self.credential_file))
try:
with open(filename, 'r') as f:
data = f.read()
token = json.loads(data)
except IOError:
pass
return token
def _write_token_to_file(self):
"""
Write token to credential file.
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
filename = os.path.realpath(os.path.expanduser(self.credential_file))
data = json.dumps(self.token)
with os.fdopen(os.open(filename, os.O_CREAT | os.O_WRONLY,
int('600', 8)), 'w') as f:
f.write(data)
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
"""Base connection class for interacting with Google APIs."""
driver = GoogleBaseDriver
responseCls = GoogleResponse
host = 'www.googleapis.com'
poll_interval = 2.0
timeout = 180
def __init__(self, user_id, key=None, auth_type=None,
credential_file=None, scopes=None, **kwargs):
"""
Determine authentication type, set up appropriate authentication
connection and get initial authentication information.
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:keyword auth_type: See GoogleAuthType class for list and description
of accepted values.
If not supplied, auth_type will be guessed based
on value of user_id or if the code is running
on a GCE instance.
:type auth_type: ``str``
:keyword credential_file: Path to file for caching authentication
information.
:type credential_file: ``str``
:keyword scopes: List of OAuth2 scope URLs. The empty default sets
read/write access to Compute, Storage, and DNS.
:type scopes: ``list``
"""
super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)
self.oauth2_credential = GoogleOAuth2Credential(
user_id, key, auth_type, credential_file, scopes, **kwargs)
python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
sys.version_info[2])
ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
self.user_agent_append(ver_platform)
def add_default_headers(self, headers):
"""
@inherits: :class:`Connection.add_default_headers`
"""
headers['Content-Type'] = 'application/json'
headers['Host'] = self.host
return headers
def pre_connect_hook(self, params, headers):
"""
Check to make sure that token hasn't expired. If it has, get an
updated token. Also, add the token to the headers.
@inherits: :class:`Connection.pre_connect_hook`
"""
headers['Authorization'] = ('Bearer ' +
self.oauth2_credential.access_token)
return params, headers
def encode_data(self, data):
"""Encode data to JSON"""
return json.dumps(data)
def request(self, *args, **kwargs):
"""
@inherits: :class:`Connection.request`
"""
# Adds some retry logic for the occasional
# "Connection Reset by peer" error.
retries = 4
tries = 0
while tries < (retries - 1):
try:
return super(GoogleBaseConnection, self).request(
*args, **kwargs)
except socket.error:
e = sys.exc_info()[1]
if e.errno == errno.ECONNRESET:
tries = tries + 1
else:
raise e
# One more time, then give up.
return super(GoogleBaseConnection, self).request(*args, **kwargs)
def has_completed(self, response):
"""
Determine if operation has completed based on response.
:param response: JSON response
:type response: I{responseCls}
:return: True if complete, False otherwise
:rtype: ``bool``
"""
if response.object['status'] == 'DONE':
return True
else:
return False
def get_poll_request_kwargs(self, response, context, request_kwargs):
"""
@inherits: :class:`PollingConnection.get_poll_request_kwargs`
"""
return {'action': response.object['selfLink']}
def morph_action_hook(self, action):
"""
Update action to correct request path.
In many places, the Google API returns a full URL to a resource.
This will strip the scheme and host off of the path and just return
the request. Otherwise, it will prepend the base request_path to
the action.
:param action: The action to be called in the http request
:type action: ``str``
:return: The modified request based on the action
:rtype: ``str``
"""
if action.startswith('https://'):
u = urlparse.urlsplit(action)
request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
else:
request = self.request_path + action
return request
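# --- Hedged usage sketch (not part of the original module) ---
# morph_action_hook() accepts either a full URL returned by the API or a
# relative action and reduces both to a request path. The snippet below
# mirrors the urlsplit/urlunsplit step for the full-URL case, using the same
# urlparse module this file already relies on above.
def _morph_action_example():
    u = urlparse.urlsplit('https://www.googleapis.com/compute/v1/zones')
    return urlparse.urlunsplit(('', '', u[2], u[3], u[4]))  # '/compute/v1/zones'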
|
cryptickp/libcloud
|
libcloud/common/google.py
|
Python
|
apache-2.0
| 29,712
|
[
"VisIt"
] |
8d9eaa6125f26a0705125de05badb9a5f30539a67283c9de113a5a8474eb717d
|
# coding: utf-8
"""Wrappers for ABINIT main executables"""
from __future__ import unicode_literals, division, print_function
import os
from subprocess import Popen, PIPE
from monty.os.path import which
from pymatgen.util.string_utils import list_strings
from six.moves import map, cStringIO
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"Mrgscr",
"Mrggkk",
"Mrgddb",
]
class ExecError(Exception):
"""Error class raised by :class`ExecWrapper`"""
class ExecWrapper(object):
"""This class runs an executable in a subprocess."""
Error = ExecError
def __init__(self, executable=None, verbose=0):
"""
Args:
executable: path to the executable.
verbose: Verbosity level.
"""
if executable is None:
executable = self.name
self.executable = which(executable)
self.verbose = int(verbose)
if self.executable is None:
raise self.Error("Cannot find %s in $PATH\n Use export PATH=/dir_with_exec:$PATH" % executable)
assert os.path.basename(self.executable) == self.name
def __str__(self):
return "%s" % self.executable
def set_mpi_runner(self, mpi_runner="mpirun"):
# TODO better treatment of mpirunner syntax.
self._mpi_runner = mpi_runner
@property
def mpi_runner(self):
try:
return self._mpi_runner
except AttributeError:
return ""
@property
def name(self):
return self._name
def execute(self, cwd=None):
# Try to execute binary without and with mpirun.
try:
self._execute(cwd=cwd, with_mpirun=True)
except self.Error:
self._execute(cwd=cwd, with_mpirun=False)
def _execute(self, cwd=None, with_mpirun=False):
"""
Execute the executable in a subprocess.
"""
args = [self.executable, "<", self.stdin_fname, ">", self.stdout_fname, "2>", self.stderr_fname]
if self.mpi_runner and with_mpirun:
args.insert(0, self.mpi_runner)
self.cmd_str = " ".join(args)
p = Popen(self.cmd_str, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd)
self.stdout_data, self.stderr_data = p.communicate()
self.returncode = p.returncode
if self.returncode != 0:
with open(self.stdout_fname, "r") as out, open(self.stderr_fname, "r") as err:
self.stdout_data = out.read()
self.stderr_data = err.read()
if self.verbose > 3:
print("*** stdout: ***\n", self.stdout_data)
print("*** stderr ***\n", self.stderr_data)
raise self.Error("%s returned %s\n cmd_str: %s" % (self, self.returncode, self.cmd_str))
class Mrgscr(ExecWrapper):
_name = "mrgscr"
def merge_qpoints(self, files_to_merge, out_prefix, cwd=None):
"""
Execute mrgscr in a subprocess to merge files_to_merge. Produce new file with prefix out_prefix
If cwd is not None, the child's current directory will be changed to cwd before it is executed.
"""
# We work with absolute paths.
files_to_merge = [os.path.abspath(s) for s in list_strings(files_to_merge)]
nfiles = len(files_to_merge)
if self.verbose:
print("Will merge %d files with output_prefix %s" % (nfiles, out_prefix))
for (i, f) in enumerate(files_to_merge):
print(" [%d] %s" % (i, f))
if nfiles == 1:
raise self.Error("merge_qpoints does not support nfiles == 1")
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrgscr.stdin", "mrgscr.stdout", "mrgscr.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = cStringIO()
inp.write(str(nfiles) + "\n") # Number of files to merge.
inp.write(out_prefix + "\n") # Prefix for the final output file:
for filename in files_to_merge:
inp.write(filename + "\n") # List with the files to merge.
inp.write("1\n") # Option for merging q-points.
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
self.execute(cwd=cwd)
class Mrggkk(ExecWrapper):
_name = "mrggkk"
def merge(self, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0, cwd=None):
"""
Merge GGK files, return the absolute path of the new database.
Args:
gswfk_file: Ground-state WFK filename
dfpt_files: List of 1WFK files to merge.
gkk_files: List of GKK files to merge.
out_gkk: Name of the output GKK file
binascii: Integer flat. 0 --> binary output, 1 --> ascii formatted output
cwd: Directory where the subprocess will be executed.
"""
raise NotImplementedError("This method should be tested")
out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)
# We work with absolute paths.
        gswfk_file = os.path.abspath(gswfk_file)
dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]
print("Will merge %d 1WF files, %d GKK file in output %s" %
(len(dfpt_nfiles), len_gkk_files, out_gkk))
if self.verbose:
for i, f in enumerate(dfpt_files): print(" [%d] 1WF %s" % (i, f))
for i, f in enumerate(gkk_files): print(" [%d] GKK %s" % (i, f))
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = cStringIO()
inp.write(out_gkk + "\n") # Name of the output file
inp.write(str(binascii) + "\n") # Integer flag: 0 --> binary output, 1 --> ascii formatted output
inp.write(gswfk_file + "\n") # Name of the groud state wavefunction file WF
#dims = len(dfpt_files, gkk_files, ?)
dims = " ".join([str(d) for d in dims])
inp.write(dims + "\n") # Number of 1WF, of GKK files, and number of 1WF files in all the GKK files
# Names of the 1WF files...
for fname in dfpt_files:
inp.write(fname + "\n")
# Names of the GKK files...
for fname in gkk_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
self.execute(cwd=cwd)
return out_gkk
class Mrgddb(ExecWrapper):
_name = "mrgddb"
def merge(self, ddb_files, out_ddb, description, cwd=None):
"""Merge DDB file, return the absolute path of the new database."""
# We work with absolute paths.
ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
out_ddb = out_ddb if cwd is None else os.path.join(os.path.abspath(cwd), out_ddb)
print("Will merge %d files into output DDB %s" % (len(ddb_files), out_ddb))
if self.verbose:
for i, f in enumerate(ddb_files):
print(" [%d] %s" % (i, f))
# Handle the case of a single file since mrgddb uses 1 to denote GS files!
if len(ddb_files) == 1:
with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
for line in inh:
out.write(line)
return out_ddb
self.stdin_fname, self.stdout_fname, self.stderr_fname = "mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr"
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = cStringIO()
inp.write(out_ddb + "\n") # Name of the output file.
inp.write(str(description) + "\n") # Description.
inp.write(str(len(ddb_files)) + "\n") # Number of input DDBs.
# Names of the DDB files.
for fname in ddb_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
self.execute(cwd=cwd)
return out_ddb
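# --- Hedged usage sketch (not part of the original module) ---
# Typical use of Mrgddb: merge several partial DDB files into one database.
# The file names are hypothetical, and the mrgddb executable must be on
# $PATH because ExecWrapper resolves it with which() at construction time.
def _mrgddb_example():
    mrgddb = Mrgddb(verbose=1)
    return mrgddb.merge(
        ddb_files=['out_DS1_DDB', 'out_DS2_DDB'],   # hypothetical inputs
        out_ddb='flow_DDB',                         # merged output file
        description='Merged DDB for a phonon run',
        cwd='run_dir')                              # subprocess working directory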
|
Dioptas/pymatgen
|
pymatgen/io/abinitio/wrappers.py
|
Python
|
mit
| 9,009
|
[
"ABINIT",
"pymatgen"
] |
fbb5697970d966388a888180878495a3e7b14751fa780d6801a0777d42f7979b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# A copy of the GNU General Public License is available at
# http://www.gnu.org/licenses/gpl-3.0.html
"""Find homologous proteins and extract them in a multifasta."""
from __future__ import print_function
import ConfigParser
import argparse
import sys
import os
import subprocess
import re
import bisect
import multiprocessing as mp
import time
import csv
__author__ = "Amine Ghozlane"
__copyright__ = "Copyright 2014, INRA"
__credits__ = ["Amine Ghozlane"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Amine Ghozlane"
__email__ = "[email protected]"
__status__ = "Developpement"
class Inconfig:
def __init__(self, config_file, results):
"""Instantiate Inconfig object
Arguments:
config_file: Configuration file path
results: Result path
"""
self.hdict = {}
self.inconfig_file = '{0}hconfig.cfg'.format(results)
self.config = ConfigParser.RawConfigParser()
if(config_file is not None):
self.inconfig_file = config_file
self.readconfig()
elif(os.path.isfile(self.inconfig_file)):
self.readconfig()
else:
self.writeconfig()
self.readconfig()
def readconfig(self):
"""Read config data
"""
# If config parser empty
if(not os.path.isfile(self.inconfig_file)):
self.writeconfig()
# Read config file
self.config.read(self.inconfig_file)
# Get parameter value
self.hdict["ssearch"] = self.config.get('Homology_config','ssearch')
self.hdict["jackhmmer"] = self.config.get('Homology_config',
'jackhmmer')
#self.hdict["hhsearch"] = self.config.get('Homology_config', 'hhsearch')
self.hdict["psiblast"] = self.config.get('Homology_config', 'psiblast')
self.hdict["blastp"] = self.config.get('Homology_config', 'blastp')
self.hdict["hmmsearch"] = self.config.get('Homology_config', 'hmmsearch')
self.hdict["hmmscan"] = self.config.get('Homology_config', 'hmmscan')
self.hdict["clustalo"] = self.config.get('Alignment_config', 'clustalo')
def writeconfig(self):
"""Write Inconfig config
"""
self.config.add_section('Homology_config')
# -O option is not working...
self.config.set('Homology_config', 'ssearch',
"%path_softssearch36 %query %database "
"-E %e_value -p -m 8 -T %proc -s BP62 > %output")
self.config.set('Homology_config', 'jackhmmer',
"%path_softjackhmmer --tblout %output --cpu %proc "
"-E %e_value %query %database > log_jackhmmer.txt")
#self.config.set('Homology_config', 'hhsearch', "nothing")
self.config.set('Homology_config', 'psiblast', "%path_softpsiblast "
"-query %query -db %database -out %output "
"-evalue %e_value -outfmt 6 -num_threads %proc "
"-num_iterations 3")
self.config.set('Homology_config', 'blastp', "%path_softblastp "
"-query %query -db %database -out %output "
"-evalue %e_value -outfmt 6 -num_threads %proc "
"-max_target_seqs 1")
self.config.set('Homology_config', 'hmmsearch', "%path_softhmmsearch "
" --tblout %output --cpu %proc -E %e_value "
"%hmm_db %query > log_hmmsearch.txt")
self.config.set('Homology_config', 'hmmscan', "%path_softhmmscan "
" --tblout %output --cpu %proc -E %e_value "
"%hmm_db %query > log_hmmscan.txt")
self.config.add_section('Alignment_config')
self.config.set('Alignment_config', 'clustalo',
"%path_softclustalo -i %multifasta -o %output "
"--auto -t Protein --outfmt=fa")
# Write data
try:
            # Write the configuration data to self.inconfig_file
with open(self.inconfig_file, 'wt') as configfile:
self.config.write(configfile)
except IOError:
sys.exit("Error : cannot open file {0}".format(self.inconfig_file))
def isfile(path):
"""Check if path is an existing file.
Arguments:
path: Path to the file
"""
# from Jonathan Barnoud
if not os.path.isfile(path):
if os.path.isdir(path):
msg = "{0} is a directory".format(path)
else:
msg = "{0} does not exist.".format(path)
raise argparse.ArgumentTypeError(msg)
return path
def isdir(path):
"""Check if path is an existing file.
Arguments:
path: Path to the file
"""
if not os.path.isdir(path):
if os.path.isfile(path):
msg = "{0} is a file.".format(path)
else:
msg = "{0} does not exist.".format(path)
raise argparse.ArgumentTypeError(msg)
return path
def getArguments():
"""Retrieves the arguments of the program.
Returns: An object that contains the arguments
"""
# Parsing arguments
parser = argparse.ArgumentParser(description=__doc__, usage=
"Hfinder.py -q query.fasta "
"-d database.fasta")
parser.add_argument('-q', dest='query', type=isfile, required=True,
help='Path to the query file.')
parser.add_argument('-qm', dest='query_hmm', type=isfile,
                        help='Path to the query hmm profile (for hmmsearch/hmmscan).')
parser.add_argument('-d', dest='database', type=isfile, required=True,
help='Path to the database file (fasta).')
parser.add_argument('-db', dest='database_blast', type=str,
help='Path to the database file (blast).')
parser.add_argument('-s', dest='software', type=str,
choices=["blastp", "psiblast", "jackhmmer", "ssearch",
"hmmsearch", "hmmscan"],
nargs='+', default=["blastp", "jackhmmer", "ssearch"],
help='Select protein homology software.')
parser.add_argument('-p', dest='path_software', type=isdir,
nargs='+', help='Path to the software.')
parser.add_argument('-w', dest='path_clustalo', type=isdir, default=None,
help='Path to clustalo software.')
#parser.add_argument('-rev', dest='reverse', action='store_true',
#default=False)
parser.add_argument('-r', dest='results', type=isdir, default="." + os.sep,
help='Path to result directory.')
parser.add_argument('-n', dest='nbest', type=int, default=0,
help='Number of best.')
parser.add_argument('-t', dest='thread', type=int, default=mp.cpu_count(),
help='Number of thread.')
parser.add_argument('-b', dest='behavior', default=[],
choices=["force_computation", "extract",
"cumulative", "check"],
nargs='+', help='Behavior : force computation (if '
'previous result), extract sequence, check identity.')
parser.add_argument('-e', dest='e_value', type=float, default=0.001,
help='E-value threshold.')
parser.add_argument('-idmin', dest='filter_identity_minimum', type=float,
default=None,
help='Filter on identity minimum threshold.')
parser.add_argument('-idmax', dest='filter_identity_maximum', type=float,
default=None,
help='Filter on identity maximum threshold.')
parser.add_argument('-simmin', dest='filter_similarity_minimum', type=float,
default=None,
help='Filter on similarity minimum threshold.')
parser.add_argument('-simmax', dest='filter_similarity_maximum', type=float,
default=None,
help='Filter on similarity maximum threshold.')
parser.add_argument('-covmin', dest='filter_coverage_minimum', type=float,
default=None,
help='Filter on coverage minimum threshold.')
parser.add_argument('-covmax', dest='filter_coverage_maximum', type=float,
default=None,
help='Filter on coverage maximum threshold.')
parser.add_argument('-lmin', dest='filter_length_minimum', type=int,
default=None,
help='Filter on sequence length minimum threshold.')
parser.add_argument('-lmax', dest='filter_length_maximum', type=int,
default=None,
help='Filter on sequence length maximum threshold.')
parser.add_argument('-c', dest='config', type=isfile,
help='Path to configuration file.')
args = parser.parse_args()
return args
def run_command(cmd):
"""Run command
Arguments:
cmd: Command to run
"""
try:
# Execution de la ligne de commande
retcode = subprocess.call(cmd, shell=True)
# Cas aucun retour du soft
        if retcode is None:
sys.exit("Child was terminated")
except OSError, e:
sys.exit("Execution failed: {0}".format(e))
except:
sys.exit("There is something wrong with the command: {0}".format(cmd))
def replace_motif(build_command, path_soft, query, database, database_hmm,
output, thread, e_value):
"""
"""
print(build_command, file=sys.stderr)
if path_soft:
build_command = build_command.replace('%path_soft', path_soft)
else:
build_command = build_command.replace('%path_soft', "")
build_command = build_command.replace('%proc', str(thread))
build_command = build_command.replace('%query', query)
build_command = build_command.replace('%database', database)
if not database_hmm:
database_hmm = ""
build_command = build_command.replace('%hmm_db', database_hmm)
build_command = build_command.replace('%output', output)
build_command = build_command.replace('%e_value', str(e_value))
print(build_command, file=sys.stderr)
return build_command
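# --- Hedged usage sketch (not part of the original module) ---
# Shows how replace_motif() expands a command template such as the blastp
# entry written by Inconfig. All file names below are hypothetical.
def _replace_motif_example():
    template = ("%path_softblastp -query %query -db %database -out %output "
                "-evalue %e_value -outfmt 6 -num_threads %proc")
    return replace_motif(template, path_soft="", query="query.fasta",
                         database="db.fasta", database_hmm=None,
                         output="hits.txt", thread=4, e_value=0.001)
    # -> "blastp -query query.fasta -db db.fasta -out hits.txt
    #     -evalue 0.001 -outfmt 6 -num_threads 4"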
def extract_path(soft, path_software, num):
"""
"""
result = ""
try:
result = path_software[num]
except IndexError:
print("List software empty for {0}".format(soft))
return result
def path_empty(soft, software, num):
"""
"""
return ""
def extract_homology(output_file, soft): #, reverse):
"""
"""
homologous = []
interest = 1
#if reverse:
#interest = 0
#regex = get_regex_by_soft(soft)
try:
with open(output_file, "rt") as output:
if soft == "hmmsearch" or soft == "hmmscan":
if soft == "hmmsearch":
regex_hmm = re.compile(r"(\S+)\s+")
else:
regex_hmm = re.compile(r"\S+\s+\S+\s+(\S+)\s+")
for line in output:
if line[0] != "#":
hmm_match = regex_hmm.match(line)
if hmm_match:
homologous += [hmm_match.group(1)]
else:
output_reader = csv.reader(output, delimiter="\t")
for line in output_reader:
if len(line) == 12:
homologous += [line[interest]]
except IOError:
sys.exit("Error : cannot read {0}".format(output_file))
return homologous
def get_unique(data_list):
"""Get unique data
Arguments:
data_list: list of data
"""
return {}.fromkeys(data_list).keys()
def get_element(input_list, name):
"""Search name in input list
Arguments:
input_list: List
name: Search criteria
"""
# Searching the node with its name
i = bisect.bisect_left(input_list, name)
# Object has been found
if(i != len(input_list) and input_list[i] == name):
return True
return False
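# --- Hedged usage sketch (not part of the original module) ---
# get_element() is a bisect-based membership test, which is why callers sort
# their homologue lists before querying them. Minimal illustration:
def _get_element_example():
    hits = ['prot_A', 'prot_C', 'prot_Z']   # hypothetical, already sorted
    return get_element(hits, 'prot_C'), get_element(hits, 'prot_B')  # True, False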
def register_fasta(results, query_file, listhomology, output):
"""
"""
reader = False
try:
with open(output, "wt") as multifasta_out:
# Extract homologous
with open(query_file, "rt") as query:
for line in query:
if line.startswith(">"):
reader = False
title = line[1:].replace("\n", "").replace("\r", "")
if " " in title:
title = title.split(" ")[0]
# Select homologous
if get_element(listhomology, title):
multifasta_out.write(line)
reader = True
elif reader and len(line) > 0:
multifasta_out.write(line)
except IOError as e:
sys.exit("Something went wrong with the output\n{0}".format(e))
def create_fasta(compair):
"""
"""
try:
with open(compair[5], 'wt') as fasta_file:
fasta_file.write(">{1[0]}{0}{1[1]}{0}"
">{1[2]}{0}{1[3]}{0}".format(os.linesep, compair))
except IOError:
sys.exit("Error: cannot open file {0}".format(compair[5]))
def remove_temp_files(temp_files):
"""
"""
try:
for temp in temp_files:
os.remove(temp)
except OSError, e:
print("Error: cannot remove file {0}".format(e), file=sys.stderr)
def extract_sequence(multifasta_file, listhomology=None):
"""
"""
data = []
seq = False
try:
with open(multifasta_file, "rt") as multifasta:
for line in multifasta:
if line.startswith(">"):
reader = False
title = line[1:].replace("\n", "").replace("\r", "")
if " " in title:
title = title.split(" ")[0]
if listhomology:
if get_element(listhomology, title):
data += [[title, ""]]
seq = True
else:
title = ""
seq = False
else:
data += [[title, ""]]
seq = True
elif seq and len(line) > 0:
data[-1][1] += line.replace("\n", "").replace("\r", "")
else:
seq = False
assert(len(data) > 0)
except IOError:
sys.exit("Error : cannot open file {0}".format(multifasta_file))
except AssertionError:
sys.exit("Error : no sequence extracted from {0}".format(multifasta_file))
return data
def replace_sequence(cmd, path_soft, multifasta_file, alignment_file):
"""
"""
if path_soft:
cmd = cmd.replace('%path_soft', path_soft)
else:
cmd = cmd.replace('%path_soft', "")
cmd = cmd.replace('%multifasta', multifasta_file)
cmd = cmd.replace('%output', alignment_file)
return cmd
def map_sequence(results, cmd, query, database, listhomology, path_clustalo):
"""
"""
query_data = extract_sequence(query)
database_data = extract_sequence(database, listhomology)
idcompair = 0
comparison = []
for query in query_data:
for database in database_data:
if(query[0] != database[0]):
# query query_seq database database_seq output_file path_clustalo
multifasta_file = "{0}{1}{2}.fasta".format(results, os.sep,
idcompair)
alignment_file = "{0}{1}clustalo_{2}.fasta".format(results,
os.sep,
idcompair)
comparison += [query + database +
[replace_sequence(cmd, path_clustalo,
multifasta_file, alignment_file),
multifasta_file, alignment_file]]
idcompair += 1
return comparison
def extract_data(alignment_file):
"""
"""
data_aln = {}
keys = []
try:
with open(alignment_file) as align:
for line in align:
if line.startswith(">"):
head = line[1:].replace("\n", "").replace("\r", "")
data_aln[head] = ""
keys += [head]
elif len(line) > 0 :
data_aln[head] += line.replace("\n", "").replace("\r", "")
assert(data_aln != {} and len(data_aln) == 2)
except IOError:
sys.exit("Error cannot open {0}".format(alignment_file))
except AssertionError:
sys.exit("Nothing extracted or illegal length (!=2 alignment) "
"from {0}".format(alignment_file))
if len(data_aln[keys[0]]) != len(data_aln[keys[1]]):
sys.exit("The length of the alignment are different :\n"
"{0[0]}:{1} and {0[1]}:{2}".format(keys,
len(data_aln[keys[0]]),
len(data_aln[keys[1]])))
return data_aln
def estimate_parameters(query, database, seq_template, seq_aln):
""" Compute identity, similarity and coverage
"""
similarAA = ['AG', 'AP', 'AS', 'AT', 'DE', 'DN', 'DQ', 'ED', 'EN', 'EQ',
'FW', 'FY', 'GA', 'GP', 'GS', 'GT', 'HK', 'HR', 'IL', 'IM',
'IV', 'KH', 'KR', 'LI', 'LM', 'LV', 'MI', 'ML', 'MV', 'ND',
'NE', 'NQ', 'PA', 'PG', 'PS', 'PT', 'QD', 'QE', 'QN', 'RH',
'RK', 'SA', 'SG', 'SP', 'ST', 'TA', 'TG', 'TP', 'TS', 'VI',
'VL', 'VM', 'WF', 'WY', 'YF', 'YW']
aligned = 0.0
id_aa = 0.0
similar = 0.0
s1 = seq_template.replace("-", "")
s2 = seq_aln.replace("-", "")
for i in xrange(len(seq_template)):
if(seq_template[i] == "-" or seq_aln[i] == "-"):
pass
        elif(seq_template[i] == seq_aln[i] and seq_template[i] != "X"
             and seq_aln[i] != "X"):
id_aa += 1.0
aligned += 1.0
similar += 1.0
elif(get_element(similarAA, seq_template[i] + seq_aln[i])):
similar += 1.0
aligned += 1.0
elif(seq_template[i] != "X" and seq_aln[i] != "X"):
aligned += 1.0
# identity = 100.0*count/(float(len(aln1.translate(None,"-."))
# +len(aln2.translate(None,"-.")))/2.0)
similarity = 100.0 * similar / float(min(len(s1), len(s2)))
identity = 100.0 * id_aa / float(min(len(s1), len(s2)))
coverage = 100.0 * aligned / float(max(len(s1), len(s2)))
return [query, database, len(s1), len(s2), round(identity, 2),
round(similarity,2), round(coverage, 2)]
#return {"query":query, "database":database, "identity":identity,
#"similarity":similarity, "coverage":coverage,"query_len": len(s1),
#"database_length":len(s2)}
def align_sequences(compair):
"""Thread action
"""
# Write multifasta file
create_fasta(compair)
# Run alignment
run_command(compair[4])
# Extract aligment
data_alignment = extract_data(compair[6])
# Calculate parameters
# query_seq database_seq
result_ali = estimate_parameters(compair[0], compair[2],
data_alignment[compair[0]],
data_alignment[compair[2]])
# Remove files
remove_temp_files(compair[5:7])
return result_ali
def compute_sequence_properties(results, conf_data, path_clustalo, query,
database, listhomology, thread):
"""Compute sequence identity
"""
print("Get comparison")
# generator not available because multiprocessing pickling
comparaison = map_sequence(results, conf_data.hdict["clustalo"],
query, database, listhomology, path_clustalo)
print("Extraction done. {0} comparisons to do.".format(len(comparaison)))
startTime = time.time()
pool = mp.Pool(processes=thread)
asyncResult = pool.map_async(align_sequences, comparaison)
result_list = asyncResult.get()
#result_list = []
#for comp in comparaison:
#result_list += [align_sequences(comp)]
print("Comparison done. Time {0}s.".format(time.time() - startTime))
return result_list
def get_unique_element(data_list):
"""Get the unique elements in list
"""
return {}.fromkeys(data_list).keys()
def get_sequence_length(fasta_file, listhomology):
"""Get sequence length of homologous
"""
sequence_length = {}
interest = False
header = ""
try:
with open(fasta_file, "rt") as fasta:
for line in fasta:
if line.startswith(">"):
if interest:
sequence_length[header] = len(sequence)
header = line[1:].replace("\n", "").replace("\r", "").split(" ")[0]
if " " in header:
header = header.split(" ")[0]
# if element in list of homologous
interest = get_element(listhomology, header)
sequence = ""
elif interest and len(line) > 0:
sequence += line.replace("\n", "").replace("\r", "")
if interest:
sequence_length[header] = len(sequence)
assert(len(sequence_length.keys()) == len(listhomology))
except IOError:
sys.exit("Error cannot open {0}".format(fasta_file))
except AssertionError:
sys.exit("The length of every blast hit has not "
"been found in {0}".format(fasta_file))
return sequence_length
def write_hit_length(results, selected, not_selected, sequence_length,
output_file=None):
"""Write hit length
"""
if not output_file:
        # 'soft' is not defined in this scope; fall back to a generic name
        output_file = results + os.sep + "hit_length.csv"
try:
with open(output_file, "wt") as output:
output_writer = csv.writer(output,delimiter="\t")
output_writer.writerow(["Hit", "Length", "Selected"])
for element in selected:
output_writer.writerow([element, sequence_length[element],
1])
for element in not_selected:
output_writer.writerow([element, sequence_length[element],
0])
except IOError:
sys.exit("Error cannot open {0}".format(output_file))
def filter_sequence_length(results, fasta_file, listhomology, length_min,
length_max, soft):
"""
"""
selected = []
# Calculate hit length
sequence_length = get_sequence_length(fasta_file, listhomology)
# Filter hits
for element in listhomology:
select_min = True
select_max = True
if length_min:
select_min = (sequence_length[element] >= length_min)
if length_max:
select_max = (sequence_length[element] <= length_max)
if select_min and select_max:
selected.append(element)
# Write hit homology
not_selected = set(listhomology) - set(selected)
output_file = results + os.sep + soft + "_hit_length.csv"
print("Write hit sequence length in {0}".format(output_file))
write_hit_length(results, selected, not_selected, sequence_length,
output_file)
return selected
def write_check_data(results, result_dict, origin, nbest, selection=None):
"""Write the different properties of selected homologues
"""
output_file = results + origin + "_hit_properties.csv"
try:
with open(output_file, "wt") as output:
output_writer = csv.writer(output, delimiter='\t')
header = ["Reference", "Query", "Query_length", "Reference_length",
"Identity", "Similarity", "Coverage"]
if selection:
header += ["Selected"]
# Write header
output_writer.writerow(header)
for query in result_dict:
if selection:
if query in selection:
subset_data = [[query] + hit + [1]
for hit in result_dict[query]]
else:
subset_data = [[query] + hit + [0]
for hit in result_dict[query]]
else:
subset_data = [[query] + hit for hit in result_dict[query]]
# Sort depending on the identity and coverage
subset_data.sort(key=lambda x: x[4] + x[6], reverse=True)
if nbest > 0:
short_set = subset_data[0: nbest]
else:
short_set = subset_data
                output_writer.writerows(short_set)
except IOError:
sys.exit("Error : cannot open file {0}".format(output))
def order_dict(result_stat):
"""Order the information
"""
result_dict = {}
for hit in result_stat:
if hit[1] in result_dict:
result_dict[hit[1]] += [[hit[0]] + hit[2:]]
else:
result_dict[hit[1]] = [[hit[0]] + hit[2:]]
return result_dict
def check_parameters(result_dict, element, idmin, idmax, simmin, simmax, covmin,
covmax):
for hit in result_dict[element]:
select_id_min = True
select_id_max = True
select_sim_min = True
select_sim_max = True
select_cov_min = True
select_cov_max = True
        if idmin:
            select_id_min = (hit[3] >= idmin)
        if idmax:
            select_id_max = (hit[3] <= idmax)
        if simmin:
            select_sim_min = (hit[4] >= simmin)
        if simmax:
            select_sim_max = (hit[4] <= simmax)
        if covmin:
            select_cov_min = (hit[5] >= covmin)
        if covmax:
            select_cov_max = (hit[5] <= covmax)
        if(select_id_min and select_id_max
           and select_sim_min and select_sim_max
           and select_cov_min and select_cov_max):
            return True
return False
def filter_sequence_align(listhomology, result_dict, idmin, idmax, simmin,
simmax, covmin, covmax):
selected = []
for element in listhomology:
# We could do better with sort, but latter maybe
if check_parameters(result_dict, element, idmin, idmax, simmin,
simmax, covmin, covmax):
selected.append(element)
return selected
#==============================================================
# Main program
#==============================================================
def main():
"""
Main program function
"""
filtered_elements = None
all_listhomology = []
all_filtered_homologous = []
# Load arguments
args = getArguments()
# Check software
if args.path_software:
get_path = extract_path
else:
get_path = path_empty
# Configure option
conf_data = Inconfig(args.config, args.results)
# Analyze the homology for each software
for num, soft in enumerate(args.software):
output = args.results + soft + "_protein_homology.txt"
path_soft = get_path(soft, args.path_software, num)
# Compute homology
if not os.path.isfile(output) or "force_computation" in args.behavior:
#UGLY all of this
try:
if soft == "hmmsearch" or soft == "hmmscan":
if not args.query_hmm:
sys.exit("Please provide the hmm db with -dm option")
#if args.reverse:
## Reverse query and database
run_command(replace_motif(conf_data.hdict[soft], path_soft,
args.database, args.query,
args.query_hmm, output, args.thread,
args.e_value))
#else:
#run_command(replace_motif(conf_data.hdict[soft],
#path_soft, args.query,
#args.database,
#args.query_hmm, output,
#args.thread, args.e_value))
else:
if((soft == "blastp" or soft == "psiblast")
and args.database_blast):
run_command(
replace_motif(conf_data.hdict[soft], path_soft,
args.query, args.database_blast,
args.query_hmm, output,
args.thread, args.e_value))
else:
run_command(
replace_motif(conf_data.hdict[soft], path_soft,
args.query, args.database,
args.query_hmm, output,
args.thread, args.e_value))
except KeyError:
sys.exit("Key {0} is not in the configuration "
"file.".format(soft))
# Get the complete list here
# Parse result
print("parse result : " + soft)
listhomology = extract_homology(output, soft) #, args.reverse)
# Uniquify elements
listhomology = get_unique(listhomology)
# Sort the list for bisect
listhomology.sort()
if not "cumulative" in args.behavior and len(listhomology) > 0:
# Filter candidates with length
if args.filter_length_minimum or args.filter_length_maximum:
print("Filter length")
#if args.reverse:
#listhomology = filter_sequence_length(args.results,
#args.query, listhomology,
#args.filter_length_minimum,
#args.filter_length_maximum, soft)
#else:
listhomology = filter_sequence_length(args.results,
args.database, listhomology,
args.filter_length_minimum,
args.filter_length_maximum, soft)
# Check identity, similarity, coverage
if "check" in args.behavior:
# No output_fasta
#if args.reverse:
#else:
result_stat = compute_sequence_properties(
args.results, conf_data, args.path_clustalo,
args.query, args.database, listhomology,
args.thread)
# Order the information
result_dict = order_dict(result_stat)
if(args.filter_identity_minimum or args.filter_identity_maximum
or args.filter_similarity_minimum or args.filter_similarity_maximum
or args.filter_coverage_minimum or args.filter_coverage_maximum):
print("Filter sequence based on identity, similarity or coverage")
listhomology = filter_sequence_align(
listhomology, result_dict,
args.filter_identity_minimum,
args.filter_identity_maximum,
args.filter_similarity_minimum,
args.filter_similarity_maximum,
args.filter_coverage_minimum,
args.filter_coverage_maximum)
write_check_data(args.results, result_dict, soft, args.nbest,
listhomology)
else:
write_check_data(args.results, result_dict, soft, args.nbest)
# write statistics
# write_data(args.results, result_stat, soft)
# Extract homologous sequence
if "extract" in args.behavior:
output_fasta = args.results + soft + "_protein_homology.fasta"
# Get a multifasta that contains only the candidates and
# homologous proteins
print("Write sequence of the hits in {0}".format(output_fasta))
register_fasta(args.results, args.query, listhomology, output_fasta)
else:
# Add elements
all_listhomology += [listhomology]
# TODO Venn diagram here
# Venn diagram
if"cumulative" in args.behavior and all_listhomology:
group_elements = []
for list_element in all_listhomology:
group_elements += list_element
# Unique elements for bisect
group_elements = get_unique(group_elements)
# Sort the list for bisect search
group_elements.sort()
# Filter candidates with length
if((args.filter_length_minimum or args.filter_length_maximum)
and len(group_elements) > 0):
print("Filter length")
#if args.reverse:
#group_elements = filter_sequence_length(args.results, args.query,
#group_elements,
#args.filter_length_minimum,
#args.filter_length_maximum,
#"all")
#else:
group_elements = filter_sequence_length(args.results, args.database,
group_elements,
args.filter_length_minimum,
args.filter_length_maximum,
"all")
if "check" in args.behavior and len(group_elements) > 0:
# Analyse candidates
#if args.reverse:
result_stat = compute_sequence_properties(args.results, conf_data,
args.path_clustalo,
args.query, args.database,
group_elements,
args.thread)
#else:
#result_stat = compute_sequence_properties(args.results, conf_data,
#args.path_clustalo,
#args.database, args.query,
#group_elements,
#args.thread)
# Order the information
result_dict = order_dict(result_stat)
if(args.filter_identity_minimum or args.filter_identity_maximum
or args.filter_similarity_minimum or args.filter_similarity_maximum
or args.filter_coverage_minimum or args.filter_coverage_maximum):
print("Filter sequence based on identity, similarity or coverage")
group_elements = filter_sequence_align(
group_elements, result_dict,
args.filter_identity_minimum,
args.filter_identity_maximum,
args.filter_similarity_minimum,
args.filter_similarity_maximum,
args.filter_coverage_minimum,
args.filter_coverage_maximum)
if len(group_elements) == 0:
print("Warning : No element selected with this constraints "
"of identity, similarity and coverage !")
write_check_data(args.results, result_dict, "all", args.nbest,
group_elements)
else:
write_check_data(args.results, result_dict, "all", args.nbest)
if "extract" in args.behavior and len(group_elements) > 0:
output_fasta = args.results + "all_protein_homology.fasta"
# Get a multifasta that contains only the homologous proteins
print("Write sequence of the hits in {0}".format(output_fasta))
#if args.reverse:
#register_fasta(args.results, args.query, group_elements,
#output_fasta)
#else:
register_fasta(args.results, args.database, group_elements,
output_fasta)
if __name__ == '__main__':
main()
|
magic-trip/hfinder
|
hfinder.py
|
Python
|
gpl-2.0
| 37,882
|
[
"BLAST"
] |
b8de757b389b255d7ab5a9b6cd6c07bba36adbadd29731c0c56f0a9cdaf4257e
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Environment setup and teardown for remote devices."""
import distutils.version
import json
import logging
import os
import random
import sys
from devil.utils import reraiser_thread
from devil.utils import timeout_retry
from pylib.base import environment
from pylib.remote.device import appurify_sanitized
from pylib.remote.device import remote_device_helper
class RemoteDeviceEnvironment(environment.Environment):
"""An environment for running on remote devices."""
_ENV_KEY = 'env'
_DEVICE_KEY = 'device'
_DEFAULT_RETRIES = 0
def __init__(self, args, error_func):
"""Constructor.
Args:
args: Command line arguments.
error_func: error to show when using bad command line arguments.
"""
super(RemoteDeviceEnvironment, self).__init__()
self._access_token = None
self._device = None
self._device_type = args.device_type
self._verbose_count = args.verbose_count
self._timeouts = {
'queueing': 60 * 10,
'installing': 60 * 10,
'in-progress': 60 * 30,
'unknown': 60 * 5
}
# Example config file:
# {
# "remote_device": ["Galaxy S4", "Galaxy S3"],
# "remote_device_os": ["4.4.2", "4.4.4"],
# "remote_device_minimum_os": "4.4.2",
# "api_address": "www.example.com",
# "api_port": "80",
# "api_protocol": "http",
# "api_secret": "apisecret",
# "api_key": "apikey",
# "timeouts": {
# "queueing": 600,
# "installing": 600,
# "in-progress": 1800,
# "unknown": 300
# }
# }
if args.remote_device_file:
with open(args.remote_device_file) as device_file:
device_json = json.load(device_file)
else:
device_json = {}
self._api_address = device_json.get('api_address', None)
self._api_key = device_json.get('api_key', None)
self._api_port = device_json.get('api_port', None)
self._api_protocol = device_json.get('api_protocol', None)
self._api_secret = device_json.get('api_secret', None)
self._device_oem = device_json.get('device_oem', None)
self._device_type = device_json.get('device_type', 'Android')
self._network_config = device_json.get('network_config', None)
self._remote_device = device_json.get('remote_device', None)
self._remote_device_minimum_os = device_json.get(
'remote_device_minimum_os', None)
self._remote_device_os = device_json.get('remote_device_os', None)
self._remote_device_timeout = device_json.get(
'remote_device_timeout', None)
self._results_path = device_json.get('results_path', None)
self._runner_package = device_json.get('runner_package', None)
self._runner_type = device_json.get('runner_type', None)
self._timeouts.update(device_json.get('timeouts', {}))
def command_line_override(
file_value, cmd_line_value, desc, print_value=True):
if cmd_line_value:
if file_value and file_value != cmd_line_value:
if print_value:
logging.info('Overriding %s from %s to %s',
desc, file_value, cmd_line_value)
else:
logging.info('overriding %s', desc)
return cmd_line_value
return file_value
self._api_address = command_line_override(
self._api_address, args.api_address, 'api_address')
self._api_port = command_line_override(
self._api_port, args.api_port, 'api_port')
self._api_protocol = command_line_override(
self._api_protocol, args.api_protocol, 'api_protocol')
self._device_oem = command_line_override(
self._device_oem, args.device_oem, 'device_oem')
self._device_type = command_line_override(
self._device_type, args.device_type, 'device_type')
self._network_config = command_line_override(
self._network_config, args.network_config, 'network_config')
self._remote_device = command_line_override(
self._remote_device, args.remote_device, 'remote_device')
self._remote_device_minimum_os = command_line_override(
self._remote_device_minimum_os, args.remote_device_minimum_os,
'remote_device_minimum_os')
self._remote_device_os = command_line_override(
self._remote_device_os, args.remote_device_os, 'remote_device_os')
self._remote_device_timeout = command_line_override(
self._remote_device_timeout, args.remote_device_timeout,
'remote_device_timeout')
self._results_path = command_line_override(
self._results_path, args.results_path, 'results_path')
self._runner_package = command_line_override(
self._runner_package, args.runner_package, 'runner_package')
self._runner_type = command_line_override(
self._runner_type, args.runner_type, 'runner_type')
if args.api_key_file:
with open(args.api_key_file) as api_key_file:
temp_key = api_key_file.read().strip()
self._api_key = command_line_override(
self._api_key, temp_key, 'api_key', print_value=False)
self._api_key = command_line_override(
self._api_key, args.api_key, 'api_key', print_value=False)
if args.api_secret_file:
with open(args.api_secret_file) as api_secret_file:
temp_secret = api_secret_file.read().strip()
self._api_secret = command_line_override(
self._api_secret, temp_secret, 'api_secret', print_value=False)
self._api_secret = command_line_override(
self._api_secret, args.api_secret, 'api_secret', print_value=False)
if not self._api_address:
error_func('Must set api address with --api-address'
' or in --remote-device-file.')
if not self._api_key:
error_func('Must set api key with --api-key, --api-key-file'
' or in --remote-device-file')
if not self._api_port:
error_func('Must set api port with --api-port'
' or in --remote-device-file')
if not self._api_protocol:
error_func('Must set api protocol with --api-protocol'
' or in --remote-device-file. Example: http')
if not self._api_secret:
error_func('Must set api secret with --api-secret, --api-secret-file'
' or in --remote-device-file')
logging.info('Api address: %s', self._api_address)
logging.info('Api port: %s', self._api_port)
logging.info('Api protocol: %s', self._api_protocol)
logging.info('Remote device: %s', self._remote_device)
logging.info('Remote device minimum OS: %s',
self._remote_device_minimum_os)
logging.info('Remote device OS: %s', self._remote_device_os)
logging.info('Remote device OEM: %s', self._device_oem)
logging.info('Remote device type: %s', self._device_type)
    logging.info('Remote device timeout: %s', self._remote_device_timeout)
logging.info('Results Path: %s', self._results_path)
logging.info('Runner package: %s', self._runner_package)
logging.info('Runner type: %s', self._runner_type)
logging.info('Timeouts: %s', self._timeouts)
if not args.trigger and not args.collect:
self._trigger = True
self._collect = True
else:
self._trigger = args.trigger
self._collect = args.collect
def SetUp(self):
"""Set up the test environment."""
os.environ['APPURIFY_API_PROTO'] = self._api_protocol
os.environ['APPURIFY_API_HOST'] = self._api_address
os.environ['APPURIFY_API_PORT'] = self._api_port
os.environ['APPURIFY_STATUS_BASE_URL'] = 'none'
self._GetAccessToken()
if self._trigger:
self._SelectDevice()
def TearDown(self):
"""Teardown the test environment."""
self._RevokeAccessToken()
def __enter__(self):
"""Set up the test run when used as a context manager."""
try:
self.SetUp()
return self
except:
self.__exit__(*sys.exc_info())
raise
def __exit__(self, exc_type, exc_val, exc_tb):
"""Tears down the test run when used as a context manager."""
self.TearDown()
def DumpTo(self, persisted_data):
env_data = {
self._DEVICE_KEY: self._device,
}
persisted_data[self._ENV_KEY] = env_data
def LoadFrom(self, persisted_data):
env_data = persisted_data[self._ENV_KEY]
self._device = env_data[self._DEVICE_KEY]
def _GetAccessToken(self):
"""Generates access token for remote device service."""
logging.info('Generating remote service access token')
with appurify_sanitized.SanitizeLogging(self._verbose_count,
logging.WARNING):
access_token_results = appurify_sanitized.api.access_token_generate(
self._api_key, self._api_secret)
remote_device_helper.TestHttpResponse(access_token_results,
'Unable to generate access token.')
self._access_token = access_token_results.json()['response']['access_token']
def _RevokeAccessToken(self):
"""Destroys access token for remote device service."""
logging.info('Revoking remote service access token')
with appurify_sanitized.SanitizeLogging(self._verbose_count,
logging.WARNING):
revoke_token_results = appurify_sanitized.api.access_token_revoke(
self._access_token)
remote_device_helper.TestHttpResponse(revoke_token_results,
'Unable to revoke access token.')
def _SelectDevice(self):
if self._remote_device_timeout:
try:
timeout_retry.Run(self._FindDeviceWithTimeout,
self._remote_device_timeout, self._DEFAULT_RETRIES)
except reraiser_thread.TimeoutError:
self._NoDeviceFound()
else:
if not self._FindDevice():
self._NoDeviceFound()
def _FindDevice(self):
"""Find which device to use."""
logging.info('Finding device to run tests on.')
device_list = self._GetDeviceList()
random.shuffle(device_list)
for device in device_list:
if device['os_name'] != self._device_type:
continue
if self._remote_device and device['name'] not in self._remote_device:
continue
if (self._remote_device_os
and device['os_version'] not in self._remote_device_os):
continue
if self._device_oem and device['brand'] not in self._device_oem:
continue
if (self._remote_device_minimum_os
and distutils.version.LooseVersion(device['os_version'])
< distutils.version.LooseVersion(self._remote_device_minimum_os)):
continue
if device['has_available_device']:
logging.info('Found device: %s %s',
device['name'], device['os_version'])
self._device = device
return True
return False
def _FindDeviceWithTimeout(self):
"""Find which device to use with timeout."""
timeout_retry.WaitFor(self._FindDevice, wait_period=1)
def _PrintAvailableDevices(self, device_list):
def compare_devices(a, b):
for key in ('os_version', 'name'):
c = cmp(a[key], b[key])
if c:
return c
return 0
logging.critical('Available %s Devices:', self._device_type)
logging.critical(
' %s %s %s %s %s',
'OS'.ljust(10),
'Device Name'.ljust(30),
'Available'.ljust(10),
'Busy'.ljust(10),
'All'.ljust(10))
devices = (d for d in device_list if d['os_name'] == self._device_type)
for d in sorted(devices, compare_devices):
logging.critical(
' %s %s %s %s %s',
d['os_version'].ljust(10),
d['name'].ljust(30),
str(d['available_devices_count']).ljust(10),
str(d['busy_devices_count']).ljust(10),
str(d['all_devices_count']).ljust(10))
def _GetDeviceList(self):
with appurify_sanitized.SanitizeLogging(self._verbose_count,
logging.WARNING):
dev_list_res = appurify_sanitized.api.devices_list(self._access_token)
remote_device_helper.TestHttpResponse(dev_list_res,
'Unable to generate access token.')
return dev_list_res.json()['response']
def _NoDeviceFound(self):
self._PrintAvailableDevices(self._GetDeviceList())
raise remote_device_helper.RemoteDeviceError(
'No device found.', is_infra_error=True)
@property
def collect(self):
return self._collect
@property
def device_type_id(self):
return self._device['device_type_id']
@property
def network_config(self):
return self._network_config
@property
def only_output_failures(self): # pylint: disable=no-self-use
# TODO(jbudorick): Remove this once b/18981674 is fixed.
return True
@property
def results_path(self):
return self._results_path
@property
def runner_package(self):
return self._runner_package
@property
def runner_type(self):
return self._runner_type
@property
def timeouts(self):
return self._timeouts
@property
def token(self):
return self._access_token
@property
def trigger(self):
return self._trigger
@property
def verbose_count(self):
return self._verbose_count
@property
def device_type(self):
return self._device_type
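# --- Hedged usage sketch (not part of the original module) ---
# RemoteDeviceEnvironment is normally built from the test runner's argparse
# namespace; 'args' and 'parser.error' below stand in for those objects and
# the trigger/collect helpers are hypothetical caller code. SetUp/TearDown
# are wired to the context-manager protocol, so the common pattern is:
#
#   with RemoteDeviceEnvironment(args, parser.error) as env:
#       if env.trigger:
#           trigger_tests(env)        # hypothetical
#       if env.collect:
#           collect_results(env)      # hypothetical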
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/build/android/pylib/remote/device/remote_device_environment.py
|
Python
|
mit
| 13,391
|
[
"Galaxy"
] |
92247364820ec4972ca614ac04d77452c954f8141578896091ffa394dd0b84bb
|
# RABDAM
# Copyright (C) 2020 Garman Group, University of Oxford
# This file is part of RABDAM.
# RABDAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# RABDAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
def convert_array_to_atom_list(array, all_atoms_list, sub_atoms_list):
"""
Converts numpy array of xyz coordinates into a list of Atom() objects,
for use in writing a PDB file
"""
import copy
import numpy as np
pdb_atom_list = []
for index, atom_id in enumerate(all_atoms_list):
for atom in sub_atoms_list:
if atom.atomNum == atom_id:
new_atom = copy.deepcopy(atom)
newXYZ = np.array([[array[index][0]],
[array[index][1]],
[array[index][2]]])
new_atom.xyzCoords = newXYZ
pdb_atom_list.append(new_atom)
break
return pdb_atom_list
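# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the bookkeeping in convert_array_to_atom_list(): row i of the
# coordinate array is matched to the atom whose atomNum equals
# all_atoms_list[i], and a deep copy of that atom gets the new coordinates.
# _FakeAtom is a stand-in for RABDAM's Atom() objects (only the attributes
# used here), purely for illustration.
def _convert_array_example():
    import numpy as np

    class _FakeAtom(object):
        def __init__(self, atomNum):
            self.atomNum = atomNum
            self.xyzCoords = None

    atoms = [_FakeAtom(1), _FakeAtom(2)]
    coords = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]])
    # Returns two copied atoms with xyzCoords set to 3x1 column vectors.
    return convert_array_to_atom_list(coords, [1, 2], atoms)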
def makePDB(header_lines, atomList, footer_lines, seqres, newPDBfilename, Bfac):
"""
Writes a PDB file containing a complete set of atom information for all
atoms in 'atomList', plus header and footer information.
"""
import numpy as np
exit = False
newPDBfile = open(newPDBfilename, 'w')
for line in header_lines:
if not line.endswith('\n'):
line += '\n'
newPDBfile.write(line)
for index, atm in enumerate(atomList):
a = atm.lineID.ljust(6)
if len(a) != 6:
exit = True
b = str(atm.atomNum).rjust(5)[-5:]
c = atm.atomType.ljust(3)
if len(c) != 3:
exit = True
d = atm.conformer.ljust(1)
if len(d) != 1:
exit = True
e = atm.resiType.ljust(3)
if len(e) != 3:
exit = True
f = atm.chainID.ljust(1)
if len(f) != 1:
exit = True
g = str(atm.resiNum).rjust(4)[-4:]
h = atm.insCode.ljust(1)
if len(h) != 1:
exit = True
i = '%8.3f' % atm.xyzCoords[0][0]
if len(i) != 8:
exit = True
j = '%8.3f' % atm.xyzCoords[1][0]
if len(j) != 8:
exit = True
k = '%8.3f' % atm.xyzCoords[2][0]
if len(k) != 8:
exit = True
l = '%6.2f' % atm.occupancy
if len(l) != 6:
exit = True
if Bfac.lower() == 'bfactor':
m = '%6.2f' % atm.bFactor
if len(m) != 6:
exit = True
elif Bfac.lower() == 'bdamage':
m = '%6.2f' % np.log(atm.bd)
if len(m) != 6:
exit = True
n = atm.element.rjust(2)
if len(n) != 2:
exit = True
o = atm.charge.rjust(2)
if len(o) != 2:
exit = True
if exit is False:
# Atom properties are appropriately ordered and spaced, and reported
# to the expected number of significant figures, for the PDB file
# format. Note that atomType for some metal ions will not follow
# standard PDB file format, but this will not affect the running of
# RABDAM (nor most other programs that the user might want to load
# the PDB file into, such as PyMol, Chimera, CCP4MG, WinCoot, etc.)
newLine = ''.join([a, b, ' ', c, d, e, ' ', f, g, h, ' ', i, j,
k, l, m, ' ', n, o, '\n'])
newPDBfile.write(newLine)
# Inserts TER cards
if index != (len(atomList) - 1):
if (
(atm.chainID != atomList[index+1].chainID)
and (atm.resiType in seqres)
):
newPDBfile.write('TER'.ljust(80) + '\n')
else:
if atm.resiType in seqres:
newPDBfile.write('TER'.ljust(80) + '\n')
for line in footer_lines:
if not line.endswith('\n'):
line += '\n'
newPDBfile.write(line)
print('New PDB file saved to %s' % newPDBfilename)
newPDBfile.close()
return exit
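# --- Hedged usage sketch (not part of the original module) ---
# Typical call from the RABDAM pipeline: write ln(BDamage) values into the
# B-factor column of a new PDB file. The argument names below are
# placeholders for objects assembled earlier in the pipeline.
#
#   exit_flag = makePDB(header_lines, bdam_atom_list, footer_lines,
#                       seqres_residues, 'output_bdamage.pdb', 'bdamage')
#   if exit_flag:
#       print('One or more atom fields broke the fixed-width PDB format')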
def writeDataFrame(bdamAtomList):
"""
Returns a DataFrame containing a complete set of atom information
(including both that provided in the input PDB file and also the BDamage
values calculated by RABDAM) for all atoms considered for BDamage analysis.
"""
import copy
import pandas as pd
# Initialises a list for each atom property considered.
REC = [None]*len(bdamAtomList)
ATMNUM = [None]*len(bdamAtomList)
ATMNAME = [None]*len(bdamAtomList)
CONFORMER = [None]*len(bdamAtomList)
RESNAME = [None]*len(bdamAtomList)
CHAIN = [None]*len(bdamAtomList)
RESNUM = [None]*len(bdamAtomList)
INSCODE = [None]*len(bdamAtomList)
XPOS = [None]*len(bdamAtomList)
YPOS = [None]*len(bdamAtomList)
ZPOS = [None]*len(bdamAtomList)
OCC = [None]*len(bdamAtomList)
BFAC = [None]*len(bdamAtomList)
ELEMENT = [None]*len(bdamAtomList)
CHARGE = [None]*len(bdamAtomList)
PD = [None]*len(bdamAtomList)
AVRG_BF = [None]*len(bdamAtomList)
BDAM = [None]*len(bdamAtomList)
# Lists are filled with the relevant values of the properties associated
# with each of the atoms considered for BDamage analysis.
for index, atm in enumerate(bdamAtomList):
REC[index] = atm.lineID
ATMNUM[index] = atm.atomNum
ATMNAME[index] = atm.origAtomType
CONFORMER[index] = atm.conformer
RESNAME[index] = atm.origResiType
CHAIN[index] = atm.origChainID
RESNUM[index] = atm.origResiNum
INSCODE[index] = atm.insCode
XPOS[index] = atm.xyzCoords[0][0]
YPOS[index] = atm.xyzCoords[1][0]
ZPOS[index] = atm.xyzCoords[2][0]
OCC[index] = atm.occupancy
BFAC[index] = atm.bFactor
ELEMENT[index] = atm.element
CHARGE[index] = atm.charge
PD[index] = atm.pd
AVRG_BF[index] = atm.avrg_bf
BDAM[index] = atm.bd
# Generates dictionary of DataFrame columns
df_list_dict = {'REC': REC,
'ATMNUM': ATMNUM,
'ATMNAME': ATMNAME,
'CONFORMER': CONFORMER,
'RESNAME': RESNAME,
'CHAIN': CHAIN,
'RESNUM': RESNUM,
'INSCODE': INSCODE,
'XPOS': XPOS,
'YPOS': YPOS,
'ZPOS': ZPOS,
'OCC': OCC,
'BFAC': BFAC,
'ELEMENT': ELEMENT,
'CHARGE': CHARGE,
'PD': PD,
'AVRG_BF': AVRG_BF,
'BDAM': BDAM}
# Ensures output dataframe is in cif format
df_list_dict_copy = copy.copy(df_list_dict)
for key in df_list_dict_copy:
if set(df_list_dict_copy[key]) == {''}:
df_list_dict[key] = ['?']*len(bdamAtomList)
elif (
len(set(df_list_dict_copy[key])) > 1
and '' in set(df_list_dict_copy[key])
):
cif_list = []
for atm in df_list_dict_copy[key]:
if atm == '':
cif_list.append('.')
else:
cif_list.append(atm)
df_list_dict[key] = cif_list
# Lists are concatenated into the colummns of a DataFrame.
df = pd.DataFrame(df_list_dict)
# DataFrame columns are ordered.
df = df[['REC', 'ATMNUM', 'ATMNAME', 'CONFORMER', 'RESNAME', 'CHAIN',
'RESNUM', 'INSCODE', 'XPOS', 'YPOS', 'ZPOS', 'OCC', 'BFAC',
'ELEMENT', 'CHARGE', 'PD', 'AVRG_BF', 'BDAM']]
return df
|
GarmanGroup/RABDAM
|
rabdam/Subroutines/makeDataFrame.py
|
Python
|
lgpl-3.0
| 8,239
|
[
"PyMOL"
] |
a70725699548c4d6b00db36741b6b8067526b7dc9e1030fcb047baf8f8968339
|
# Copyright (c) 2000-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""handle diagram generation options for class diagram or default diagrams
"""
from logilab.common.compat import builtins
import astroid
from astroid.utils import LocalsVisitor
from pylint.pyreverse.diagrams import PackageDiagram, ClassDiagram
BUILTINS_NAME = builtins.__name__
# diagram generators ##########################################################
class DiaDefGenerator(object):
"""handle diagram generation options"""
def __init__(self, linker, handler):
"""common Diagram Handler initialization"""
self.config = handler.config
self._set_default_options()
self.linker = linker
self.classdiagram = None # defined by subclasses
def get_title(self, node):
"""get title for objects"""
title = node.name
if self.module_names:
title = '%s.%s' % (node.root().name, title)
return title
def _set_option(self, option):
"""activate some options if not explicitly deactivated"""
# if we have a class diagram, we want more information by default;
# so if the option is None, we return True
if option is None:
if self.config.classes:
return True
else:
return False
return option
def _set_default_options(self):
"""set different default options with _default dictionary"""
self.module_names = self._set_option(self.config.module_names)
all_ancestors = self._set_option(self.config.all_ancestors)
all_associated = self._set_option(self.config.all_associated)
anc_level, ass_level = (0, 0)
if all_ancestors:
anc_level = -1
if all_associated:
ass_level = -1
if self.config.show_ancestors is not None:
anc_level = self.config.show_ancestors
if self.config.show_associated is not None:
ass_level = self.config.show_associated
self.anc_level, self.ass_level = anc_level, ass_level
def _get_levels(self):
"""help function for search levels"""
return self.anc_level, self.ass_level
def show_node(self, node):
"""true if builtins and not show_builtins"""
if self.config.show_builtin:
return True
return node.root().name != BUILTINS_NAME
def add_class(self, node):
"""visit one class and add it to diagram"""
self.linker.visit(node)
self.classdiagram.add_object(self.get_title(node), node)
def get_ancestors(self, node, level):
"""return ancestor nodes of a class node"""
if level == 0:
return
for ancestor in node.ancestors(recurs=False):
if not self.show_node(ancestor):
continue
yield ancestor
def get_associated(self, klass_node, level):
"""return associated nodes of a class node"""
if level == 0:
return
for ass_nodes in list(klass_node.instance_attrs_type.values()) + \
list(klass_node.locals_type.values()):
for ass_node in ass_nodes:
if isinstance(ass_node, astroid.Instance):
ass_node = ass_node._proxied
if not (isinstance(ass_node, astroid.Class)
and self.show_node(ass_node)):
continue
yield ass_node
def extract_classes(self, klass_node, anc_level, ass_level):
"""extract recursively classes related to klass_node"""
if self.classdiagram.has_node(klass_node) or not self.show_node(klass_node):
return
self.add_class(klass_node)
for ancestor in self.get_ancestors(klass_node, anc_level):
self.extract_classes(ancestor, anc_level-1, ass_level)
for ass_node in self.get_associated(klass_node, ass_level):
self.extract_classes(ass_node, anc_level, ass_level-1)
class DefaultDiadefGenerator(LocalsVisitor, DiaDefGenerator):
"""generate minimum diagram definition for the project :
* a package diagram including project's modules
* a class diagram including project's classes
"""
def __init__(self, linker, handler):
DiaDefGenerator.__init__(self, linker, handler)
LocalsVisitor.__init__(self)
def visit_project(self, node):
"""visit an astroid.Project node
create a diagram definition for packages
"""
mode = self.config.mode
if len(node.modules) > 1:
self.pkgdiagram = PackageDiagram('packages %s' % node.name, mode)
else:
self.pkgdiagram = None
self.classdiagram = ClassDiagram('classes %s' % node.name, mode)
def leave_project(self, node): # pylint: disable=unused-argument
"""leave the astroid.Project node
return the generated diagram definition
"""
if self.pkgdiagram:
return self.pkgdiagram, self.classdiagram
return self.classdiagram,
def visit_module(self, node):
"""visit an astroid.Module node
add this class to the package diagram definition
"""
if self.pkgdiagram:
self.linker.visit(node)
self.pkgdiagram.add_object(node.name, node)
def visit_class(self, node):
"""visit an astroid.Class node
add this class to the class diagram definition
"""
anc_level, ass_level = self._get_levels()
self.extract_classes(node, anc_level, ass_level)
def visit_from(self, node):
"""visit astroid.From and catch modules for package diagram
"""
if self.pkgdiagram:
self.pkgdiagram.add_from_depend(node, node.modname)
class ClassDiadefGenerator(DiaDefGenerator):
"""generate a class diagram definition including all classes related to a
given class
"""
def __init__(self, linker, handler):
DiaDefGenerator.__init__(self, linker, handler)
def class_diagram(self, project, klass):
"""return a class diagram definition for the given klass and its
related klasses
"""
self.classdiagram = ClassDiagram(klass, self.config.mode)
if len(project.modules) > 1:
module, klass = klass.rsplit('.', 1)
module = project.get_module(module)
else:
module = project.modules[0]
klass = klass.split('.')[-1]
klass = next(module.ilookup(klass))
anc_level, ass_level = self._get_levels()
self.extract_classes(klass, anc_level, ass_level)
return self.classdiagram
# diagram handler #############################################################
class DiadefsHandler(object):
"""handle diagram definitions :
get it from user (i.e. xml files) or generate them
"""
def __init__(self, config):
self.config = config
def get_diadefs(self, project, linker):
"""get the diagrams configuration data
:param linker: astroid.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
:param project: astroid.manager.Project
"""
# read and interpret diagram definitions (Diadefs)
diagrams = []
generator = ClassDiadefGenerator(linker, self)
for klass in self.config.classes:
diagrams.append(generator.class_diagram(project, klass))
if not diagrams:
diagrams = DefaultDiadefGenerator(linker, self).visit(project)
for diagram in diagrams:
diagram.extract_relationships()
return diagrams
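# Editor's sketch (not part of pylint): DiadefsHandler only needs a config
# object exposing the attributes read above (classes, mode, module_names,
# all_ancestors, all_associated, show_ancestors, show_associated,
# show_builtin). The stand-in below is hypothetical; the project/linker pair
# expected by get_diadefs() would come from pyreverse's inspection step and
# is not mocked here.
if __name__ == '__main__':
    class _FakeConfig(object):
        classes = []
        mode = 'PUB_ONLY'   # assumed pyreverse filter mode, for illustration
        module_names = None
        all_ancestors = None
        all_associated = None
        show_ancestors = None
        show_associated = None
        show_builtin = False
    handler = DiadefsHandler(_FakeConfig())
    # diagrams = handler.get_diadefs(project, linker)  # needs a parsed project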
|
JetChars/vim
|
vim/bundle/python-mode/pymode/libs/pylint/pyreverse/diadefslib.py
|
Python
|
apache-2.0
| 8,381
|
[
"VisIt"
] |
95a148d9b2574e424ceaba6f8ca44eec0d6b4cc1e32585aa965de38209fca26b
|
"""Generate the storage of NARR 3 hourly products"""
import datetime
import sys
import os
import numpy as np
import pygrib
from pyiem.util import ncopen, logger
# This exists on dev laptop :/
TEMPLATE_FN = (
"/mesonet/ARCHIVE/data/1980/01/01/model/NARR/apcp_198001010000.grib"
)
BASEDIR = "/mesonet/data/iemre"
LOG = logger()
def init_year(ts):
"""
Create a new NetCDF file for a year of our specification!
"""
# Load up the example grib file to base our file on
grbs = pygrib.open(TEMPLATE_FN)
grb = grbs[1]
# grid shape is y, x
lats, lons = grb.latlons()
fp = "%s/%s_narr.nc" % (BASEDIR, ts.year)
if os.path.isfile(fp):
LOG.info("Cowardly refusing to overwrite file %s.", fp)
sys.exit()
nc = ncopen(fp, "w")
nc.title = "IEM Packaged NARR for %s" % (ts.year,)
nc.platform = "Grided Reanalysis"
nc.description = "NARR Data"
nc.institution = "Iowa State University, Ames, IA, USA"
nc.source = "Iowa Environmental Mesonet"
nc.project_id = "IEM"
nc.realization = 1
nc.Conventions = "CF-1.0"
nc.contact = "Daryl Herzmann, [email protected], 515-294-5978"
nc.history = ("%s Generated") % (
datetime.datetime.now().strftime("%d %B %Y"),
)
nc.comment = "No Comment at this time"
# Setup Dimensions
nc.createDimension("x", lats.shape[1])
nc.createDimension("y", lats.shape[0])
nc.createDimension("bnds", 2)
ts2 = datetime.datetime(ts.year + 1, 1, 1)
days = (ts2 - ts).days
print("Year %s has %s days" % (ts.year, days))
nc.createDimension("time", int(days) * 8)
# Setup Coordinate Variables
lat = nc.createVariable("lat", float, ("y", "x"))
lat.units = "degrees_north"
lat.long_name = "Latitude"
lat.standard_name = "latitude"
lat.axis = "Y"
lat[:] = lats
lon = nc.createVariable("lon", float, ("y", "x"))
lon.units = "degrees_east"
lon.long_name = "Longitude"
lon.standard_name = "longitude"
lon.axis = "X"
lon[:] = lons
tm = nc.createVariable("time", float, ("time",))
tm.units = "Hours since %s-01-01 00:00:0.0" % (ts.year,)
tm.long_name = "Time"
tm.standard_name = "time"
tm.axis = "T"
tm.calendar = "gregorian"
tm.bounds = "time_bnds"
tm[:] = np.arange(0, int(days) * 8) * 3
tmb = nc.createVariable("time_bnds", "d", ("time", "bnds"))
tmb[:, 0] = np.arange(0, int(days) * 8) * 3 - 1
tmb[:, 1] = np.arange(0, int(days) * 8) * 3
# 3 hour accum, 0 to 655.35
apcp = nc.createVariable(
"apcp", np.ushort, ("time", "y", "x"), fill_value=65535
)
apcp.scale_factor = 0.01
apcp.add_offset = 0.0
apcp.units = "mm"
apcp.long_name = "Precipitation"
apcp.standard_name = "Precipitation"
apcp.coordinates = "lon lat"
apcp.description = "Precipitation accumulation for the previous 3 hours"
nc.close()
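# Editor's sketch (not part of the original script): a small helper showing
# the ushort packing implied by the scale_factor/add_offset set on 'apcp'
# above. netCDF4 normally applies this transparently on read/write; the
# arithmetic is spelled out here only to make the representable range explicit.
def _pack_apcp_demo():
    """Round-trip one precipitation value through the ushort packing."""
    physical_mm = 12.34
    packed = np.ushort(round((physical_mm - 0.0) / 0.01))  # -> 1234
    unpacked = packed * 0.01 + 0.0  # -> back to 12.34 mm
    return packed, unpacked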
if __name__ == "__main__":
init_year(datetime.datetime(int(sys.argv[1]), 1, 1))
|
akrherz/iem
|
scripts/iemre/init_narr.py
|
Python
|
mit
| 2,984
|
[
"NetCDF"
] |
218f4900546c48e96f759d955ebea9940240fda6dcf5b4f5ced6ef64d39e8994
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGpyopt(PythonPackage):
""" Performs global optimization with different acquisition functions. Among
other functionalities, it is possible to use GPyOpt to optimize physical
experiments (sequentially or in batches) and tune the parameters of Machine
Learning algorithms. It is able to handle large data sets via sparse
Gaussian process models."""
homepage = "http://sheffieldml.github.io/GPyOpt/"
pypi = "GPyOpt/GPyOpt-1.2.6.tar.gz"
version('1.2.6', sha256='e714daa035bb529a6db23c53665a762a4ab3456b9329c19ad3b03983f94c9b2a')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('test'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-gpyopt/package.py
|
Python
|
lgpl-2.1
| 1,089
|
[
"Gaussian"
] |
71d82ead1fd6b3c17d132d3f2798b6d8b8c11970b2d5a72fa5a78c2416c4e676
|
BOARDS = {
'arduino' : {
'digital' : tuple(x for x in range(14)),
'analog' : tuple(x for x in range(6)),
'pwm' : (3, 5, 6, 9, 10, 11),
'use_ports' : True,
'disabled' : (0, 1, 14, 15) # Rx, Tx, Crystal
},
'arduino_mega' : {
'digital' : tuple(x for x in range(54)),
'analog' : tuple(x for x in range(16)),
'pwm' : tuple(x for x in range(2,14)),
'use_ports' : True,
'disabled' : (0, 1, 14, 15) # Rx, Tx, Crystal
}
}
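# Editor's sketch (not part of pyFirmata): the layouts above are plain dicts,
# so capability checks reduce to simple lookups. The helper below is a
# hypothetical illustration, not an existing pyFirmata API.
def _pin_supports_pwm(board_name, pin):
    """Return True if the given digital pin can do PWM on this layout."""
    layout = BOARDS[board_name]
    return pin in layout['pwm'] and pin not in layout['disabled']
# e.g. _pin_supports_pwm('arduino', 9) -> True, _pin_supports_pwm('arduino', 0) -> False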
|
hatchetation/pyFirmata
|
pyfirmata/boards.py
|
Python
|
bsd-3-clause
| 508
|
[
"CRYSTAL"
] |
8113b05843a48c71fac02e5a46050bf4daf115bccdd1b62aa513c4dd5070c5da
|
"""
EvMenu
This implements a full menu system for Evennia. It is considerably
more flexible than the older contrib/menusystem.py and also uses
menu plugin modules.
To start the menu, just import the EvMenu class from this module,
```python
from evennia.utils.evmenu import EvMenu
EvMenu(caller, menu_module_path,
startnode="node1",
cmdset_mergetype="Replace", cmdset_priority=1,
allow_quit=True, cmd_on_quit="look")
```
Where `caller` is the Object to use the menu on - it will get a new
cmdset while using the Menu. The menu_module_path is the python path
to a python module containing function defintions. By adjusting the
keyword options of the Menu() initialization call you can start the
menu at different places in the menu definition file, adjust if the
menu command should overload the normal commands or not, etc.
The menu is defined in a module (this can be the same module as the
command definition too) with function defintions:
```python
def node1(caller):
# (this is the start node if called like above)
# code
return text, options
def node_with_other_name(caller, input_string):
# code
return text, options
```
Where caller is the object using the menu and input_string is the
command entered by the user on the *previous* node (the command
entered to get to this node). The node function code will only be
executed once per node-visit and the system will accept nodes with
both one or two arguments interchangeably.
The return values must be given in the above order, but each can be
returned as None as well. If the options are returned as None, the
menu is immediately exited and the default "look" command is called.
text (str, tuple or None): Text shown at this node. If a tuple, the second
element in the tuple is a help text to display at this node when
the user enters the menu help command there.
options (tuple, dict or None): ( {'key': name, # can also be a list of aliases. A special key is "_default", which
# marks this option as the default fallback when no other
# option matches the user input.
'desc': description, # option description
'goto': nodekey, # node to go to when chosen
'exec': nodekey, # node or callback to trigger as callback when chosen. If a node
# key is given the node will be executed once but its return u
# values are ignored. If a callable is given, it must accept
# one or two args, like any node.
{...}, ...)
If key is not given, the option will automatically be identified by
its number 1..N.
Example:
```python
# in menu_module.py
def node1(caller):
text = ("This is a node text",
"This is help text for this node")
options = ({"key": "testing",
"desc": "Select this to go to node 2",
"goto": "node2",
"exec": "callback1"},
{"desc": "Go to node 3.",
"goto": "node3"})
return text, options
def callback1(caller):
# this is called when choosing the "testing" option in node1
# (before going to node2). It needs not have return values.
caller.msg("Callback called!")
def node2(caller):
text = '''
This is node 2. It only allows you to go back
to the original node1. This extra indent will
be stripped. We don't include a help text.
'''
options = {"goto": "node1"}
return text, options
def node3(caller):
text = "This ends the menu since there are no options."
return text, None
```
When starting this menu with `Menu(caller, "path.to.menu_module")`,
the first node will look something like this:
This is a node text
______________________________________
testing: Select this to go to node 2
2: Go to node 3
Where you can both enter "testing" and "1" to select the first option.
If the client supports MXP, they may also mouse-click on "testing" to
do the same. When making this selection, a function "callback1" in the
same module will be called as a callback. Using `help` will show the
help text, otherwise a list of available commands while in menu mode.
The menu tree is exited either by using the in-menu quit command or by
reaching a node without any options.
For a menu demo, import CmdTestMenu from this module and add it to
your default cmdset. Run it with this module, like `testmenu
evennia.utils.evmenu`.
"""
from textwrap import dedent
from inspect import isfunction, getargspec
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils.evtable import EvTable
from evennia.utils.ansi import ANSIString, strip_ansi
from evennia.utils.utils import mod_import, make_iter, pad, m_len
from evennia.commands import cmdhandler
# read from protocol NAWS later?
_MAX_TEXT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT
# Return messages
# i18n
from django.utils.translation import ugettext as _
_ERR_NOT_IMPLEMENTED = _("Menu node '{nodename}' is not implemented. Make another choice.")
_ERR_GENERAL = _("Error in menu node '{nodename}'.")
_ERR_NO_OPTION_DESC = _("No description.")
_HELP_FULL = _("Commands: <menu option>, help, quit")
_HELP_NO_QUIT = _("Commands: <menu option>, help")
_HELP_NO_OPTIONS = _("Commands: help, quit")
_HELP_NO_OPTIONS_NO_QUIT = _("Commands: help")
_HELP_NO_OPTION_MATCH = _("Choose an option or try 'help'.")
class EvMenuError(RuntimeError):
"""
Error raised by menu when facing internal errors.
"""
pass
#------------------------------------------------------------
#
# Menu command and command set
#
#------------------------------------------------------------
class CmdEvMenuNode(Command):
"""
Menu options.
"""
key = "look"
aliases = ["l", _CMD_NOMATCH, _CMD_NOINPUT]
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"""
Implement all menu commands.
"""
caller = self.caller
menu = caller.ndb._menutree
if not menu:
err = "Menu object not found as %s.ndb._menutree!" % (caller)
self.caller.msg(err)
raise EvMenuError(err)
# flags and data
raw_string = self.raw_string
cmd = raw_string.strip().lower()
options = menu.options
allow_quit = menu.allow_quit
cmd_on_quit = menu.cmd_on_quit
default = menu.default
print "cmd, options:", cmd, options
if cmd in options:
# this will overload the other commands
# if it has the same name!
goto, callback = options[cmd]
if callback:
menu.callback(callback, raw_string)
if goto:
menu.goto(goto, raw_string)
elif cmd in ("look", "l"):
caller.msg(menu.nodetext)
elif cmd in ("help", "h"):
caller.msg(menu.helptext)
elif allow_quit and cmd in ("quit", "q", "exit"):
menu.close_menu()
if cmd_on_quit is not None:
caller.execute_cmd(cmd_on_quit)
elif default:
goto, callback = default
if callback:
menu.callback(callback, raw_string)
if goto:
menu.goto(goto, raw_string)
else:
caller.msg(_HELP_NO_OPTION_MATCH)
if not (options or default):
# no options - we are at the end of the menu.
menu.close_menu()
if cmd_on_quit is not None:
caller.execute_cmd(cmd_on_quit)
class EvMenuCmdSet(CmdSet):
"""
The Menu cmdset replaces the current cmdset.
"""
key = "menu_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"""
Called when creating the set.
"""
self.add(CmdEvMenuNode())
#------------------------------------------------------------
#
# Menu main class
#
#------------------------------------------------------------
class EvMenu(object):
"""
This object represents an operational menu. It is initialized from
a menufile.py instruction.
"""
def __init__(self, caller, menudata, startnode="start",
cmdset_mergetype="Replace", cmdset_priority=1,
allow_quit=True, cmd_on_quit="look"):
"""
Initialize the menu tree and start the caller onto the first node.
Args:
            caller (Object or Player): The user of the menu.
menudata (str, module or dict): The full or relative path to the module
holding the menu tree data. All global functions in this module
                whose name doesn't start with '_' will be parsed as menu nodes.
Also the module itself is accepted as input. Finally, a dictionary
menu tree can be given directly. This must then be a mapping
`{"nodekey":callable,...}` where `callable` must be called as
and return the data expected of a menu node. This allows for
dynamic menu creation.
startnode (str, optional): The starting node name in the menufile.
cmdset_mergetype (str, optional): 'Replace' (default) means the menu
commands will be exclusive - no other normal commands will
be usable while the user is in the menu. 'Union' means the
menu commands will be integrated with the existing commands
(it will merge with `merge_priority`), if so, make sure that
the menu's command names don't collide with existing commands
in an unexpected way. Also the CMD_NOMATCH and CMD_NOINPUT will
                be overloaded by the menu cmdset. Other cmdset mergetypes
                have little purpose for the menu.
cmdset_priority (int, optional): The merge priority for the
menu command set. The default (1) is usually enough for most
types of menus.
allow_quit (bool, optional): Allow user to use quit or
exit to leave the menu at any point. Recommended during
development!
cmd_on_quit (str or None, optional): When exiting the menu
(either by reaching a node with no options or by using the
in-built quit command (activated with `allow_quit`), this
command string will be executed. Set to None to not call
any command.
Raises:
EvMenuError: If the start/end node is not found in menu tree.
"""
self._caller = caller
self._startnode = startnode
self._menutree = self._parse_menudata(menudata)
if startnode not in self._menutree:
raise EvMenuError("Start node '%s' not in menu tree!" % startnode)
# variables made available to the command
self.allow_quit = allow_quit
self.cmd_on_quit = cmd_on_quit
self.default = None
self.nodetext = None
self.helptext = None
self.options = None
# store ourself on the object
self._caller.ndb._menutree = self
# set up the menu command on the caller
menu_cmdset = EvMenuCmdSet()
menu_cmdset.mergetype = str(cmdset_mergetype).lower().capitalize() or "Replace"
menu_cmdset.priority = int(cmdset_priority)
self._caller.cmdset.add(menu_cmdset)
# start the menu
self.goto(self._startnode, "")
def _parse_menudata(self, menudata):
"""
Parse a menufile for node functions and store in dictionary
map. Alternatively, accept a pre-made mapping dictionary of
node functions.
Args:
menudata (str, module or dict): The python.path to the menufile,
or the python module itself. If a dict, this should be a
mapping nodename:callable, where the callable must match
the criteria for a menu node.
Returns:
menutree (dict): A {nodekey: func}
"""
if isinstance(menudata, dict):
# This is assumed to be a pre-loaded menu tree.
return menudata
else:
# a python path of a module
module = mod_import(menudata)
return dict((key, func) for key, func in module.__dict__.items()
if isfunction(func) and not key.startswith("_"))
def _format_node(self, nodetext, optionlist):
"""
Format the node text + option section
Args:
nodetext (str): The node text
optionlist (list): List of (key, desc) pairs.
Returns:
string (str): The options section, including
all needed spaces.
Notes:
This will adjust the columns of the options, first to use
a maxiumum of 4 rows (expanding in columns), then gradually
growing to make use of the screen space.
"""
#
# handle the node text
#
nodetext = dedent(nodetext).strip()
nodetext_width_max = max(m_len(line) for line in nodetext.split("\n"))
if not optionlist:
# return the node text "naked".
separator1 = "_" * nodetext_width_max + "\n\n" if nodetext_width_max else ""
            separator2 = ("\n" if nodetext_width_max else "") + "_" * nodetext_width_max
return separator1 + nodetext + separator2
#
# handle the options
#
# column separation distance
colsep = 4
nlist = len(optionlist)
# get the widest option line in the table.
table_width_max = -1
table = []
for key, desc in optionlist:
table_width_max = max(table_width_max,
max(m_len(p) for p in key.split("\n")) +
max(m_len(p) for p in desc.split("\n")) + colsep)
raw_key = strip_ansi(key)
if raw_key != key:
# already decorations in key definition
table.append(ANSIString(" {lc%s{lt%s{le: %s" % (raw_key, key, desc)))
else:
# add a default white color to key
table.append(ANSIString(" {lc%s{lt{w%s{n{le: %s" % (raw_key, raw_key, desc)))
ncols = (_MAX_TEXT_WIDTH // table_width_max) + 1 # number of ncols
nlastcol = nlist % ncols # number of elements left in last row
# get the amount of rows needed (start with 4 rows)
nrows = 4
while nrows * ncols < nlist:
nrows += 1
ncols = nlist // nrows # number of full columns
nlastcol = nlist % nrows # number of elements in last column
# get the final column count
ncols = ncols + 1 if nlastcol > 0 else ncols
if ncols > 1:
# only extend if longer than one column
table.extend([" " for i in xrange(nrows-nlastcol)])
# build the actual table grid
table = [table[icol*nrows:(icol*nrows) + nrows] for icol in xrange(0, ncols)]
# adjust the width of each column
total_width = 0
for icol in xrange(len(table)):
col_width = max(max(m_len(p) for p in part.split("\n")) for part in table[icol]) + colsep
table[icol] = [pad(part, width=col_width + colsep, align="l") for part in table[icol]]
total_width += col_width
# format the table into columns
table = EvTable(table=table, border="none")
# build the page
total_width = max(total_width, nodetext_width_max)
separator1 = "_" * total_width + "\n\n" if nodetext_width_max else ""
separator2 = "\n" + "_" * total_width + "\n\n" if total_width else ""
return separator1 + nodetext + separator2 + unicode(table)
def _execute_node(self, nodename, raw_string):
"""
Execute a node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
Returns:
nodetext, options (tuple): The node text (a string or a
tuple and the options tuple, if any.
"""
try:
node = self._menutree[nodename]
except KeyError:
self._caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
try:
# the node should return data as (text, options)
if len(getargspec(node).args) > 1:
# a node accepting raw_string
nodetext, options = node(self._caller, raw_string)
else:
# a normal node, only accepting caller
nodetext, options = node(self._caller)
except KeyError:
self._caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
except Exception:
self._caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
return nodetext, options
def callback(self, nodename, raw_string):
"""
Run a node as a callback. This makes no use of the return
values from the node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
if callable(nodename):
# this is a direct callable - execute it directly
try:
if len(getargspec(nodename).args) > 1:
# callable accepting raw_string
nodename(self._caller, raw_string)
else:
# normal callable, only the caller as arg
nodename(self._caller)
except Exception:
self._caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
else:
# nodename is a string; lookup as node
try:
# execute the node; we make no use of the return values here.
self._execute_node(nodename, raw_string)
except EvMenuError:
return
def goto(self, nodename, raw_string):
"""
Run a node by name
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
try:
# execute the node, make use of the returns.
nodetext, options = self._execute_node(nodename, raw_string)
except EvMenuError:
return
# validation of the node return values
helptext = ""
if hasattr(nodetext, "__iter__"):
if len(nodetext) > 1:
nodetext, helptext = nodetext[:2]
else:
nodetext = nodetext[0]
nodetext = str(nodetext) or ""
options = [options] if isinstance(options, dict) else options
# this will be displayed in the given order
display_options = []
# this is used for lookup
self.options = {}
self.default = None
if options:
for inum, dic in enumerate(options):
# fix up the option dicts
keys = make_iter(dic.get("key"))
if "_default" in keys:
keys = [key for key in keys if key != "_default"]
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
self.default = (goto, execute)
else:
keys = list(make_iter(dic.get("key", str(inum+1).strip()))) + [str(inum+1)]
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
if keys:
display_options.append((keys[0], desc))
for key in keys:
if goto or execute:
self.options[strip_ansi(key).strip().lower()] = (goto, execute)
self.nodetext = self._format_node(nodetext, display_options)
# handle the helptext
if helptext:
self.helptext = helptext
elif options:
self.helptext = _HELP_FULL if self.allow_quit else _HELP_NO_QUIT
else:
self.helptext = _HELP_NO_OPTIONS if self.allow_quit else _HELP_NO_OPTIONS_NO_QUIT
self._caller.execute_cmd("look")
def close_menu(self):
"""
Shutdown menu; occurs when reaching the end node.
"""
self._caller.cmdset.remove(EvMenuCmdSet)
del self._caller.ndb._menutree
# -------------------------------------------------------------------------------------------------
#
# Simple input shortcuts
#
# -------------------------------------------------------------------------------------------------
class CmdGetInput(Command):
"""
Enter your data and press return.
"""
key = _CMD_NOMATCH
aliases = _CMD_NOINPUT
def func(self):
"This is called when user enters anything."
caller = self.caller
callback = caller.ndb._getinputcallback
prompt = caller.ndb._getinputprompt
result = self.raw_string
ok = not callback(caller, prompt, result)
if ok:
# only clear the state if the callback does not return
# anything
del caller.ndb._getinputcallback
del caller.ndb._getinputprompt
caller.cmdset.remove(InputCmdSet)
class InputCmdSet(CmdSet):
"""
This stores the input command
"""
key = "input_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"called once at creation"
self.add(CmdGetInput())
def get_input(caller, prompt, callback):
"""
This is a helper function for easily request input from
the caller.
Args:
caller (Player or Object): The entity being asked
the question. This should usually be an object
controlled by a user.
prompt (str): This text will be shown to the user,
in order to let them know their input is needed.
callback (callable): A function that will be called
when the user enters a reply. It must take three
arguments: the `caller`, the `prompt` text and the
`result` of the input given by the user. If the
callback doesn't return anything or return False,
the input prompt will be cleaned up and exited. If
returning True, the prompt will remain and continue to
accept input.
Raises:
RuntimeError: If the given callback is not callable.
"""
if not callable(callback):
raise RuntimeError("get_input: input callback is not callable.")
caller.ndb._getinputcallback = callback
caller.ndb._getinputprompt = prompt
caller.cmdset.add(InputCmdSet)
caller.msg(prompt)
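# Editor's sketch (not part of Evennia): a hedged example of wiring up
# get_input from game code. `caller` is assumed to be an in-game
# Object/Player, so the call itself is shown in comment form; the callback
# signature follows the docstring above.
#
#     def _name_callback(caller, prompt, result):
#         caller.msg("You entered: %s" % result)
#         # returning nothing (i.e. None/False) closes the prompt again
#
#     get_input(caller, "What is your name?", _name_callback)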
#------------------------------------------------------------
#
# test menu strucure and testing command
#
#------------------------------------------------------------
def test_start_node(caller):
text = """
This is an example menu.
If you enter anything except the valid options, your input will be
recorded and you will be brought to a menu entry showing your
input.
Select options or use 'quit' to exit the menu.
"""
options = ({"key": ("{yS{net", "s"),
"desc": "Set an attribute on yourself.",
"exec": lambda caller: caller.attributes.add("menuattrtest", "Test value"),
"goto": "test_set_node"},
{"key": ("{yV{niew", "v"),
"desc": "View your own name",
"goto": "test_view_node"},
{"key": ("{yQ{nuit", "quit", "q", "Q"),
"desc": "Quit this menu example.",
"goto": "test_end_node"},
{"key": "_default",
"goto": "test_displayinput_node"})
return text, options
def test_set_node(caller):
text = ("""
The attribute 'menuattrtest' was set to
{w%s{n
(check it with examine after quitting the menu).
    This node has only one option, and one of its key aliases is the
string "_default", meaning it will catch any input, in this case
to return to the main menu. So you can e.g. press <return> to go
back now.
""" % caller.db.menuattrtest,
# optional help text for this node
"""
This is the help entry for this node. It is created by returning
the node text as a tuple - the second string in that tuple will be
used as the help text.
""")
options = {"key": ("back (default)", "_default"),
"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_view_node(caller):
text = """
Your name is {g%s{n!
click {lclook{lthere{le to trigger a look command under MXP.
This node's option has no explicit key (nor the "_default" key
    set), and so gets assigned a number automatically. You can in fact
    -always- use numbers (1...N) to refer to listed options even if you
don't see a string option key (try it!).
""" % caller.key
options = {"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_displayinput_node(caller, raw_string):
text = """
You entered the text:
"{w%s{n"
... which could now be handled or stored here in some way if this
was not just an example.
This node has an option with a single alias "_default", which
makes it hidden from view. It catches all input (except the
in-menu help/quit commands) and will, in this case, bring you back
to the start node.
""" % raw_string
options = {"key": "_default",
"goto": "test_start_node"}
return text, options
def test_end_node(caller):
text = """
This is the end of the menu and since it has no options the menu
will exit here, followed by a call of the "look" command.
"""
return text, None
class CmdTestMenu(Command):
"""
Test menu
Usage:
testmenu <menumodule>
Starts a demo menu from a menu node definition module.
"""
key = "testmenu"
def func(self):
if not self.args:
self.caller.msg("Usage: testmenu menumodule")
return
# start menu
EvMenu(self.caller, self.args.strip(), startnode="test_start_node", cmdset_mergetype="Replace")
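# Editor's sketch (not part of Evennia): as noted in the EvMenu docstring, a
# plain dict of node callables may be passed instead of a module path, which
# allows building menus dynamically. A hedged illustration (it requires an
# in-game `caller` object, so it is shown in comment form):
#
#     def _hello_node(caller):
#         return "Hello! This menu has no options, so it exits.", None
#
#     EvMenu(caller, {"start": _hello_node}, startnode="start")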
|
emergebtc/evennia
|
evennia/utils/evmenu.py
|
Python
|
bsd-3-clause
| 27,667
|
[
"VisIt"
] |
7ddfc01dfa0f127601d62b02790912557e1a7f7b4b665948ad6aebca3651886a
|
"""
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
import time
import sqlite3 as lite
from collections import OrderedDict, MutableMapping
from zope.interface import implements, Interface
from protos.objects import Value
from threading import RLock
class IStorage(Interface):
"""
Local storage for this node.
"""
def __setitem__(self, key, value):
"""
Set a key to the given value.
"""
def __getitem__(self, key):
"""
Get the given key. If item doesn't exist, raises C{KeyError}
"""
def get(self, key, default=None):
"""
Get given key. If not found, return default.
"""
def getSpecific(self, keyword, key):
"""
Return the exact value for a given keyword and key.
"""
def cull(self):
"""
Iterate over all keys and remove expired items
"""
def delete(self, keyword, key):
"""
Delete the value stored at keyword/key.
"""
def iterkeys(self):
"""
Get the key iterator for this storage, should yield a list of keys
"""
def iteritems(self, keyword):
"""
Get the value iterator for the given keyword, should yield a tuple of (key, value)
"""
def get_ttl(self, keyword, key):
"""
Get the remaining time for a given key.
"""
class ForgetfulStorage(object):
implements(IStorage)
def __init__(self, ttl=604800):
"""
By default, max age is a week.
"""
self.data = OrderedDict()
self.ttl = ttl
def __setitem__(self, keyword, values):
valueDic = TTLDict(self.ttl)
if keyword in self.data:
valueDic = self.data[keyword]
if values[0] not in valueDic:
valueDic[values[0]] = values[1]
else:
valueDic[values[0]] = values[1]
self.data[keyword] = valueDic
self.cull()
def cull(self):
for key in self.data.iterkeys():
self.data[key].cull()
if len(self.data[key]) == 0:
del self.data[key]
def get(self, keyword, default=None):
self.cull()
if keyword in self.data:
ret = []
for k, v in self[keyword].items():
value = Value()
value.valueKey = k
value.serializedData = v
ret.append(value.SerializeToString())
return ret
return default
def getSpecific(self, keyword, key):
self.cull()
if keyword in self.data and key in self.data[keyword]:
return self.data[keyword][key]
def delete(self, keyword, key):
del self.data[keyword][key]
self.cull()
def __getitem__(self, keyword):
self.cull()
return self.data[keyword]
def __iter__(self):
self.cull()
return iter(self.data)
def __repr__(self):
self.cull()
return repr(self.data)
def iterkeys(self):
self.cull()
return self.data.iterkeys()
def iteritems(self, keyword):
self.cull()
return self.data[keyword].iteritems()
def get_ttl(self, keyword, key):
if keyword in self.data and key in self.data[keyword]:
return self.data[keyword].get_ttl(key)
class PersistentStorage(object):
implements(IStorage)
def __init__(self, filename, ttl=604800):
self.ttl = ttl
self.db = lite.connect(filename)
self.db.text_factory = str
try:
cursor = self.db.cursor()
cursor.execute('''CREATE TABLE data(keyword BLOB, id BLOB, value BLOB, birthday FLOAT)''')
            cursor.execute('''CREATE INDEX idx1 ON data(keyword)''')
            cursor.execute('''CREATE INDEX idx2 ON data(birthday)''')
self.db.commit()
except Exception:
self.cull()
def __setitem__(self, keyword, values):
cursor = self.db.cursor()
cursor.execute('''SELECT id, value FROM data WHERE keyword=? AND id=? AND value=?''',
(keyword, values[0], values[1]))
if cursor.fetchone() is None:
cursor.execute('''INSERT OR IGNORE INTO data(keyword, id, value, birthday)
VALUES (?,?,?,?)''', (keyword, values[0], values[1], time.time()))
self.db.commit()
self.cull()
def __getitem__(self, keyword):
self.cull()
cursor = self.db.cursor()
cursor.execute('''SELECT id, value FROM data WHERE keyword=?''', (keyword,))
return cursor.fetchall()
def get(self, keyword, default=None):
self.cull()
if len(self[keyword]) > 0:
ret = []
for k, v in self[keyword]:
value = Value()
value.valueKey = k
value.serializedData = v
ret.append(value.SerializeToString())
return ret
return default
def getSpecific(self, keyword, key):
try:
cursor = self.db.cursor()
cursor.execute('''SELECT value FROM data WHERE keyword=? AND id=?''', (keyword, key))
return cursor.fetchone()[0]
except Exception:
return None
def cull(self):
expiration = time.time() - self.ttl
cursor = self.db.cursor()
cursor.execute('''DELETE FROM data WHERE birthday < ?''', (expiration,))
self.db.commit()
def delete(self, keyword, key):
try:
cursor = self.db.cursor()
cursor.execute('''DELETE FROM data WHERE keyword=? AND id=?''', (keyword, key))
self.db.commit()
except Exception:
pass
self.cull()
def iterkeys(self):
self.cull()
try:
cursor = self.db.cursor()
cursor.execute('''SELECT keyword FROM data''')
keywords = cursor.fetchall()
keyword_list = []
for k in keywords:
if k[0] not in keyword_list:
keyword_list.append(k[0])
return keyword_list.__iter__()
except Exception:
return None
def iteritems(self, keyword):
self.cull()
try:
cursor = self.db.cursor()
cursor.execute('''SELECT id, value FROM data WHERE keyword=?''', (keyword,))
return cursor.fetchall().__iter__()
except Exception:
return None
def get_ttl(self, keyword, key):
cursor = self.db.cursor()
cursor.execute('''SELECT birthday FROM data WHERE keyword=? AND id=?''', (keyword, key,))
return self.ttl - (time.time() - cursor.fetchall()[0][0])
class TTLDict(MutableMapping):
"""
Dictionary with TTL
Extra args and kwargs are passed to initial .update() call
"""
def __init__(self, default_ttl, *args, **kwargs):
self._default_ttl = default_ttl
self._values = {}
self._lock = RLock()
self.update(*args, **kwargs)
def __repr__(self):
return '<TTLDict@%#08x; ttl=%r, v=%r;>' % (id(self), self._default_ttl, self._values)
def set_ttl(self, key, ttl, now=None):
""" Set TTL for the given key """
if now is None:
now = time.time()
with self._lock:
# pylint: disable=unused-variable
_expire, value = self._values[key]
self._values[key] = (now + ttl, value)
def get_ttl(self, key, now=None):
""" Return remaining TTL for a key """
if now is None:
now = time.time()
with self._lock:
# pylint: disable=unused-variable
expire, _value = self._values[key]
return expire - now
def expire_at(self, key, timestamp):
""" Set the key expire timestamp """
with self._lock:
# pylint: disable=unused-variable
_expire, value = self._values[key]
self._values[key] = (timestamp, value)
def is_expired(self, key, now=None, remove=False):
""" Check if key has expired """
with self._lock:
if now is None:
now = time.time()
# pylint: disable=unused-variable
expire, _value = self._values[key]
if expire is None:
return False
expired = expire < now
if expired and remove:
self.__delitem__(key)
return expired
def __len__(self):
with self._lock:
for key in self._values.keys():
self.is_expired(key, remove=True)
return len(self._values)
def __iter__(self):
with self._lock:
for key in self._values.keys():
if not self.is_expired(key, remove=True):
yield key
def __setitem__(self, key, value):
with self._lock:
if self._default_ttl is None:
expire = None
else:
expire = time.time() + self._default_ttl
self._values[key] = (expire, value)
def __delitem__(self, key):
with self._lock:
del self._values[key]
def __getitem__(self, key):
with self._lock:
self.is_expired(key, remove=True)
return self._values[key][1]
def cull(self):
with self._lock:
for key in self._values.keys():
self.is_expired(key, remove=True)
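# Editor's sketch (not part of the OpenBazaar code): TTLDict behaves like a
# normal mapping whose entries age out; the demo below only exercises the
# class defined above and uses invented keys/values.
if __name__ == '__main__':
    demo = TTLDict(60)
    demo['foo'] = 'bar'
    print(demo['foo'])          # -> 'bar'
    print(demo.get_ttl('foo'))  # remaining lifetime, just under 60 seconds
    demo.expire_at('foo', time.time() - 1)
    print('foo' in demo)        # -> False, the entry has aged out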
|
the9ull/OpenBazaar-Server
|
dht/storage.py
|
Python
|
mit
| 9,488
|
[
"Brian"
] |
698a1679dcc112c5f3f5bd539864f54876e3fd6020dd330ade7708f0045f1397
|
"""
Push test that pushes actual data to widgets set up on a geckoboard dashboard.
gecko_settings.json should contain the widget keys and api keys for corresponding
widgets.
"""
import geckopush
from tests import pull_keys
WIDGET_KEYS = pull_keys.get_keys()
API_KEY = WIDGET_KEYS['api_key']
d = geckopush.Dashboard(API_KEY)
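# Editor's note (not part of the test suite): the exact schema of
# gecko_settings.json is defined by tests/pull_keys.py, which is not shown
# here. Judging from the keys referenced below, a flat JSON mapping is
# assumed, e.g.:
#
#     {
#         "api_key": "<your geckoboard api key>",
#         "bar_widget_key": "<widget key>",
#         "funnel_widget_key": "<widget key>",
#         "map_widget_key": "<widget key>"
#     }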
def test_bar_chart():
bar_widget_key = WIDGET_KEYS['bar_widget_key']
bar = geckopush.BarChart(dashboard=d, widget_key=bar_widget_key, data=[1,2,3,4,5,6,7,8,9,10])
bar.x_axis_labels = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"]
bar.x_axis_type = "standard"
bar.y_axis_format = "decimal"
bar.y_axis_unit = "USD"
ret = bar.push()
print(bar.get_payload())
print(ret)
if ret:
return True
else:
return False
def test_bullet_graph():
bullet_widget_key = WIDGET_KEYS['bullet_widget_key']
bullet = geckopush.BulletGraph(dashboard=d,
widget_key=bullet_widget_key,
label='Test Bullet Graph',
axis=["0", "200", "400", "600", "800", "1000"],
comparative="200",
measure_start="0",
measure_end="500",
red_start=0,
red_end=100,
amber_start=101,
amber_end=600,
green_start=601,
green_end=1000,
sublabel="A test Bullet graph",
projected_start='100',
projected_end='900',
)
bullet.add_data(
label='Second Bullet Graph',
axis=["0", "200", "400", "600", "800", "1000"],
comparative="100",
measure_start="0",
measure_end="800",
red_start=0,
red_end=200,
amber_start=201,
amber_end=300,
green_start=301,
green_end=1000,
sublabel="womp womp womp",
projected_start='600',
projected_end='900'
)
ret = bullet.push()
print(bullet.get_payload())
if ret:
return True
else:
return False
def test_funnel():
funnel_widget_key = WIDGET_KEYS['funnel_widget_key']
fun = geckopush.Funnel(dashboard=d, widget_key=funnel_widget_key)
fun.add_data(100, "one hundred")
fun.add_data(200, "two hundred")
fun.add_data(300, "three hundred")
fun.add_data(400, "four hundred")
fun.add_data(500, "five hundred")
fun.add_data(600, "six hundred")
fun.add_data(700, "seven hundred")
fun.add_data(800, "eight hundred")
ret = fun.push()
print(fun.get_payload())
if ret:
return True
else:
return False
def test_geckometer():
geckometer_widget_key = WIDGET_KEYS['geckometer_widget_key']
gm = geckopush.GeckoMeter(dashboard=d, widget_key=geckometer_widget_key,
item=26, min_value=0, max_value=50)
ret = gm.push()
print(gm.get_payload())
if ret:
return True
else:
return False
def test_highchart():
highchart_widget_key = WIDGET_KEYS["highchart_widget_key"]
highchart_str = "{chart:{style: {color: \"#b9bbbb\"},renderTo:\"container\",backgroundColor:\"transparent\",lineColor:\"rgba(35,37,38,100)\",plotShadow: false,},credits:{enabled:false},title:{style: {color: \"#b9bbbb\"},text:\"Monthly Average Temperature\"},xAxis:{categories:[\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]},yAxis:{title:{style: {color: \"#b9bbbb\"}, text:\"Temperature\"}},legend:{itemStyle: {color: \"#b9bbbb\"}, layout:\"vertical\",align:\"right\",verticalAlign:\"middle\",borderWidth:0},series:[{color:\"#108ec5\",name:\"NewYork\",data:[17.0,22.0,24.8,24.1,20.1,14.1,8.6,2.5]},{color:\"#52b238\",name:\"Berlin\",data:[13.5,17.0,18.6,17.9,14.3,9.0,3.9,1.0]},{color:\"#ee5728\",name:\"London\",data:[11.9,15.2,17.0,16.6,14.2,10.3,6.6,4.8]}]}"
hc = geckopush.HighCharts(dashboard=d,
widget_key=highchart_widget_key,
highchart=highchart_str)
ret = hc.push()
print(hc.get_payload())
if ret:
return True
else:
return False
def test_leaderboard():
leaderboard_widget_key = WIDGET_KEYS["leaderboard_widget_key"]
lb = geckopush.Leaderboard(dashboard=d,
widget_key=leaderboard_widget_key)
lb.add_data("Jack", 100, 200)
lb.add_data("Bob", 50, 50)
lb.add_data("Renaldo", 100, 20)
lb.add_data("Barney", 0, 0)
lb.add_data("Farnsworth", 96, 4)
ret = lb.push()
print(lb.get_payload())
if ret:
return True
else:
return False
def test_line_chart_datetime():
linechart_widget_key = WIDGET_KEYS["linechart_widget_key"]
lc = geckopush.LineChart(dashboard=d,
widget_key=linechart_widget_key)
lc.add_data(name="One", data=[400, 500, 900, 900, 1000])
lc.add_data(name="Two", data=[1000, 900, 800, 200, 100])
lc.add(x_axis_labels=["2015-10-01", "2015-10-02", "2015-10-03", "2015-10-04", "2015-10-06"])
lc.add(x_axis_type="datetime")
lc.add(y_axis_format="currency")
lc.add(y_axis_unit="USD")
ret = lc.push()
print(lc.get_payload())
if ret:
return True
else:
return False
def test_List():
lst_widget_key = WIDGET_KEYS["list_widget_key"]
lt = geckopush.List(dashboard=d,
widget_key=lst_widget_key)
lt.add_data(text="12345", name="numbers",
color="#ff2015", description="These are numbers")
lt.add_data(text="abcde", name="letters", color= "#ffffff", description="These are letters")
ret = lt.push()
print(lt.get_payload())
if ret:
return True
else:
return False
def test_map():
map_widget_key = WIDGET_KEYS["map_widget_key"]
mp = geckopush.Map(dashboard=d, widget_key=map_widget_key)
mp.add_data(city_name="New York", country_code="US", size="10")
mp.add_data(host="google.com")
mp.add_data(ip="46.228.47.115")
mp.add_data(latitude=22.434355, longitude=11.12345, size=5, color="#ffffff")
ret = mp.push()
print(mp.get_payload())
if ret:
return True
else:
return False
def test_monitoring():
monitoring_widget_key = WIDGET_KEYS["monitoring_widget_key"]
mo = geckopush.Monitoring(dashboard=d, widget_key=monitoring_widget_key)
mo.add_data(status="up", downtime="Never", responsetime= "123 ms")
ret = mo.push()
print(mo.get_payload())
if ret:
return True
else:
return False
def test_pie_chart():
piechart_widget_key = WIDGET_KEYS["pie_chart_widget_key"]
pi = geckopush.PieChart(dashboard=d, widget_key=piechart_widget_key)
pi.add_data(100, "Slice 1", "13699c")
pi.add_data(200, "Slice 2", "198acd")
ret = pi.push()
print(pi.get_payload())
if ret:
return True
else:
return False
def test_number_and_secondary_stat_1():
widget_key = WIDGET_KEYS["number_and_secondary_stat_widget_key_1"]
ns = geckopush.NumberAndSecondaryStat(dashboard=d, widget_key=widget_key)
ns.add_data(primary_value=15, secondary_value=25)
ret = ns.push()
print(ns.get_payload())
if ret:
return True
else:
return False
def test_number_and_secondary_stat_2():
widget_key = WIDGET_KEYS["number_and_secondary_stat_widget_key_2"]
ns = geckopush.NumberAndSecondaryStat(dashboard=d, widget_key=widget_key)
ns.add_data(primary_value=15, text="Hola Amigo")
ret = ns.push()
print(ns.get_payload())
if ret:
return True
else:
return False
def test_number_and_secondary_stat_3():
widget_key = WIDGET_KEYS["number_and_secondary_stat_widget_key_2"]
ns = geckopush.NumberAndSecondaryStat(dashboard=d, widget_key=widget_key)
ns.add_data(primary_value=15, secondary_value=[12345, 12345, 15555, 12345, 12322])
ret = ns.push()
print(ns.get_payload())
if ret:
return True
else:
return False
def test_rag_numbers():
widget_key = WIDGET_KEYS["RAG_numbers_widget_key"]
rg = geckopush.RAG(dashboard=d, widget_key=widget_key)
rg.add_data(text="One", value=50, prefix="$", color="green")
rg.add_data(text="Two", value=100, prefix="$", color="amber")
rg.add_data(text="Three", value=150, prefix="$", color="red")
ret = rg.push()
print(rg.get_payload())
if ret:
return True
else:
return False
def test_rag_columns():
widget_key = WIDGET_KEYS["RAG_columns_widget_key"]
rg = geckopush.RAG(dashboard=d, widget_key=widget_key)
rg.add_data(text="One", value=50, prefix="$", color="green")
rg.add_data(text="Two", value=100, prefix="$", color="amber")
rg.add_data(text="Three", value=150, prefix="$", color="red")
ret = rg.push()
print(rg.get_payload())
if ret:
return True
else:
return False
def test_text():
widget_key = WIDGET_KEYS["text_widget_key"]
rg = geckopush.Text(dashboard=d, widget_key=widget_key)
rg.add_data(text="Hello There My Friend", type=0)
rg.add_data(text="How are you doing?", type=1)
ret = rg.push()
print(rg.get_payload())
if ret:
return True
else:
return False
if __name__ == '__main__':
tests = [
test_bar_chart,
test_funnel,
test_bullet_graph,
test_geckometer,
test_highchart,
test_leaderboard,
test_line_chart_datetime,
test_List,
test_map,
test_monitoring,
test_pie_chart,
test_number_and_secondary_stat_1,
test_number_and_secondary_stat_2,
test_number_and_secondary_stat_3,
test_rag_numbers,
test_rag_columns,
test_text
]
successful = []
failed = []
for i, test in enumerate(tests):
result = test()
if result:
successful.append(test)
print("{} test successful".format(test.__name__))
else:
failed.append(test)
print("{} test failed".format(test.__name__))
print("")
if len(failed) > 0:
print("{}/{} tests failed".format(len(failed),len(tests)))
else:
print("ALL TESTS SUCCESSFUL")
|
patleeman/geckopush
|
tests/push_test.py
|
Python
|
mit
| 10,551
|
[
"Amber"
] |
44551939103cbd149f81a262fb765b44075a09959e4779b8f6cbcc859badd749
|
#!/usr/bin/env python
import pylab as pyl
from mpl_toolkits.axes_grid1 import AxesGrid
import cPickle as pickle
from colsort import colsort
def plot_uvj_vs_icd():
galaxies = pickle.load(open('galaxies.pickle','rb'))
galaxies = filter(lambda galaxy: galaxy.ICD_IH != None, galaxies)
galaxies = filter(lambda galaxy: galaxy.sersic != None and \
galaxy.ston_I > 30, galaxies)
#Upper and Lower limit arrow verts
arrow_left = [[0,0],[-1,1],[0,0],[-2,0],[0,0],[-1,-1],[0,0]]
F = pyl.figure(1,figsize=(8,3))
grid = AxesGrid(F, 111,
nrows_ncols=(1,4),
axes_pad = 0.1,
add_all=True,
aspect=False,
share_all = True)
ax1 = grid[0]
ax2 = grid[1]
ax3 = grid[2]
ax4 = grid[3]
for galaxy in galaxies:
if galaxy.sersic < 1.:
col1 =ax1.scatter(galaxy.ICD_IH * 100., pyl.log10(galaxy.ssfr),
s=25, c='0.8', edgecolor='0.8')
if 1. < galaxy.sersic < 2.:
col2 =ax2.scatter(galaxy.ICD_IH * 100., pyl.log10(galaxy.ssfr),
s=25, c='0.8', edgecolor='0.8')
if 2. < galaxy.sersic < 3.:
col3 =ax3.scatter(galaxy.ICD_IH * 100., pyl.log10(galaxy.ssfr),
s=25, c='0.8', edgecolor='0.8')
if 3. < galaxy.sersic:
if galaxy.ICD_IH*100 < 50:
col4 =ax4.scatter(galaxy.ICD_IH * 100., pyl.log10(galaxy.ssfr),
s=25, c='0.8', edgecolor='0.8')
else:
col4 = ax4.scatter(50, pyl.log10(galaxy.ssfr), marker=None,
s=100, verts=arrow_left)
ax1.set_ylabel('Log sSFR')
ax1.set_title('n < 1')
ax2.set_title('1 < n < 2')
ax3.set_title('2 < n < 3')
ax4.set_title('3 < n')
pyl.figtext(.5, .05, r'$\xi[i_{775},H_{160}]$ (%)',fontsize=18,
horizontalalignment='center')
ax1.set_xlim(-5,45)
ax2.set_xlim(-5,45)
ax3.set_xlim(-5,45)
ax4.set_xlim(-5,45)
ax1.set_ylim(-11, -6)
ax2.set_ylim(-11, -6)
ax3.set_ylim(-11, -6)
ax4.set_ylim(-11, -6)
import matplotlib.font_manager
# line1 = pyl.Line2D([], [], marker='o', mfc='0.8', mec='0.8', markersize=8,
# linewidth=0)
# line2 = pyl.Line2D([], [], marker='s', mec='blue', mfc='None',
# markersize=10, linewidth=0, markeredgewidth=2)
# line3 = pyl.Line2D([], [], color='r', linewidth=2)
# prop = matplotlib.font_manager.FontProperties(size='small')
# ax3.legend((line1, line2, line3), ('Data', 'Quartiles',
# 'Medians'), 'upper center', prop=prop, ncol=1)
pyl.tight_layout()
pyl.subplots_adjust(bottom=0.21, left=0.11)
pyl.show()
if __name__ =='__main__':
plot_uvj_vs_icd()
|
boada/ICD
|
sandbox/legacy_plot_code/plot_icd_n_ssfr.py
|
Python
|
mit
| 2,741
|
[
"Galaxy"
] |
8434857d6f7db27467d42e1045f02be414e90b2c586fa01feb401018cbd6f5af
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides utility classes for string operations.
"""
import re
from fractions import Fraction
from monty.dev import deprecated
SUBSCRIPT_UNICODE = {
"0": "₀",
"1": "₁",
"2": "₂",
"3": "₃",
"4": "₄",
"5": "₅",
"6": "₆",
"7": "₇",
"8": "₈",
"9": "₉",
}
SUPERSCRIPT_UNICODE = {
"0": "⁰",
"1": "¹",
"2": "²",
"3": "³",
"4": "⁴",
"5": "⁵",
"6": "⁶",
"7": "⁷",
"8": "⁸",
"9": "⁹",
"+": "⁺",
"-": "⁻",
}
class Stringify:
"""
    Mix-in class for string formatting, e.g. subscripting numbers and symbols or superscripting them.
"""
STRING_MODE = "SUBSCRIPT"
def to_pretty_string(self) -> str:
"""
:return: A pretty string representation. By default, the __str__ output is used, but this method can be
overridden if a different representation from default is desired.
"""
return self.__str__()
def to_latex_string(self) -> str:
"""
Generates a LaTeX formatted string. The mode is set by the class variable STRING_MODE, which defaults to
"SUBSCRIPT". E.g., Fe2O3 is transformed to Fe$_{2}$O$_{3}$. Setting STRING_MODE to "SUPERSCRIPT" creates
superscript, e.g., Fe2+ becomes Fe^{2+}. The initial string is obtained from the class's __str__ method.
:return: String for display as in LaTeX with proper superscripts and subscripts.
"""
str_ = self.to_pretty_string()
# First we process strings that already have _ and ^ by escaping the relevant parts.
str_ = re.sub(r"_(\d+)", r"$_{\1}$", str_)
str_ = re.sub(r"\^([\d\+\-]+)", r"$^{\1}$", str_)
if self.STRING_MODE == "SUBSCRIPT":
return re.sub(r"([A-Za-z\(\)])([\d\+\-\.]+)", r"\1$_{\2}$", str_)
if self.STRING_MODE == "SUPERSCRIPT":
return re.sub(r"([A-Za-z\(\)])([\d\+\-\.]+)", r"\1$^{\2}$", str_)
return str_
def to_html_string(self) -> str:
"""
Generates a HTML formatted string. This uses the output from to_latex_string to generate a HTML output.
:return: HTML formatted string.
"""
str_ = re.sub(r"\$_\{([^}]+)\}\$", r"<sub>\1</sub>", self.to_latex_string())
str_ = re.sub(r"\$\^\{([^}]+)\}\$", r"<sup>\1</sup>", str_)
return re.sub(r"\$\\overline\{([^}]+)\}\$", r'<span style="text-decoration:overline">\1</span>', str_)
def to_unicode_string(self):
"""
:return: Unicode string with proper sub and superscripts. Note that this works only with systems where the sub
and superscripts are pure integers.
"""
str_ = self.to_latex_string()
for m in re.finditer(r"\$_\{(\d+)\}\$", str_):
s1 = m.group()
s2 = [SUBSCRIPT_UNICODE[s] for s in m.group(1)]
str_ = str_.replace(s1, "".join(s2))
for m in re.finditer(r"\$\^\{([\d\+\-]+)\}\$", str_):
s1 = m.group()
s2 = [SUPERSCRIPT_UNICODE[s] for s in m.group(1)]
str_ = str_.replace(s1, "".join(s2))
return str_
def str_delimited(results, header=None, delimiter="\t"):
"""
Given a tuple of tuples, generate a delimited string form.
>>> results = [["a","b","c"],["d","e","f"],[1,2,3]]
>>> print(str_delimited(results,delimiter=","))
a,b,c
d,e,f
1,2,3
Args:
result: 2d sequence of arbitrary types.
header: optional header
Returns:
Aligned string output in a table-like format.
"""
returnstr = ""
if header is not None:
returnstr += delimiter.join(header) + "\n"
return returnstr + "\n".join([delimiter.join([str(m) for m in result]) for result in results])
def formula_double_format(afloat, ignore_ones=True, tol=1e-8):
"""
This function is used to make pretty formulas by formatting the amounts.
Instead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4.
Args:
afloat (float): a float
ignore_ones (bool): if true, floats of 1 are ignored.
tol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2
Returns:
A string representation of the float for formulas.
"""
if ignore_ones and afloat == 1:
return ""
if abs(afloat - int(afloat)) < tol:
return str(int(afloat))
return str(round(afloat, 8))
@deprecated(
message="These methods have been deprecated in favor of using the Stringify mix-in class, which provides "
"to_latex_string, to_unicode_string, etc. They will be removed in v2022."
)
def latexify(formula):
"""
Generates a LaTeX formatted formula. E.g., Fe2O3 is transformed to
Fe$_{2}$O$_{3}$.
Args:
formula (str): Input formula.
Returns:
Formula suitable for display as in LaTeX with proper subscripts.
"""
return re.sub(r"([A-Za-z\(\)])([\d\.]+)", r"\1$_{\2}$", formula)
@deprecated(
message="These methods have been deprecated in favor of using the Stringify mix-in class, which provides "
"to_latex_string, to_unicode_string, etc. They will be removed in v2022."
)
def htmlify(formula):
"""
Generates a HTML formatted formula, e.g. Fe2O3 is transformed to
    Fe<sub>2</sub>O<sub>3</sub>
:param formula:
:return:
"""
return re.sub(r"([A-Za-z\(\)])([\d\.]+)", r"\1<sub>\2</sub>", formula)
@deprecated(
message="These methods have been deprecated in favor of using the Stringify mix-in class, which provides "
"to_latex_string, to_unicode_string, etc. They will be removed in v2022."
)
def unicodeify(formula):
"""
Generates a formula with unicode subscripts, e.g. Fe2O3 is transformed
to Fe₂O₃. Does not support formulae with decimal points.
:param formula:
:return:
"""
if "." in formula:
raise ValueError("No unicode character exists for subscript period.")
for original_subscript, subscript_unicode in SUBSCRIPT_UNICODE.items():
formula = formula.replace(str(original_subscript), subscript_unicode)
return formula
@deprecated(
message="These methods have been deprecated in favor of using the Stringify mix-in class, which provides "
"to_latex_string, to_unicode_string, etc. They will be removed in v2022."
)
def latexify_spacegroup(spacegroup_symbol):
r"""
Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.
Args:
spacegroup_symbol (str): A spacegroup symbol
Returns:
A latex formatted spacegroup with proper subscripts and overlines.
"""
sym = re.sub(r"_(\d+)", r"$_{\1}$", spacegroup_symbol)
return re.sub(r"-(\d)", r"$\\overline{\1}$", sym)
@deprecated(
message="These methods have been deprecated in favor of using the Stringify mix-in class, which provides "
"to_latex_string, to_unicode_string, etc. They will be removed in v2022."
)
def unicodeify_spacegroup(spacegroup_symbol):
r"""
Generates a unicode formatted spacegroup. E.g., P2$_{1}$/c is converted to
P2₁/c and P$\\overline{1}$ is converted to P̅1.
Args:
spacegroup_symbol (str): A spacegroup symbol as LaTeX
Returns:
A unicode spacegroup with proper subscripts and overlines.
"""
if not spacegroup_symbol:
return ""
symbol = latexify_spacegroup(spacegroup_symbol)
for number, unicode_number in SUBSCRIPT_UNICODE.items():
symbol = symbol.replace("$_{" + str(number) + "}$", unicode_number)
symbol = symbol.replace("_" + str(number), unicode_number)
overline = "\u0305" # u"\u0304" (macron) is also an option
symbol = symbol.replace("$\\overline{", "")
symbol = symbol.replace("$", "")
symbol = symbol.replace("{", "")
# overline unicode symbol comes after the character with the overline
symbol = symbol.replace("}", overline)
return symbol
@deprecated(
message="These methods have been deprecated in favor of using the Stringify mix-in class, which provides "
"to_latex_string, to_unicode_string, etc. They will be removed in v2022."
)
def unicodeify_species(specie_string):
r"""
Generates a unicode formatted species string, with appropriate
superscripts for oxidation states.
Args:
specie_string (str): Species string, e.g. O2-
Returns:
Species string, e.g. O²⁻
"""
if not specie_string:
return ""
for character, unicode_character in SUPERSCRIPT_UNICODE.items():
specie_string = specie_string.replace(character, unicode_character)
return specie_string
def stream_has_colours(stream):
"""
True if stream supports colours. Python cookbook, #475186
"""
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except Exception:
return False # guess false in case of error
def transformation_to_string(matrix, translation_vec=(0, 0, 0), components=("x", "y", "z"), c="", delim=","):
"""
Convenience method. Given matrix returns string, e.g. x+2y+1/4
:param matrix
:param translation_vec
:param components: either ('x', 'y', 'z') or ('a', 'b', 'c')
:param c: optional additional character to print (used for magmoms)
:param delim: delimiter
:return: xyz string
"""
parts = []
for i in range(3):
s = ""
m = matrix[i]
t = translation_vec[i]
for j, dim in enumerate(components):
if m[j] != 0:
f = Fraction(m[j]).limit_denominator()
if s != "" and f >= 0:
s += "+"
if abs(f.numerator) != 1:
s += str(f.numerator)
elif f < 0:
s += "-"
s += c + dim
if f.denominator != 1:
s += "/" + str(f.denominator)
if t != 0:
s += ("+" if (t > 0 and s != "") else "") + str(Fraction(t).limit_denominator())
if s == "":
s += "0"
parts.append(s)
return delim.join(parts)
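# Hedged example (added for illustration): the identity rotation combined with
# a translation of (1/2, 0, 0) renders as the usual symmetry-operation string.
#
#   transformation_to_string([[1, 0, 0], [0, 1, 0], [0, 0, 1]], (0.5, 0, 0))
#   -> 'x+1/2,y,z'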
def disordered_formula(disordered_struct, symbols=("x", "y", "z"), fmt="plain"):
"""
Returns a formula of a form like AxB1-x (x=0.5)
for disordered structures. Will only return a
formula for disordered structures with one
kind of disordered site at present.
Args:
disordered_struct: a disordered structure
symbols: a tuple of characters to use for
subscripts, by default this is ('x', 'y', 'z')
but if you have more than three disordered
species more symbols will need to be added
fmt (str): 'plain', 'HTML' or 'LaTeX'
Returns (str): a disordered formula string
"""
# this is in string utils and not in
# Composition because we need to have access
# to site occupancies to calculate this, so
# have to pass the full structure as an argument
# (alternatively this could be made a method on
# Structure)
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import get_el_sp
if disordered_struct.is_ordered:
raise ValueError("Structure is not disordered, " "so disordered formula not defined.")
disordered_site_compositions = {site.species for site in disordered_struct if not site.is_ordered}
if len(disordered_site_compositions) > 1:
# this probably won't happen too often
raise ValueError(
"Ambiguous how to define disordered " "formula when more than one type of disordered " "site is present."
)
disordered_site_composition = disordered_site_compositions.pop()
disordered_species = {str(sp) for sp, occu in disordered_site_composition.items()}
if len(disordered_species) > len(symbols):
# this probably won't happen too often either
raise ValueError("Not enough symbols to describe disordered composition: " "{}".format(symbols))
symbols = list(symbols)[0 : len(disordered_species) - 1]
comp = disordered_struct.composition.get_el_amt_dict().items()
# sort by electronegativity, as per composition
comp = sorted(comp, key=lambda x: get_el_sp(x[0]).X)
disordered_comp = []
variable_map = {}
total_disordered_occu = sum([occu for sp, occu in comp if str(sp) in disordered_species])
# composition to get common factor
factor_comp = disordered_struct.composition.as_dict()
factor_comp["X"] = total_disordered_occu
for sp in disordered_species:
del factor_comp[str(sp)]
factor_comp = Composition.from_dict(factor_comp)
factor = factor_comp.get_reduced_formula_and_factor()[1]
total_disordered_occu /= factor
remainder = "{}-{}".format(
formula_double_format(total_disordered_occu, ignore_ones=False),
"-".join(symbols),
)
for sp, occu in comp:
sp = str(sp)
if sp not in disordered_species:
disordered_comp.append((sp, formula_double_format(occu / factor)))
else:
if len(symbols) > 0:
symbol = symbols.pop(0)
disordered_comp.append((sp, symbol))
variable_map[symbol] = occu / total_disordered_occu / factor
else:
disordered_comp.append((sp, remainder))
if fmt == "LaTeX":
sub_start = "_{"
sub_end = "}"
elif fmt == "HTML":
sub_start = "<sub>"
sub_end = "</sub>"
elif fmt != "plain":
raise ValueError("Unsupported output format, " "choose from: LaTeX, HTML, plain")
disordered_formula = []
for sp, occu in disordered_comp:
disordered_formula.append(sp)
if occu: # can be empty string if 1
if fmt != "plain":
disordered_formula.append(sub_start)
disordered_formula.append(occu)
if fmt != "plain":
disordered_formula.append(sub_end)
disordered_formula.append(" ")
disordered_formula += ["{}={} ".format(k, formula_double_format(v)) for k, v in variable_map.items()]
return "".join(map(str, disordered_formula))[0:-1]
|
richardtran415/pymatgen
|
pymatgen/util/string.py
|
Python
|
mit
| 14,409
|
[
"pymatgen"
] |
7c9e10c140684f95da9744dad0324c09b4ffb6f83f4c3e1e9f6f07ccfb55cdb3
|
import subprocess
import sys
usage = """
python prepareMicroarrayProbes.py MODE ...
there are four modes:
BUILD:
builds a bowtie index and requires two additional arguments:
(i) the cDNA sequence fasta file
(ii) the bowtie index name/path
example:
python prepareMicroarrayProbes.py BUILD myCDNA.fasta myBowtieIndex
TABTOFASTA:
reformats a table of probe names and sequences into a fasta file.
Requires five additional arguments:
(i) file with the tabular probe name and sequence data
(ii) tells if there is a header in the table (1 for yes, 0 for no)
(iii) column number (starting with 1) holding the probe names
(iv) column number (starting with 1) holding the sequences
(v) fasta file where the sequences will be written
ALIGN:
aligns the probes to the cDNA and requires four additional arguments:
(i) the bowtie index name/path (see BUILD above)
(ii) a fasta file with the probe sequences
(iii) a file where the unaligned sequences will be stored
(iv) a file where the aligned sequences will be stored
example:
python prepareMicroarrayProbes.py ALIGN myBowtieIndex myProbes.fasta myUnaligned.fasta myAligned.bwt
EXTRACT:
extracts the mappings from probe names to locus IDs and vice versa.
Requires three additional arguments:
(i) file with the aligned sequences (see ALIGN above)
(ii) file with the probe name to locus ID mappings
(iii) file with the locus ID to probe name mappings
"""
if len(sys.argv) < 2:
sys.exit(usage)
mode = sys.argv[1]
# build bowtie index
def buildIndex(fastaFile, indexPath):
command = "bowtie-build -f -q -o 0 %s %s" % (fastaFile, indexPath)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout_data, stderr_data) = process.communicate()
if stderr_data is not None:
print >> sys.stderr, "errors raised by bowtie-build: " + stderr_data
if stdout_data is not None:
print >> sys.stdout, "bowtie-build message: " + stdout_data
print >> sys.stderr, "build bowtie index"
# convert the sequence table with probeName, sequence, spot_id to a fasta file
def convertSeqTabToFasta(seqTab, hasHeader, probeColumn, sequenceColumn, fastaFile):
with open(seqTab, 'r') as infile, open(fastaFile, 'w') as outfile:
if hasHeader:
header = infile.readline()
print >> sys.stderr, "removed this header:", header[:-1]
for line in infile:
fields = line[:-1].split('\t')
pn = fields[probeColumn]
sequence = fields[sequenceColumn]
print >> outfile, '>'+pn
print >> outfile, sequence
print >> sys.stderr, "converted sequence table to fasta"
# align sequences
def alignSequences(bowtieIndex, sequenceFile, unalignedFile, alignedFile):
command = "bowtie -v 3 -m 10 -a --best --strata -p 4 -t %s -f %s --un %s %s" % (bowtieIndex, sequenceFile, unalignedFile, alignedFile)
    # run bowtie and capture its stdout/stderr
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout_data, stderr_data) = process.communicate()
if stderr_data is not None:
print >> sys.stderr, "errors raised by bowtie: " + stderr_data
if stdout_data is not None:
print >> sys.stdout, "bowtie message: " + stdout_data
print >> sys.stderr, "aligned sequences"
# get mapping tables
def getMapping(alignedFile, PNtoLOCUSfile, LOCUStoPNfile):
LOCUStoPN = {}
PNtoLOCUS = {}
with open(alignedFile, 'r') as infile:
for line in infile:
pn, strand, modelID, pos, sequence, quality, mm, conversions = line[:-1].split('\t')
geneID = modelID.split('.')[0]
try:
if pn not in LOCUStoPN[geneID]:
LOCUStoPN[geneID].append(pn)
except KeyError:
LOCUStoPN[geneID] = [pn]
try:
if geneID not in PNtoLOCUS[pn]:
PNtoLOCUS[pn].append(geneID)
except KeyError:
PNtoLOCUS[pn] = [geneID]
print >> sys.stderr, "extracted mappings"
with open(PNtoLOCUSfile, 'w') as outfile:
for pn, locusList in PNtoLOCUS.items():
print >> outfile, pn + '\t' + ';'.join(locusList)
with open(LOCUStoPNfile, 'w') as outfile:
for locus, pnList in LOCUStoPN.items():
print >> outfile, locus + '\t' + ';'.join(pnList)
#
mode = sys.argv[1]
if mode == "BUILD":
fastaFile = sys.argv[2]
indexPath = sys.argv[3]
buildIndex(fastaFile, indexPath)
elif mode == "TABTOFASTA":
seqTab = sys.argv[2]
hasHeader = bool(int(sys.argv[3]))
probeColumn = int(sys.argv[4])-1
sequenceColumn = int(sys.argv[5])-1
fastaFile = sys.argv[6]
convertSeqTabToFasta(seqTab, hasHeader, probeColumn, sequenceColumn, fastaFile)
elif mode == "ALIGN":
bowtieIndex = sys.argv[2]
sequenceFile = sys.argv[3]
unalignedFile = sys.argv[4]
alignedFile = sys.argv[5]
alignSequences(bowtieIndex, sequenceFile, unalignedFile, alignedFile)
elif mode == "EXTRACT":
alignedFile = sys.argv[2]
PNtoLOCUSfile = sys.argv[3]
LOCUStoPNfile = sys.argv[4]
getMapping(alignedFile, PNtoLOCUSfile, LOCUStoPNfile)
else:
print >> sys.stderr, "unkown mode"
sys.exit(usage)
|
MWSchmid/microarray
|
prepareMicroarrayProbes.py
|
Python
|
gpl-3.0
| 4,846
|
[
"Bowtie"
] |
bd4dd48487112446b900f11a229dc1f395b23d4d108987190a1ec472e0844501
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles function calls, by generating compiled function names and calls.
Note: this transformer does not rename the top level object being converted;
that is the caller's responsibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
# TODO(mdan): Rename to FunctionCallsTransformer.
class _Function(object):
no_root = True
class CallTreeTransformer(converter.Base):
"""Transforms the call tree by renaming transformed symbols."""
def visit_FunctionDef(self, node):
self.state[_Function].enter()
node.args = self.visit(node.args)
node.body = self.visit_block(node.body)
if self.state[_Function].level < 2:
# Top-level functions lose their decorator because the conversion is
# always just-in-time and by the time it happens the decorators are
# already set to be applied.
node.decorator_list = []
else:
# Inner functions are converted already, so we insert a decorator to
# prevent double conversion. Double conversion would work too, but this
# saves the overhead.
node.decorator_list.append(
parser.parse_expression('ag__.do_not_convert_internal'))
if node.returns:
node.returns = self.visit(node.returns)
self.state[_Function].exit()
return node
def visit_With(self, node):
# Context manager calls (in node.items) are not converted.
node.body = self.visit_block(node.body)
return node
def visit_Call(self, node):
# TODO(mdan): Refactor converted_call as a 'Call' operator.
# Calls to the internal 'ag__' module are never converted (though their
# arguments might be).
full_name = str(anno.getanno(node.func, anno.Basic.QN, default=''))
if full_name.startswith('ag__.'):
return self.generic_visit(node)
if (full_name == 'print' and
not self.ctx.program.options.uses(converter.Feature.BUILTIN_FUNCTIONS)):
return self.generic_visit(node)
if isinstance(node.func, gast.Attribute):
func = gast.Str(node.func.attr)
owner = node.func.value
else:
func = node.func
owner = parser.parse_expression('None')
starred_arg = None
normal_args = []
for a in node.args:
if isinstance(a, gast.Starred):
assert starred_arg is None, 'Multiple *args should be impossible.'
starred_arg = a
else:
a = self.visit(a)
normal_args.append(a)
if starred_arg is None:
args = templates.replace_as_expression('(args,)', args=normal_args)
else:
args = templates.replace_as_expression(
'(args,) + tuple(stararg)',
stararg=starred_arg.value,
args=normal_args)
kwargs_arg = None
normal_keywords = []
for k in node.keywords:
if k.arg is None:
assert kwargs_arg is None, 'Multiple **kwargs should be impossible.'
kwargs_arg = k
else:
k = self.visit(k)
normal_keywords.append(k)
if kwargs_arg is None:
kwargs = ast_util.keywords_to_dict(normal_keywords)
else:
kwargs = templates.replace_as_expression(
'dict(kwargs, **keywords)',
kwargs=kwargs_arg.value,
keywords=ast_util.keywords_to_dict(normal_keywords))
template = """
ag__.converted_call(func, owner, options, args, kwargs)
"""
new_call = templates.replace_as_expression(
template,
func=func,
owner=owner,
options=self.ctx.program.options.to_ast(
internal_convert_user_code=self.ctx.program.options.recursive),
args=args,
kwargs=kwargs)
return new_call
def visit_Print(self, node):
node = self.generic_visit(node)
args = node.values
# Following is the case when calling print(a, b)
if len(args) == 1 and isinstance(args[0], gast.Tuple):
args = args[0].elts
template = """
ag__.converted_call(func, None, options, args, {})
"""
return templates.replace_as_expression(
template,
func='print',
options=self.ctx.program.options.to_ast(),
args=args)
def transform(node, ctx):
"""Transform function call to the compiled counterparts.
Args:
node: AST
ctx: EntityContext
Returns:
A tuple (node, new_names):
node: The transformed AST
new_names: set(string), containing any newly-generated names
"""
return CallTreeTransformer(ctx).visit(node)
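# Illustrative note (added; not part of the TensorFlow source): following the
# template above, a call such as `f(x, y=1)` is rewritten roughly into
# `ag__.converted_call(f, None, <conversion options>, (x,), {'y': 1})`, so the
# decision of whether `f` itself must be converted is deferred to run time.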
|
kevin-coder/tensorflow-fork
|
tensorflow/python/autograph/converters/call_trees.py
|
Python
|
apache-2.0
| 5,417
|
[
"VisIt"
] |
820405f0ce6f2802dc6f786f16b587e167cc51f154a6ad0ca01af1f1e4b7bc76
|
# -*- Python -*-
# Converted by ./convert_mime_type_table.py from:
# /usr/src2/apache_1.2b6/conf/mime.types
#
content_type_map = {
'css': 'text/css',
'js': 'text/javascript',
'log': 'text/plain',
'ai': 'application/postscript',
'aif': 'audio/x-aiff',
'aifc': 'audio/x-aiff',
'aiff': 'audio/x-aiff',
'au': 'audio/basic',
'avi': 'video/x-msvideo',
'bcpio': 'application/x-bcpio',
'bin': 'application/octet-stream',
'cdf': 'application/x-netcdf',
'class': 'application/octet-stream',
'cpio': 'application/x-cpio',
'cpt': 'application/mac-compactpro',
'csh': 'application/x-csh',
'dcr': 'application/x-director',
'dir': 'application/x-director',
'dms': 'application/octet-stream',
'doc': 'application/msword',
'dvi': 'application/x-dvi',
'dxr': 'application/x-director',
'eps': 'application/postscript',
'etx': 'text/x-setext',
'exe': 'application/octet-stream',
'gif': 'image/gif',
'gtar': 'application/x-gtar',
'gz': 'application/x-gzip',
'hdf': 'application/x-hdf',
'hqx': 'application/mac-binhex40',
'htm': 'text/html',
'html': 'text/html',
'ice': 'x-conference/x-cooltalk',
'ief': 'image/ief',
'jpe': 'image/jpeg',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'kar': 'audio/midi',
'latex': 'application/x-latex',
'lha': 'application/octet-stream',
'lzh': 'application/octet-stream',
'man': 'application/x-troff-man',
'me': 'application/x-troff-me',
'mid': 'audio/midi',
'midi': 'audio/midi',
'mif': 'application/x-mif',
'mov': 'video/quicktime',
'movie': 'video/x-sgi-movie',
'mp2': 'audio/mpeg',
'mpe': 'video/mpeg',
'mpeg': 'video/mpeg',
'mpg': 'video/mpeg',
'mpga': 'audio/mpeg',
'mp3': 'audio/mpeg',
'ms': 'application/x-troff-ms',
'nc': 'application/x-netcdf',
'oda': 'application/oda',
'pbm': 'image/x-portable-bitmap',
'pdb': 'chemical/x-pdb',
'pdf': 'application/pdf',
'pgm': 'image/x-portable-graymap',
'png': 'image/png',
'pnm': 'image/x-portable-anymap',
'ppm': 'image/x-portable-pixmap',
'ppt': 'application/powerpoint',
'ps': 'application/postscript',
'qt': 'video/quicktime',
'ra': 'audio/x-realaudio',
'ram': 'audio/x-pn-realaudio',
'ras': 'image/x-cmu-raster',
'rgb': 'image/x-rgb',
'roff': 'application/x-troff',
'rpm': 'audio/x-pn-realaudio-plugin',
'rtf': 'application/rtf',
'rtx': 'text/richtext',
'sgm': 'text/x-sgml',
'sgml': 'text/x-sgml',
'sh': 'application/x-sh',
'shar': 'application/x-shar',
'sit': 'application/x-stuffit',
'skd': 'application/x-koan',
'skm': 'application/x-koan',
'skp': 'application/x-koan',
'snd': 'audio/basic',
'src': 'application/x-wais-source',
'sv4cpio': 'application/x-sv4cpio',
'sv4crc': 'application/x-sv4crc',
't': 'application/x-troff',
'tar': 'application/x-tar',
'tcl': 'application/x-tcl',
'tex': 'application/x-tex',
'texi': 'application/x-texinfo',
'texinfo': 'application/x-texinfo',
'tif': 'image/tiff',
'tiff': 'image/tiff',
'tr': 'application/x-troff',
'tsv': 'text/tab-separated-values',
'txt': 'text/plain',
'ustar': 'application/x-ustar',
'vcd': 'application/x-cdlink',
'vrml': 'x-world/x-vrml',
'wav': 'audio/x-wav',
'wrl': 'x-world/x-vrml',
'xbm': 'image/x-xbitmap',
'xpm': 'image/x-xpixmap',
'xwd': 'image/x-xwindowdump',
'xyz': 'chemical/x-pdb',
'zip': 'application/zip',
}
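# Usage sketch (added for illustration; `filename` is just a stand-in name):
# the table is keyed by bare file extensions, so a lookup typically lower-cases
# the extension and falls back to a generic type when it is unknown, e.g.
#
#   content_type_map.get(filename.rsplit('.', 1)[-1].lower(),
#                        'application/octet-stream')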
|
hansroh/skitai
|
skitai/handlers/mime_type_table.py
|
Python
|
mit
| 3,687
|
[
"NetCDF"
] |
d2c975bee79f94025e2c864ab86be113f1c46f3d549b0d3c8d951d05f37cfba2
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import sys
import os
import getpass
import subprocess
def os_arch():
"""
    Return a tuple of the current OS and architecture.
"""
if sys.maxsize > 2 ** 32:
arch = '64'
else:
arch = '32'
sys_platform = str(sys.platform).lower()
if 'linux' in sys_platform:
os = 'linux'
    elif 'win32' in sys_platform:
os = 'win'
elif 'darwin' in sys_platform:
os = 'mac'
else:
raise Exception('Unsupported OS/platform')
return os, arch
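# Example (added for illustration): on a 64-bit Linux interpreter os_arch()
# returns ('linux', '64'); on a 32-bit Windows interpreter it returns
# ('win', '32').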
# FIXME use these for architectures
'''
darwin/386
darwin/amd64
linux/386
linux/amd64
linux/arm
windows/386
windows/amd64
freebsd/386
freebsd/amd64
freebsd/arm
openbsd/386
openbsd/amd64
netbsd/386
netbsd/amd64
netbsd/arm
plan9/386
'''
#
# OS/Arch
#
current_os, current_arch = os_arch()
on_windows = current_os == 'win'
on_mac = current_os == 'mac'
on_linux = current_os == 'linux'
on_posix = not on_windows and (on_mac or on_linux)
current_os_arch = '%(current_os)s-%(current_arch)s' % locals()
noarch = 'noarch'
current_os_noarch = '%(current_os)s-%(noarch)s' % locals()
#
# Shared library file extensions
#
if on_windows:
lib_ext = '.dll'
if on_mac:
lib_ext = '.dylib'
if on_linux:
lib_ext = '.so'
#
# Python versions
#
py27 = (sys.version_info[0] == 2 and sys.version_info[1] == 7)
py34 = (sys.version_info[0] == 3 and sys.version_info[1] == 4)
py35 = (sys.version_info[0] == 3 and sys.version_info[1] == 5)
#
# User related
#
if on_windows:
user_home = os.path.join(os.path.expandvars('$HOMEDRIVE'),
os.path.expandvars('$HOMEPATH'))
else:
user_home = os.path.expanduser('~')
username = getpass.getuser()
# Do not let Windows error pop up messages with default SetErrorMode
# See http://msdn.microsoft.com/en-us/library/ms680621(VS100).aspx
#
# SEM_FAILCRITICALERRORS:
# The system does not display the critical-error-handler message box.
# Instead, the system sends the error to the calling process.
#
# SEM_NOGPFAULTERRORBOX:
# The system does not display the Windows Error Reporting dialog.
if on_windows:
import ctypes
# 3 is SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX
ctypes.windll.kernel32.SetErrorMode(3) # @UndefinedVariable
|
retrography/scancode-toolkit
|
src/commoncode/system.py
|
Python
|
apache-2.0
| 3,647
|
[
"VisIt"
] |
4814790f4421c5678be8b26317837f00387655f19fbd8c541bcf061372afdfc1
|
import os
import re
import shutil
import sys
if sys.version_info[:2] < (2, 6):
sys.exit('virtualenv requires Python 2.6 or higher.')
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup_params = {
'entry_points': {
'console_scripts': [
'virtualenv=virtualenv:main',
'virtualenv-%s.%s=virtualenv:main' % sys.version_info[:2]
],
},
'zip_safe': False,
'cmdclass': {'test': PyTest},
'tests_require': ['pytest', 'mock'],
}
except ImportError:
from distutils.core import setup
if sys.platform == 'win32':
print('Note: without Setuptools installed you will '
'have to use "python -m virtualenv ENV"')
setup_params = {}
else:
script = 'scripts/virtualenv'
script_ver = script + '-%s.%s' % sys.version_info[:2]
shutil.copy(script, script_ver)
setup_params = {'scripts': [script, script_ver]}
def read_file(*paths):
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, *paths)) as f:
return f.read()
# Get long_description from index.rst:
long_description = read_file('docs', 'index.rst')
long_description = long_description.strip().split('split here', 1)[0]
# Add release history
long_description += "\n\n" + read_file('docs', 'changes.rst')
def get_version():
version_file = read_file('virtualenv.py')
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
try:
import multiprocessing # noqa
except ImportError:
pass
setup(
name='virtualenv',
version=get_version(),
description="Virtual Python Environment builder",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
],
keywords='setuptools deployment installation distutils',
author='Ian Bicking',
author_email='[email protected]',
maintainer='Jannis Leidel, Carl Meyer and Brian Rosner',
maintainer_email='[email protected]',
url='https://virtualenv.pypa.io/',
license='MIT',
py_modules=['virtualenv'],
packages=['virtualenv_support'],
package_data={'virtualenv_support': ['*.whl']},
**setup_params)
|
boardman/sphinx-buildpack
|
vendor/virtualenv-13.1.0/setup.py
|
Python
|
mit
| 3,693
|
[
"Brian"
] |
88f5a0fe8853626cf5f940c4409833b88aa6215677111e8327b0e8e61a46a358
|
# sql/visitors.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaptation, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
"""Metaclass which assigns a `_compiler_dispatch` method to classes
having a `__visit_name__` attribute.
The _compiler_dispatch attribute becomes an instance method which
looks approximately like the following::
def _compiler_dispatch (self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
Classes having no __visit_name__ attribute will remain unaffected.
"""
def __init__(cls, clsname, bases, clsdict):
if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'):
super(VisitableType, cls).__init__(clsname, bases, clsdict)
return
_generate_dispatch(cls)
super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
"""Return an optimized visit dispatch function for the cls
for use by the compiler.
"""
if '__visit_name__' in cls.__dict__:
visit_name = cls.__visit_name__
if isinstance(visit_name, str):
# There is an optimization opportunity here because the
            # string name of the class's __visit_name__ is known at
# this early stage (import time) so it can be pre-constructed.
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
return getter(visitor)(self, **kw)
else:
# The optimization opportunity is lost for this case because the
# __visit_name__ is not yet a string. As a result, the visit
# string has to be recalculated with each compilation.
def _compiler_dispatch(self, visitor, **kw):
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
_compiler_dispatch.__doc__ = \
"""Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.
"""
cls._compiler_dispatch = _compiler_dispatch
class Visitable(object):
"""Base class for visitable objects, applies the
``VisitableType`` metaclass.
"""
__metaclass__ = VisitableType
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the traverse() function.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self._visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith('visit_'):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def _visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, '_next', None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
the chained visitor will receive all visit events after this one.
"""
tail = list(self._visitor_iterator)[-1]
tail._next = visitor
return self
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the cloned_traverse() function.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the replacement_traverse() function.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self._visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
"""
stack = deque([obj])
while stack:
t = stack.popleft()
yield t
for c in t.get_children(**opts):
stack.append(c)
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
"""
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of
objects.
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default
iterator.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the
depth-first iterator.
"""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing
modifications by visitors."""
cloned = util.column_dict()
stop_on = util.column_set(opts.get('stop_on', []))
def clone(elem):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(obj)
return obj
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element
replacement by a given replacement function."""
cloned = util.column_dict()
stop_on = util.column_set([id(x) for x in opts.get('stop_on', [])])
def clone(elem, **kw):
if id(elem) in stop_on or \
'no_replacement_traverse' in elem._annotations:
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
if elem not in cloned:
cloned[elem] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
return cloned[elem]
if obj is not None:
obj = clone(obj, **opts)
return obj
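# Minimal usage sketch (added for illustration; it assumes a working SQLAlchemy
# import and is not part of this module). traverse() only needs a dictionary
# keyed by each element's __visit_name__:
#
#   from sqlalchemy.sql import column
#   expr = column('a') + column('b')
#   names = []
#   traverse(expr, {}, {'column': lambda col: names.append(col.name)})
#   # names should now be ['a', 'b']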
|
fredericmohr/mitro
|
mitro-mail/build/venv/lib/python2.7/site-packages/sqlalchemy/sql/visitors.py
|
Python
|
gpl-3.0
| 9,614
|
[
"VisIt"
] |
f882cc925a479cecb8357afe73d08260e6b70c636bb7edfd34c81257dafa5ea3
|
"""SCons.Util
Various utility functions go here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Util.py 3897 2009/01/13 06:45:54 scons"
import copy
import os
import os.path
import re
import string
import sys
import types
from UserDict import UserDict
from UserList import UserList
from UserString import UserString
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
DictType = types.DictType
InstanceType = types.InstanceType
ListType = types.ListType
StringType = types.StringType
TupleType = types.TupleType
def dictify(keys, values, result={}):
for k, v in zip(keys, values):
result[k] = v
return result
_altsep = os.altsep
if _altsep is None and sys.platform == 'win32':
# My ActivePython 2.0.1 doesn't set os.altsep! What gives?
_altsep = '/'
if _altsep:
def rightmost_separator(path, sep, _altsep=_altsep):
rfind = string.rfind
return max(rfind(path, sep), rfind(path, _altsep))
else:
rightmost_separator = string.rfind
# First two from the Python Cookbook, just for completeness.
# (Yeah, yeah, YAGNI...)
def containsAny(str, set):
"""Check whether sequence str contains ANY of the items in set."""
for c in set:
if c in str: return 1
return 0
def containsAll(str, set):
"""Check whether sequence str contains ALL of the items in set."""
for c in set:
if c not in str: return 0
return 1
def containsOnly(str, set):
"""Check whether sequence str contains ONLY items in set."""
for c in str:
if c not in set: return 0
return 1
def splitext(path):
"Same as os.path.splitext() but faster."
sep = rightmost_separator(path, os.sep)
dot = string.rfind(path, '.')
# An ext is only real if it has at least one non-digit char
if dot > sep and not containsOnly(path[dot:], "0123456789."):
return path[:dot],path[dot:]
else:
return path,""
def updrive(path):
"""
Make the drive letter (if any) upper case.
    This is useful because Windows is inconsistent on the case
of the drive letter, which can cause inconsistencies when
calculating command signatures.
"""
drive, rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
return path
class NodeList(UserList):
"""This class is almost exactly like a regular list of Nodes
(actually it can hold any object), with one important difference.
If you try to get an attribute from this list, it will return that
attribute from every item in the list. For example:
>>> someList = NodeList([ ' foo ', ' bar ' ])
>>> someList.strip()
[ 'foo', 'bar' ]
"""
def __nonzero__(self):
return len(self.data) != 0
def __str__(self):
return string.join(map(str, self.data))
def __iter__(self):
return iter(self.data)
def __call__(self, *args, **kwargs):
result = map(lambda x, args=args, kwargs=kwargs: apply(x,
args,
kwargs),
self.data)
return self.__class__(result)
def __getattr__(self, name):
result = map(lambda x, n=name: getattr(x, n), self.data)
return self.__class__(result)
_get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$')
def get_environment_var(varstr):
"""Given a string, first determine if it looks like a reference
to a single environment variable, like "$FOO" or "${FOO}".
If so, return that variable with no decorations ("FOO").
If not, return None."""
mo=_get_env_var.match(to_String(varstr))
if mo:
var = mo.group(1)
if var[0] == '{':
return var[1:-1]
else:
return var
else:
return None
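# Examples (added for illustration), following the regular expression above:
#
#   get_environment_var("$FOO")      -> "FOO"
#   get_environment_var("${FOO}")    -> "FOO"
#   get_environment_var("$FOO/bar")  -> None   (not a lone variable reference)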
class DisplayEngine:
def __init__(self):
self.__call__ = self.print_it
def print_it(self, text, append_newline=1):
if append_newline: text = text + '\n'
try:
sys.stdout.write(text)
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
# IOError's here so that SCons can continue and shutdown
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def dont_print(self, text, append_newline=1):
pass
def set_mode(self, mode):
if mode:
self.__call__ = self.print_it
else:
self.__call__ = self.dont_print
def render_tree(root, child_func, prune=0, margin=[0], visited={}):
"""
Render a tree of nodes into an ASCII tree view.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
children = child_func(root)
retval = ""
for pipe in margin[:-1]:
if pipe:
retval = retval + "| "
else:
retval = retval + " "
if visited.has_key(rname):
return retval + "+-[" + rname + "]\n"
retval = retval + "+-" + rname + "\n"
if not prune:
visited = copy.copy(visited)
visited[rname] = 1
for i in range(len(children)):
margin.append(i<len(children)-1)
        retval = retval + render_tree(children[i], child_func, prune, margin, visited)
margin.pop()
return retval
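# Hedged example (added for illustration; not in the original SCons source):
# child_func only has to map a node to its children, so a plain dictionary is
# enough to try render_tree out:
#
#   kids = {'top': ['a', 'b'], 'a': [], 'b': []}
#   print render_tree('top', lambda n: kids[str(n)])
#
# which prints an ASCII tree with 'top' as the root and 'a' and 'b' beneath it.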
IDX = lambda N: N and 1 or 0
def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited={}):
"""
Print a tree of nodes. This is like render_tree, except it prints
lines directly instead of creating a string representation in memory,
so that huge trees can be printed.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
showtags - print status information to the left of each node line
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
if showtags:
if showtags == 2:
print ' E = exists'
print ' R = exists in repository only'
print ' b = implicit builder'
print ' B = explicit builder'
print ' S = side effect'
print ' P = precious'
print ' A = always build'
print ' C = current'
print ' N = no clean'
print ' H = no cache'
print ''
tags = ['[']
tags.append(' E'[IDX(root.exists())])
tags.append(' R'[IDX(root.rexists() and not root.exists())])
tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] +
[0,2][IDX(root.has_builder())]])
tags.append(' S'[IDX(root.side_effect)])
tags.append(' P'[IDX(root.precious)])
tags.append(' A'[IDX(root.always_build)])
tags.append(' C'[IDX(root.is_up_to_date())])
tags.append(' N'[IDX(root.noclean)])
tags.append(' H'[IDX(root.nocache)])
tags.append(']')
else:
tags = []
def MMM(m):
return [" ","| "][m]
margins = map(MMM, margin[:-1])
children = child_func(root)
if prune and visited.has_key(rname) and children:
print string.join(tags + margins + ['+-[', rname, ']'], '')
return
print string.join(tags + margins + ['+-', rname], '')
visited[rname] = 1
if children:
margin.append(1)
map(lambda C, cf=child_func, p=prune, i=IDX(showtags), m=margin, v=visited:
print_tree(C, cf, p, i, m, v),
children[:-1])
margin[-1] = 0
print_tree(children[-1], child_func, prune, IDX(showtags), margin, visited)
margin.pop()
# Functions for deciding if things are like various types, mainly to
# handle UserDict, UserList and UserString like their underlying types.
#
# Yes, all of this manual testing breaks polymorphism, and the real
# Pythonic way to do all of this would be to just try it and handle the
# exception, but handling the exception when it's not the right type is
# often too slow.
try:
class mystr(str):
pass
except TypeError:
# An older Python version without new-style classes.
#
# The actual implementations here have been selected after timings
# coded up in in bench/is_types.py (from the SCons source tree,
# see the scons-src distribution), mostly against Python 1.5.2.
# Key results from those timings:
#
# -- Storing the type of the object in a variable (t = type(obj))
# slows down the case where it's a native type and the first
# comparison will match, but nicely speeds up the case where
# it's a different native type. Since that's going to be
# common, it's a good tradeoff.
#
# -- The data show that calling isinstance() on an object that's
# a native type (dict, list or string) is expensive enough
# that checking up front for whether the object is of type
# InstanceType is a pretty big win, even though it does slow
# down the case where it really *is* an object instance a
# little bit.
def is_Dict(obj):
t = type(obj)
return t is DictType or \
(t is InstanceType and isinstance(obj, UserDict))
def is_List(obj):
t = type(obj)
return t is ListType \
or (t is InstanceType and isinstance(obj, UserList))
def is_Sequence(obj):
t = type(obj)
return t is ListType \
or t is TupleType \
or (t is InstanceType and isinstance(obj, UserList))
def is_Tuple(obj):
t = type(obj)
return t is TupleType
if hasattr(types, 'UnicodeType'):
def is_String(obj):
t = type(obj)
return t is StringType \
or t is UnicodeType \
or (t is InstanceType and isinstance(obj, UserString))
else:
def is_String(obj):
t = type(obj)
return t is StringType \
or (t is InstanceType and isinstance(obj, UserString))
def is_Scalar(obj):
return is_String(obj) or not is_Sequence(obj)
def flatten(obj, result=None):
"""Flatten a sequence to a non-nested list.
Flatten() converts either a single scalar or a nested sequence
to a non-nested list. Note that flatten() considers strings
to be scalars instead of sequences like Python would.
"""
if is_Scalar(obj):
return [obj]
if result is None:
result = []
for item in obj:
if is_Scalar(item):
result.append(item)
else:
flatten_sequence(item, result)
return result
def flatten_sequence(sequence, result=None):
"""Flatten a sequence to a non-nested list.
Same as flatten(), but it does not handle the single scalar
case. This is slightly more efficient when one knows that
the sequence to flatten can not be a scalar.
"""
if result is None:
result = []
for item in sequence:
if is_Scalar(item):
result.append(item)
else:
flatten_sequence(item, result)
return result
#
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
if hasattr(types, 'UnicodeType'):
UnicodeType = types.UnicodeType
def to_String(s):
if isinstance(s, UserString):
t = type(s.data)
else:
t = type(s)
if t is UnicodeType:
return unicode(s)
else:
return str(s)
else:
to_String = str
def to_String_for_signature(obj):
try:
f = obj.for_signature
except AttributeError:
return to_String_for_subst(obj)
else:
return f()
def to_String_for_subst(s):
if is_Sequence( s ):
return string.join( map(to_String_for_subst, s) )
return to_String( s )
else:
# A modern Python version with new-style classes, so we can just use
# isinstance().
#
# We are using the following trick to speed-up these
# functions. Default arguments are used to take a snapshot of the
# the global functions and constants used by these functions. This
# transforms accesses to global variable into local variables
# accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL).
DictTypes = (dict, UserDict)
ListTypes = (list, UserList)
SequenceTypes = (list, tuple, UserList)
# Empirically, Python versions with new-style classes all have
# unicode.
#
# Note that profiling data shows a speed-up when comparing
    # explicitly with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
StringTypes = (str, unicode, UserString)
    # Empirically, it is faster to check explicitly for str and
# unicode than for basestring.
BaseStringTypes = (str, unicode)
def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
return isinstance(obj, DictTypes)
def is_List(obj, isinstance=isinstance, ListTypes=ListTypes):
return isinstance(obj, ListTypes)
def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes):
return isinstance(obj, SequenceTypes)
def is_Tuple(obj, isinstance=isinstance, tuple=tuple):
return isinstance(obj, tuple)
def is_String(obj, isinstance=isinstance, StringTypes=StringTypes):
return isinstance(obj, StringTypes)
def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes):
# Profiling shows that there is an impressive speed-up of 2x
        # when explicitly checking for strings instead of just not
        # sequence when the argument (i.e. obj) is already a string.
        # But, if obj is not a string then it is twice as fast to
        # check only for 'not sequence'. The following code therefore
        # assumes that the obj argument is a string most of the time.
return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes)
def do_flatten(sequence, result, isinstance=isinstance,
StringTypes=StringTypes, SequenceTypes=SequenceTypes):
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
def flatten(obj, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Flatten() converts either a single scalar or a nested sequence
to a non-nested list. Note that flatten() considers strings
to be scalars instead of sequences like Python would.
"""
if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes):
return [obj]
result = []
for item in obj:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Same as flatten(), but it does not handle the single scalar
case. This is slightly more efficient when one knows that
the sequence to flatten can not be a scalar.
"""
result = []
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
#
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
def to_String(s,
isinstance=isinstance, str=str,
UserString=UserString, BaseStringTypes=BaseStringTypes):
if isinstance(s,BaseStringTypes):
# Early out when already a string!
return s
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_subst(s,
isinstance=isinstance, join=string.join, str=str, to_String=to_String,
BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes,
UserString=UserString):
# Note that the test cases are sorted by order of probability.
if isinstance(s, BaseStringTypes):
return s
elif isinstance(s, SequenceTypes):
l = []
for e in s:
l.append(to_String_for_subst(e))
            return join( l )
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst,
AttributeError=AttributeError):
try:
f = obj.for_signature
except AttributeError:
return to_String_for_subst(obj)
else:
return f()
# The SCons "semi-deep" copy.
#
# This makes separate copies of lists (including UserList objects)
# dictionaries (including UserDict objects) and tuples, but just copies
# references to anything else it finds.
#
# A special case is any object that has a __semi_deepcopy__() method,
# which we invoke to create the copy, which is used by the BuilderDict
# class because of its extra initialization argument.
#
# The dispatch table approach used here is a direct rip-off from the
# normal Python copy module.
_semi_deepcopy_dispatch = d = {}
def _semi_deepcopy_dict(x):
copy = {}
for key, val in x.items():
# The regular Python copy.deepcopy() also deepcopies the key,
# as follows:
#
# copy[semi_deepcopy(key)] = semi_deepcopy(val)
#
# Doesn't seem like we need to, but we'll comment it just in case.
copy[key] = semi_deepcopy(val)
return copy
d[types.DictionaryType] = _semi_deepcopy_dict
def _semi_deepcopy_list(x):
return map(semi_deepcopy, x)
d[types.ListType] = _semi_deepcopy_list
def _semi_deepcopy_tuple(x):
return tuple(map(semi_deepcopy, x))
d[types.TupleType] = _semi_deepcopy_tuple
def _semi_deepcopy_inst(x):
if hasattr(x, '__semi_deepcopy__'):
return x.__semi_deepcopy__()
elif isinstance(x, UserDict):
return x.__class__(_semi_deepcopy_dict(x))
elif isinstance(x, UserList):
return x.__class__(_semi_deepcopy_list(x))
else:
return x
d[types.InstanceType] = _semi_deepcopy_inst
def semi_deepcopy(x):
copier = _semi_deepcopy_dispatch.get(type(x))
if copier:
return copier(x)
else:
return x
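# Behavioural note with a small example (added for illustration;
# some_builder_object is just a stand-in): only dicts, lists and tuples (and
# their User* counterparts) are copied, everything else keeps its reference.
#
#   env = {'CPPPATH': ['include'], 'BUILDER': some_builder_object}
#   clone = semi_deepcopy(env)
#   # clone['CPPPATH'] is a new list; clone['BUILDER'] is the same object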
class Proxy:
"""A simple generic Proxy class, forwarding all calls to
subject. So, for the benefit of the python newbie, what does
this really mean? Well, it means that you can take an object, let's
call it 'objA', and wrap it in this Proxy class, with a statement
like this
proxyObj = Proxy(objA),
Then, if in the future, you do something like this
x = proxyObj.var1,
since Proxy does not have a 'var1' attribute (but presumably objA does),
the request actually is equivalent to saying
x = objA.var1
Inherit from this class to create a Proxy."""
def __init__(self, subject):
"""Wrap an object as a Proxy object"""
self.__subject = subject
def __getattr__(self, name):
"""Retrieve an attribute from the wrapped object. If the named
attribute doesn't exist, AttributeError is raised"""
return getattr(self.__subject, name)
def get(self):
"""Retrieve the entire wrapped object"""
return self.__subject
def __cmp__(self, other):
if issubclass(other.__class__, self.__subject.__class__):
return cmp(self.__subject, other)
return cmp(self.__dict__, other.__dict__)
# attempt to load the windows registry module:
can_read_reg = 0
try:
import _winreg
can_read_reg = 1
hkey_mod = _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegQueryValueEx = _winreg.QueryValueEx
RegError = _winreg.error
except ImportError:
try:
import win32api
import win32con
can_read_reg = 1
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegQueryValueEx = win32api.RegQueryValueEx
RegError = win32api.error
except ImportError:
class _NoError(Exception):
pass
RegError = _NoError
if can_read_reg:
HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
HKEY_USERS = hkey_mod.HKEY_USERS
def RegGetValue(root, key):
"""This utility function returns a value in the registry
without having to open the key first. Only available on
Windows platforms with a version of Python that can read the
registry. Returns the same thing as
SCons.Util.RegQueryValueEx, except you just specify the entire
path to the value, and don't have to bother opening the key
first. So:
Instead of:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion')
out = SCons.Util.RegQueryValueEx(k,
'ProgramFilesDir')
You can write:
out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir')
"""
# I would use os.path.split here, but it's not a filesystem
# path...
p = key.rfind('\\') + 1
keyp = key[:p]
val = key[p:]
k = RegOpenKeyEx(root, keyp)
return RegQueryValueEx(k,val)
if sys.platform == 'win32':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
try:
pathext = os.environ['PATHEXT']
except KeyError:
pathext = '.COM;.EXE;.BAT;.CMD'
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
elif os.name == 'os2':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = ['.exe', '.cmd']
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
else:
def WhereIs(file, path=None, pathext=None, reject=[]):
import stat
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = string.split(path, os.pathsep)
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for d in path:
f = os.path.join(d, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
# os.stat() raises OSError, not IOError if the file
# doesn't exist, so in this case we let IOError get
# raised so as to not mask possibly serious disk or
# network issues.
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
try:
reject.index(f)
except ValueError:
return os.path.normpath(f)
continue
return None
def PrependPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This prepends newpath elements to the given oldpath. Will only
add any particular path once (leaving the first one it encounters
and ignoring the rest, to preserve path order), and will
os.path.normpath and os.path.normcase all paths to help assure
this. This can also handle the case where the given old path
variable is a list instead of a string, in which case a list will
be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/biz/boom:/foo:/foo/bar"
If delete_existing is 0, then adding a path that exists will
not move it to the beginning; it will stay where it is in the
list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = string.split(paths, sep)
is_list = 0
if is_String(newpath):
newpaths = string.split(newpath, sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=map(canonicalize, newpaths)
if not delete_existing:
# First uniquify the old paths, making sure to
# preserve the first instance (in Unix/Linux,
# the first one wins), and remembering them in normpaths.
# Then insert the new paths at the head of the list
# if they're not already in the normpaths list.
result = []
normpaths = []
for path in paths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
newpaths.reverse() # since we're inserting at the head
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.insert(0, path)
normpaths.append(normpath)
paths = result
else:
newpaths = newpaths + paths # prepend new paths
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
if is_list:
return paths
else:
return string.join(paths, sep)
def AppendPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This appends new path elements to the given old path. Will
only add any particular path once (leaving the last one it
encounters and ignoring the rest, to preserve path order), and
will os.path.normpath and os.path.normcase all paths to help
assure this. This can also handle the case where the given old
path variable is a list instead of a string, in which case a list
will be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/foo/bar:/biz/boom:/foo"
If delete_existing is 0, then adding a path that exists
will not move it to the end; it will stay where it is in the list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = string.split(paths, sep)
is_list = 0
if is_String(newpath):
newpaths = string.split(newpath, sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=map(canonicalize, newpaths)
if not delete_existing:
# add old paths to result, then
# add new paths if not already present
# (I thought about using a dict for normpaths for speed,
# but it's not clear hashing the strings would be faster
# than linear searching these typically short lists.)
result = []
normpaths = []
for path in paths:
if not path:
continue
result.append(path)
normpaths.append(os.path.normpath(os.path.normcase(path)))
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
paths = result
else:
# start w/ new paths, add old ones if not present,
# then reverse.
newpaths = paths + newpaths # append new paths
newpaths.reverse()
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
paths.reverse()
if is_list:
return paths
else:
return string.join(paths, sep)
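# Illustrative sketch, not part of the original module: how PrependPath() and
# AppendPath() treat string path values.  The directories shown are hypothetical.
def _example_path_manipulation():
    old = "/foo/bar:/foo"
    new = "/biz/boom:/foo"
    # With the default delete_existing=1 the duplicate "/foo" is collapsed and
    # its position is taken from the newly added occurrence:
    #   PrependPath(old, new, sep=":")  ->  "/biz/boom:/foo:/foo/bar"
    #   AppendPath(old, new, sep=":")   ->  "/foo/bar:/biz/boom:/foo"
    return PrependPath(old, new, sep=":"), AppendPath(old, new, sep=":")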
if sys.platform == 'cygwin':
def get_native_path(path):
"""Transforms an absolute path into a native path for the system. In
Cygwin, this converts from a Cygwin path to a Windows one."""
return string.replace(os.popen('cygpath -w ' + path).read(), '\n', '')
else:
def get_native_path(path):
"""Transforms an absolute path into a native path for the system.
Non-Cygwin version, just leave the path alone."""
return path
display = DisplayEngine()
def Split(arg):
if is_List(arg) or is_Tuple(arg):
return arg
elif is_String(arg):
return string.split(arg)
else:
return [arg]
class CLVar(UserList):
"""A class for command-line construction variables.
This is a list that uses Split() to split an initial string along
white-space arguments, and similarly to split any strings that get
added. This allows us to Do the Right Thing with Append() and
Prepend() (as well as straight Python foo = env['VAR'] + 'arg1
arg2') regardless of whether a user adds a list or a string to a
command-line construction variable.
"""
def __init__(self, seq = []):
UserList.__init__(self, Split(seq))
def __add__(self, other):
return UserList.__add__(self, CLVar(other))
def __radd__(self, other):
return UserList.__radd__(self, CLVar(other))
def __coerce__(self, other):
return (self, CLVar(other))
def __str__(self):
return string.join(self.data)
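# Illustrative sketch, not part of the original module: because CLVar splits
# strings on whitespace, appending a string or a list gives the same result.
def _example_clvar():
    flags = CLVar("-O2 -g")
    flags = flags + "-Wall -Werror"   # the string is Split() into two entries
    return str(flags)                 # "-O2 -g -Wall -Werror"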
# A dictionary that preserves the order in which items are added.
# Submitted by David Benjamin to ActiveState's Python Cookbook web site:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
# Including fixes/enhancements from the follow-on discussions.
class OrderedDict(UserDict):
def __init__(self, dict = None):
self._keys = []
UserDict.__init__(self, dict)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
UserDict.__setitem__(self, key, item)
if key not in self._keys: self._keys.append(key)
def clear(self):
UserDict.clear(self)
self._keys = []
def copy(self):
dict = OrderedDict()
dict.update(self)
return dict
def items(self):
return zip(self._keys, self.values())
def keys(self):
return self._keys[:]
def popitem(self):
try:
key = self._keys[-1]
except IndexError:
raise KeyError('dictionary is empty')
val = self[key]
del self[key]
return (key, val)
def setdefault(self, key, failobj = None):
UserDict.setdefault(self, key, failobj)
if key not in self._keys: self._keys.append(key)
def update(self, dict):
for (key, val) in dict.items():
self.__setitem__(key, val)
def values(self):
return map(self.get, self._keys)
class Selector(OrderedDict):
"""A callable ordered dictionary that maps file suffixes to
dictionary values. We preserve the order in which items are added
so that get_suffix() calls always return the first suffix added."""
def __call__(self, env, source, ext=None):
if ext is None:
try:
ext = source[0].suffix
except IndexError:
ext = ""
try:
return self[ext]
except KeyError:
# Try to perform Environment substitution on the keys of
# the dictionary before giving up.
s_dict = {}
for (k,v) in self.items():
if not k is None:
s_k = env.subst(k)
if s_dict.has_key(s_k):
# We only raise an error when variables point
# to the same suffix. If one suffix is literal
# and a variable suffix contains this literal,
# the literal wins and we don't raise an error.
raise KeyError, (s_dict[s_k][0], k, s_k)
s_dict[s_k] = (k,v)
try:
return s_dict[ext][1]
except KeyError:
try:
return self[None]
except KeyError:
return None
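# Illustrative sketch, not part of the original module: a Selector maps file
# suffixes to values (typically builder actions).  The env, source nodes and
# dictionary values here are hypothetical.
def _example_selector(env, source):
    select = Selector({'.c': 'c-action', '.cpp': 'c++-action', None: 'default-action'})
    # Returns 'c-action' for a .c source, and 'default-action' for any
    # suffix that is not listed (via the None fallback key).
    return select(env, source)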
if sys.platform == 'cygwin':
# On Cygwin, os.path.normcase() lies, so just report back the
# fact that the underlying Windows OS is case-insensitive.
def case_sensitive_suffixes(s1, s2):
return 0
else:
def case_sensitive_suffixes(s1, s2):
return (os.path.normcase(s1) != os.path.normcase(s2))
def adjustixes(fname, pre, suf, ensure_suffix=False):
if pre:
path, fn = os.path.split(os.path.normpath(fname))
if fn[:len(pre)] != pre:
fname = os.path.join(path, pre + fn)
# Only append a suffix if the suffix we're going to add isn't already
# there, and if either we've been asked to ensure the specific suffix
# is present or there's no suffix on it at all.
if suf and fname[-len(suf):] != suf and \
(ensure_suffix or not splitext(fname)[1]):
fname = fname + suf
return fname
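# Illustrative sketch, not part of the original module: adjustixes() is how
# names like 'libfoo.a' are built from a bare basename.
def _example_adjustixes():
    # 'foo'       -> 'libfoo.a'   (prefix and suffix are both added)
    # 'libfoo.so' -> 'libfoo.so'  (existing prefix/suffix are left alone
    #                              unless ensure_suffix is set)
    return adjustixes('foo', 'lib', '.a'), adjustixes('libfoo.so', 'lib', '.a')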
# From Tim Peters,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# (Also in the printed Python Cookbook.)
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a dict first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
u = {}
try:
for x in s:
u[x] = 1
except TypeError:
pass # move on to the next method
else:
return u.keys()
del u
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = list(s)
t.sort()
except TypeError:
pass # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti = lasti + 1
i = i + 1
return t[:lasti]
del t
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# A more efficient implementation of Alex's uniquer(), this avoids the
# idfun() argument and function-call overhead by assuming that all
# items in the sequence are hashable.
def uniquer_hashables(seq):
seen = {}
result = []
for item in seq:
#if not item in seen:
if not seen.has_key(item):
seen[item] = 1
result.append(item)
return result
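# Illustrative sketch, not part of the original module: unlike unique() above,
# the uniquer variants preserve the order of first appearance.
def _example_uniquer():
    return uniquer_hashables(['b', 'a', 'b', 'c', 'a'])   # ['b', 'a', 'c']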
# Much of the logic here was originally based on recipe 4.9 from the
# Python CookBook, but we had to dumb it way down for Python 1.5.2.
class LogicalLines:
def __init__(self, fileobj):
self.fileobj = fileobj
def readline(self):
result = []
while 1:
line = self.fileobj.readline()
if not line:
break
if line[-2:] == '\\\n':
result.append(line[:-2])
else:
result.append(line)
break
return string.join(result, '')
def readlines(self):
result = []
while 1:
line = self.readline()
if not line:
break
result.append(line)
return result
class UniqueList(UserList):
def __init__(self, seq = []):
UserList.__init__(self, seq)
self.unique = True
def __make_unique(self):
if not self.unique:
self.data = uniquer_hashables(self.data)
self.unique = True
def __lt__(self, other):
self.__make_unique()
return UserList.__lt__(self, other)
def __le__(self, other):
self.__make_unique()
return UserList.__le__(self, other)
def __eq__(self, other):
self.__make_unique()
return UserList.__eq__(self, other)
def __ne__(self, other):
self.__make_unique()
return UserList.__ne__(self, other)
def __gt__(self, other):
self.__make_unique()
return UserList.__gt__(self, other)
def __ge__(self, other):
self.__make_unique()
return UserList.__ge__(self, other)
def __cmp__(self, other):
self.__make_unique()
return UserList.__cmp__(self, other)
def __len__(self):
self.__make_unique()
return UserList.__len__(self)
def __getitem__(self, i):
self.__make_unique()
return UserList.__getitem__(self, i)
def __setitem__(self, i, item):
UserList.__setitem__(self, i, item)
self.unique = False
def __getslice__(self, i, j):
self.__make_unique()
return UserList.__getslice__(self, i, j)
def __setslice__(self, i, j, other):
UserList.__setslice__(self, i, j, other)
self.unique = False
def __add__(self, other):
result = UserList.__add__(self, other)
result.unique = False
return result
def __radd__(self, other):
result = UserList.__radd__(self, other)
result.unique = False
return result
def __iadd__(self, other):
result = UserList.__iadd__(self, other)
result.unique = False
return result
def __mul__(self, other):
result = UserList.__mul__(self, other)
result.unique = False
return result
def __rmul__(self, other):
result = UserList.__rmul__(self, other)
result.unique = False
return result
def __imul__(self, other):
result = UserList.__imul__(self, other)
result.unique = False
return result
def append(self, item):
UserList.append(self, item)
self.unique = False
def insert(self, i, item):
UserList.insert(self, i, item)
self.unique = False
def count(self, item):
self.__make_unique()
return UserList.count(self, item)
def index(self, item):
self.__make_unique()
return UserList.index(self, item)
def reverse(self):
self.__make_unique()
UserList.reverse(self)
def sort(self, *args, **kwds):
self.__make_unique()
#return UserList.sort(self, *args, **kwds)
return apply(UserList.sort, (self,)+args, kwds)
def extend(self, other):
UserList.extend(self, other)
self.unique = False
class Unbuffered:
"""
A proxy class that wraps a file object, flushing after every write,
and delegating everything else to the wrapped object.
"""
def __init__(self, file):
self.file = file
def write(self, arg):
try:
self.file.write(arg)
self.file.flush()
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
# is that the user has pressed ctrl-c. If this is the case,
# then SCons is currently shutting down. We therefore ignore
# IOErrors here so that SCons can continue and shut down
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def __getattr__(self, attr):
return getattr(self.file, attr)
def make_path_relative(path):
""" makes an absolute path name to a relative pathname.
"""
if os.path.isabs(path):
drive_s,path = os.path.splitdrive(path)
import re
if not drive_s:
path=re.compile("/*(.*)").findall(path)[0]
else:
path=path[1:]
assert( not os.path.isabs( path ) ), path
return path
# The original idea for AddMethod() and RenameFunction() come from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613
#
# That code was a little fragile, though, so the following changes
# have been wrung on it:
#
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
#
# * Changed explicit type-checking to the "try: klass = object.__class__"
# block in installmethod() below so that it still works with the
# old-style classes that SCons uses.
#
# * Replaced the by-hand creation of methods and functions with use of
# the "new" module, as alluded to in Alex Martelli's response to the
# following Cookbook post:
#
# ASPN: Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
def AddMethod(object, function, name = None):
"""
Adds either a bound method to an instance or an unbound method to
a class. If name is omitted the name of the specified function
is used by default.
Example:
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print a.z
AddMethod(a, lambda self, i: self.l[i], "listIndex")
print a.listIndex(5)
"""
import new
if name is None:
name = function.func_name
else:
function = RenameFunction(function, name)
try:
klass = object.__class__
except AttributeError:
# "object" is really a class, so it gets an unbound method.
object.__dict__[name] = new.instancemethod(function, None, object)
else:
# "object" is really an instance, so it gets a bound method.
object.__dict__[name] = new.instancemethod(function, object, klass)
def RenameFunction(function, name):
"""
Returns a function identical to the specified function, but with
the specified name.
"""
import new
# Compatibility for Python 1.5 and 2.1. Can be removed in favor of
# passing function.func_defaults directly to new.function() once
# we base on Python 2.2 or later.
func_defaults = function.func_defaults
if func_defaults is None:
func_defaults = ()
return new.function(function.func_code,
function.func_globals,
name,
func_defaults)
md5 = False
def MD5signature(s):
return str(s)
def MD5filesignature(fname, chunksize=65536):
f = open(fname, "rb")
result = f.read()
f.close()
return result
try:
import hashlib
except ImportError:
pass
else:
if hasattr(hashlib, 'md5'):
md5 = True
def MD5signature(s):
m = hashlib.md5()
m.update(str(s))
return m.hexdigest()
def MD5filesignature(fname, chunksize=65536):
m = hashlib.md5()
f = open(fname, "rb")
while 1:
blck = f.read(chunksize)
if not blck:
break
m.update(str(blck))
f.close()
return m.hexdigest()
def MD5collect(signatures):
"""
Collects a list of signatures into an aggregate signature.
signatures - a list of signatures
returns - the aggregate signature
"""
if len(signatures) == 1:
return signatures[0]
else:
return MD5signature(string.join(signatures, ', '))
# From Dinu C. Gherman,
# Python Cookbook, second edition, recipe 6.17, p. 277.
# Also:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
# ASPN: Python Cookbook: Null Object Design Pattern
# TODO(1.5):
#class Null(object):
class Null:
""" Null objects always and reliably "do nothing." """
def __new__(cls, *args, **kwargs):
if not '_inst' in vars(cls):
#cls._inst = type.__new__(cls, *args, **kwargs)
cls._inst = apply(type.__new__, (cls,) + args, kwargs)
return cls._inst
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __repr__(self):
return "Null(0x%08X)" % id(self)
def __nonzero__(self):
return False
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
class NullSeq(Null):
def __len__(self):
return 0
def __iter__(self):
return iter(())
def __getitem__(self, i):
return self
def __delitem__(self, i):
return self
def __setitem__(self, i, v):
return self
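# Illustrative sketch, not part of the original module: a Null instance absorbs
# attribute access and calls, which makes it useful as a do-nothing stand-in.
def _example_null():
    log = Null()
    log.write("ignored")    # any attribute lookup and call returns the Null
    return bool(log)        # False, via __nonzero__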
del __revision__
|
rwatson/chromium-capsicum
|
third_party/scons/scons-local/SCons/Util.py
|
Python
|
bsd-3-clause
| 52,579
|
[
"VisIt"
] |
e52c6b600485a6806b057af1336a5a54f60f20f511db9ab6b52e628a8943d278
|
"""
Basic operations for BRI Mongo databases.
"""
import logging
import re
from functools import wraps
import datetime
from .. import util
logger = logging.getLogger(__name__)
def find_objects(collection):
"""
Return a decorator that retrieves objects from the specified
collection, given a db connection and query.
:type collection: str
:param collection: String indicating the name of the collection
"""
def decorator(f):
@wraps(f)
def wrapper(*args):
db, query = f(*args)
logger.debug("searching '{}' collection with query '{}'"
.format(collection, query))
return list(db[collection].find(query))
return wrapper
return decorator
def insert_objects(collection):
"""
Return a decorator that inserts one or more objects into the
specified collection; if an object exists, updates any individual
fields that are not empty in the input object.
:type collection: str
:param collection: string indicating the name of the collection
"""
def decorator(f):
@wraps(f)
def wrapper(*args):
db, objects = f(*args)
objects = [objects] if not isinstance(objects, list) else objects
logger.debug("inserting list of objects: {}".format(objects))
for o in objects:
logger.debug("inserting '{}' into '{}' collection"
.format(o, collection))
for k, v in o.items():
if v is not None:
logger.debug("updating field {}".format(k))
db[collection].update_one({'_id': o['_id']},
{'$set': {k: v}},
upsert=True)
return wrapper
return decorator
@find_objects('samples')
def get_samples(db, query):
"""
Return list of documents from 'samples' collection based on query.
"""
return db, query
@find_objects('counts')
def get_counts(db, query):
"""
Return list of documents from 'counts' collection based on query.
"""
return db, query
@find_objects('metrics')
def get_metrics(db, query):
"""
Return list of documents from 'metrics' collection based on query.
"""
return db, query
@find_objects('runs')
def get_runs(db, query):
"""
Return list of documents from 'runs' collection based on query.
"""
return db, query
@find_objects('genomicsWorkflowbatches')
def get_genomicsWorkflowbatches(db, query):
"""
Return list of documents from 'genomicsWorkflowbatches' collection based
on query.
"""
return db, query
@find_objects('genomicsSamples')
def get_genomicsSamples(db, query):
"""
Return list of documents from 'genomicsSamples' collection based on query.
"""
return db, query
@find_objects('genomicsCounts')
def get_genomicsCounts(db, query):
"""
Return list of documents from 'genomicsCounts' collection based on query.
"""
return db, query
@find_objects('genomicsMetrics')
def get_genomicsMetrics(db, query):
"""
Return list of documents from 'genomicsMetrics' collection based on query.
"""
return db, query
@find_objects('genomicsRuns')
def get_genomicsRuns(db, query):
"""
Return list of documents from 'genomicsRuns' collection based on query.
"""
return db, query
@insert_objects('samples')
def put_samples(db, samples):
"""
Insert each document in list into 'samples' collection.
"""
return db, samples
@insert_objects('counts')
def put_counts(db, counts):
"""
Insert each document in list into 'counts' collection.
"""
return db, counts
@insert_objects('metrics')
def put_metrics(db, metrics):
"""
Insert each document in list into 'metrics' collection.
"""
return db, metrics
@insert_objects('runs')
def put_runs(db, runs):
"""
Insert each document in list into 'runs' collection.
"""
return db, runs
@insert_objects('genomicsWorkflowbatches')
def put_genomicsWorkflowbatches(db, workflowbatches):
"""
Insert each document in list into 'genomicsWorkflowbatches' collection.
"""
return db, workflowbatches
@insert_objects('genomicsSamples')
def put_genomicsSamples(db, samples):
"""
Insert each document in list into 'genomicsSamples' collection.
"""
return db, samples
@insert_objects('genomicsCounts')
def put_genomicsCounts(db, counts):
"""
Insert each document in list into 'genomicsCounts' collection.
"""
return db, counts
@insert_objects('genomicsMetrics')
def put_genomicsMetrics(db, metrics):
"""
Insert each document in list into 'genomicsMetrics' collection.
"""
return db, metrics
@insert_objects('genomicsRuns')
def put_genomicsRuns(db, runs):
"""
Insert each document in list into 'genomicsRuns' collection.
"""
return db, runs
def create_workflowbatch_id(db, prefix, date):
"""
Check the 'genomicsWorkflowbatches' collection and construct ID with lowest
available batch number (i.e., '<prefix>_<date>_<number>').
:type db: type[pymongo.database.Database]
:param db: database object for current MongoDB connection
:type prefix: str
:param prefix: base string for workflow batch ID, based on workflow
batch type (e.g., 'globusgalaxy' for Globus Galaxy workflows)
:type date: type[datetime.datetime]
:param date: date on which workflow batch was run
:rtype: str
:return: a unique ID for the workflow batch, with the prefix and
date combination appended with the highest available integer
"""
isodate = datetime.date.isoformat(date)
query = {'_id': {'$regex': '{}_{}_.+'.format(prefix, isodate)}}
logger.debug("searching 'genomicsWorkflowbatches' collection with query '{}'"
.format(query))
workflowbatches = get_genomicsWorkflowbatches(db, query)
logger.debug("matched workflow batches: '{}'".format(workflowbatches))
num = 1
if len(workflowbatches):
num = max([int(util.matchdefault('\d$', wb['_id']))
for wb in workflowbatches])
while True:
num_regex = re.compile('_{}$'.format(num))
logger.debug("searching for workflowbatches '{}' ending in '{}'"
.format(workflowbatches, num))
if any([num_regex.search(wb['_id']) for wb in workflowbatches]):
num += 1
break
return '{}_{}_{}'.format(prefix, isodate, num)
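# Illustrative sketch (not part of the original module): for a hypothetical
# Globus Galaxy batch run on 2016-09-01, with one batch already recorded for
# that prefix/date, the call
#
#     create_workflowbatch_id(db, 'globusgalaxy', datetime.datetime(2016, 9, 1))
#
# would return an ID of the form 'globusgalaxy_2016-09-01_2'.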
def search_ancestors(db, sample_id, field):
"""
Given an object in the 'samples' collection, specified by the input
ID, iteratively walk through ancestors based on 'parentId' until
a value is found for the requested field.
:type db: type[pymongo.database.Database]
:param db: database object for current MongoDB connection
:type sample_id: str
:param sample_id: a unique ID for a sample in GenLIMS
:type field: str
:param field: the field for which to search among ancestor samples
:return: value for field, if found
"""
sample = db.samples.find_one({'_id': sample_id})
if sample is not None:
if field in sample:
return sample[field]
else:
try:
return search_ancestors(db, sample['parentId'], field)
except KeyError:
logger.debug("input sample '{}' has no mapped parent sample"
.format(sample_id),
exc_info=True)
else:
logger.debug("input sample '{}' not found in db"
.format(sample_id))
|
jaeddy/bripipetools
|
bripipetools/database/operations.py
|
Python
|
mit
| 7,760
|
[
"Galaxy"
] |
01ee6e1e095d2f599de537912411cf858ac5df5a4787af270e55eb7ea4a494e5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Run this test like so:
# vtkpython TestParametricFunctions.py -D $VTK_DATA_ROOT \
# -B $VTK_DATA_ROOT/Baseline/Graphics/
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# ------------------------------------------------------------
# Purpose: Test the parametric functions.
# ------------------------------------------------------------
class TestParametricFunctions(vtk.test.Testing.vtkTest):
def testParametricFunctions(self):
# ------------------------------------------------------------
# Get a texture
# ------------------------------------------------------------
textureReader = vtk.vtkJPEGReader()
textureReader.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
texture = vtk.vtkTexture()
texture.SetInputConnection(textureReader.GetOutputPort())
# ------------------------------------------------------------
# For each parametric surface:
# 1) Create it
# 2) Assign mappers and actors
# 3) Position this object
# 4) Add a label
# ------------------------------------------------------------
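# Illustrative sketch (not part of the original test): every surface
# below follows the same pipeline, e.g. for a hypothetical function fn:
#   fnSource = vtk.vtkParametricFunctionSource()
#   fnSource.SetParametricFunction(fn)
#   fnMapper = vtk.vtkPolyDataMapper()
#   fnMapper.SetInputConnection(fnSource.GetOutputPort())
#   fnActor = vtk.vtkActor()
#   fnActor.SetMapper(fnMapper)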
# ------------------------------------------------------------
# Create a torus
# ------------------------------------------------------------
torus = vtk.vtkParametricTorus()
torusSource = vtk.vtkParametricFunctionSource()
torusSource.SetParametricFunction(torus)
torusSource.SetScalarModeToPhase()
torusMapper = vtk.vtkPolyDataMapper()
torusMapper.SetInputConnection(torusSource.GetOutputPort())
torusMapper.SetScalarRange(0, 360)
torusActor = vtk.vtkActor()
torusActor.SetMapper(torusMapper)
torusActor.SetPosition(0, 12, 0)
torusTextMapper = vtk.vtkTextMapper()
torusTextMapper.SetInput("Torus")
torusTextMapper.GetTextProperty().SetJustificationToCentered()
torusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
torusTextMapper.GetTextProperty().SetColor(1, 0, 0)
torusTextMapper.GetTextProperty().SetFontSize(14)
torusTextActor = vtk.vtkActor2D()
torusTextActor.SetMapper(torusTextMapper)
torusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
torusTextActor.GetPositionCoordinate().SetValue(0, 9.5, 0)
# ------------------------------------------------------------
# Create a klein bottle
# ------------------------------------------------------------
klein = vtk.vtkParametricKlein()
kleinSource = vtk.vtkParametricFunctionSource()
kleinSource.SetParametricFunction(klein)
kleinSource.SetScalarModeToU0V0()
kleinMapper = vtk.vtkPolyDataMapper()
kleinMapper.SetInputConnection(kleinSource.GetOutputPort())
kleinMapper.SetScalarRange(0, 3)
kleinActor = vtk.vtkActor()
kleinActor.SetMapper(kleinMapper)
kleinActor.SetPosition(8, 10.5, 0)
kleinTextMapper = vtk.vtkTextMapper()
kleinTextMapper.SetInput("Klein")
kleinTextMapper.GetTextProperty().SetJustificationToCentered()
kleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
kleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
kleinTextMapper.GetTextProperty().SetFontSize(14)
kleinTextActor = vtk.vtkActor2D()
kleinTextActor.SetMapper(kleinTextMapper)
kleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
kleinTextActor.GetPositionCoordinate().SetValue(8, 9.5, 0)
# ------------------------------------------------------------
# Create a Figure-8 Klein
# ------------------------------------------------------------
klein2 = vtk.vtkParametricFigure8Klein()
klein2Source = vtk.vtkParametricFunctionSource()
klein2Source.SetParametricFunction(klein2)
klein2Source.GenerateTextureCoordinatesOn()
klein2Mapper = vtk.vtkPolyDataMapper()
klein2Mapper.SetInputConnection(klein2Source.GetOutputPort())
klein2Mapper.SetScalarRange(0, 3)
klein2Actor = vtk.vtkActor()
klein2Actor.SetMapper(klein2Mapper)
klein2Actor.SetPosition(16, 12, 0)
klein2Actor.SetTexture(texture)
fig8KleinTextMapper = vtk.vtkTextMapper()
fig8KleinTextMapper.SetInput("Fig-8.Klein")
fig8KleinTextMapper.GetTextProperty().SetJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
fig8KleinTextMapper.GetTextProperty().SetFontSize(14)
fig8KleinTextActor = vtk.vtkActor2D()
fig8KleinTextActor.SetMapper(fig8KleinTextMapper)
fig8KleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
fig8KleinTextActor.GetPositionCoordinate().SetValue(16, 9.5, 0)
# ------------------------------------------------------------
# Create a mobius strip
# ------------------------------------------------------------
mobius = vtk.vtkParametricMobius()
mobiusSource = vtk.vtkParametricFunctionSource()
mobiusSource.SetParametricFunction(mobius)
mobiusSource.GenerateTextureCoordinatesOn()
mobiusMapper = vtk.vtkPolyDataMapper()
mobiusMapper.SetInputConnection(mobiusSource.GetOutputPort())
mobiusActor = vtk.vtkActor()
mobiusActor.SetMapper(mobiusMapper)
mobiusActor.RotateX(45)
mobiusActor.SetPosition(24, 12, 0)
mobiusActor.SetTexture(texture)
mobiusTextMapper = vtk.vtkTextMapper()
mobiusTextMapper.SetInput("Mobius")
mobiusTextMapper.GetTextProperty().SetJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetColor(1, 0, 0)
mobiusTextMapper.GetTextProperty().SetFontSize(14)
mobiusTextActor = vtk.vtkActor2D()
mobiusTextActor.SetMapper(mobiusTextMapper)
mobiusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
mobiusTextActor.GetPositionCoordinate().SetValue(24, 9.5, 0)
# ------------------------------------------------------------
# Create a super toroid
# ------------------------------------------------------------
toroid = vtk.vtkParametricSuperToroid()
toroid.SetN1(2)
toroid.SetN2(3)
toroidSource = vtk.vtkParametricFunctionSource()
toroidSource.SetParametricFunction(toroid)
toroidSource.SetScalarModeToU()
toroidMapper = vtk.vtkPolyDataMapper()
toroidMapper.SetInputConnection(toroidSource.GetOutputPort())
toroidMapper.SetScalarRange(0, 6.28)
toroidActor = vtk.vtkActor()
toroidActor.SetMapper(toroidMapper)
toroidActor.SetPosition(0, 4, 0)
superToroidTextMapper = vtk.vtkTextMapper()
superToroidTextMapper.SetInput("Super.Toroid")
superToroidTextMapper.GetTextProperty().SetJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superToroidTextMapper.GetTextProperty().SetFontSize(14)
superToroidTextActor = vtk.vtkActor2D()
superToroidTextActor.SetMapper(superToroidTextMapper)
superToroidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superToroidTextActor.GetPositionCoordinate().SetValue(0, 1.5, 0)
# ------------------------------------------------------------
# Create a super ellipsoid
# ------------------------------------------------------------
superEllipsoid = vtk.vtkParametricSuperEllipsoid()
superEllipsoid.SetXRadius(1.25)
superEllipsoid.SetYRadius(1.5)
superEllipsoid.SetZRadius(1.0)
superEllipsoid.SetN1(1.1)
superEllipsoid.SetN2(1.75)
superEllipsoidSource = vtk.vtkParametricFunctionSource()
superEllipsoidSource.SetParametricFunction(superEllipsoid)
superEllipsoidSource.SetScalarModeToV()
superEllipsoidMapper = vtk.vtkPolyDataMapper()
superEllipsoidMapper.SetInputConnection(superEllipsoidSource.GetOutputPort())
superEllipsoidMapper.SetScalarRange(0, 3.14)
superEllipsoidActor = vtk.vtkActor()
superEllipsoidActor.SetMapper(superEllipsoidMapper)
superEllipsoidActor.SetPosition(8, 4, 0)
superEllipsoidTextMapper = vtk.vtkTextMapper()
superEllipsoidTextMapper.SetInput("Super.Ellipsoid")
superEllipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superEllipsoidTextMapper.GetTextProperty().SetFontSize(14)
superEllipsoidTextActor = vtk.vtkActor2D()
superEllipsoidTextActor.SetMapper(superEllipsoidTextMapper)
superEllipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superEllipsoidTextActor.GetPositionCoordinate().SetValue(8, 1.5, 0)
# ------------------------------------------------------------
# Create an open 1D spline
# ------------------------------------------------------------
math = vtk.vtkMath()
inputPoints = vtk.vtkPoints()
for i in range(0, 10):
x = math.Random(-1, 1)
y = math.Random(-1, 1)
z = math.Random(-1, 1)
inputPoints.InsertPoint(i,x,y,z)
spline = vtk.vtkParametricSpline()
spline.SetPoints(inputPoints)
spline.ClosedOff()
splineSource = vtk.vtkParametricFunctionSource()
splineSource.SetParametricFunction(spline)
splineMapper = vtk.vtkPolyDataMapper()
splineMapper.SetInputConnection(splineSource.GetOutputPort())
splineActor = vtk.vtkActor()
splineActor.SetMapper(splineMapper)
splineActor.SetPosition(16, 4, 0)
splineActor.GetProperty().SetColor(0, 0, 0)
splineTextMapper = vtk.vtkTextMapper()
splineTextMapper.SetInput("Open.Spline")
splineTextMapper.GetTextProperty().SetJustificationToCentered()
splineTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
splineTextMapper.GetTextProperty().SetColor(1, 0, 0)
splineTextMapper.GetTextProperty().SetFontSize(14)
splineTextActor = vtk.vtkActor2D()
splineTextActor.SetMapper(splineTextMapper)
splineTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
splineTextActor.GetPositionCoordinate().SetValue(16, 1.5, 0)
# ------------------------------------------------------------
# Create a closed 1D spline
# ------------------------------------------------------------
spline2 = vtk.vtkParametricSpline()
spline2.SetPoints(inputPoints)
spline2.ClosedOn()
spline2Source = vtk.vtkParametricFunctionSource()
spline2Source.SetParametricFunction(spline2)
spline2Mapper = vtk.vtkPolyDataMapper()
spline2Mapper.SetInputConnection(spline2Source.GetOutputPort())
spline2Actor = vtk.vtkActor()
spline2Actor.SetMapper(spline2Mapper)
spline2Actor.SetPosition(24, 4, 0)
spline2Actor.GetProperty().SetColor(0, 0, 0)
spline2TextMapper = vtk.vtkTextMapper()
spline2TextMapper.SetInput("Closed.Spline")
spline2TextMapper.GetTextProperty().SetJustificationToCentered()
spline2TextMapper.GetTextProperty().SetVerticalJustificationToCentered()
spline2TextMapper.GetTextProperty().SetColor(1, 0, 0)
spline2TextMapper.GetTextProperty().SetFontSize(14)
spline2TextActor = vtk.vtkActor2D()
spline2TextActor.SetMapper(spline2TextMapper)
spline2TextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
spline2TextActor.GetPositionCoordinate().SetValue(24, 1.5, 0)
# ------------------------------------------------------------
# Create a spiral conic
# ------------------------------------------------------------
sconic = vtk.vtkParametricConicSpiral()
sconic.SetA(0.8)
sconic.SetB(2.5)
sconic.SetC(0.4)
sconicSource = vtk.vtkParametricFunctionSource()
sconicSource.SetParametricFunction(sconic)
sconicSource.SetScalarModeToDistance()
sconicMapper = vtk.vtkPolyDataMapper()
sconicMapper.SetInputConnection(sconicSource.GetOutputPort())
sconicActor = vtk.vtkActor()
sconicActor.SetMapper(sconicMapper)
sconicMapper.SetScalarRange(0, 9)
sconicActor.SetPosition(0, -4, 0)
sconicActor.SetScale(1.2, 1.2, 1.2)
sconicTextMapper = vtk.vtkTextMapper()
sconicTextMapper.SetInput("Spiral.Conic")
sconicTextMapper.GetTextProperty().SetJustificationToCentered()
sconicTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
sconicTextMapper.GetTextProperty().SetColor(1, 0, 0)
sconicTextMapper.GetTextProperty().SetFontSize(14)
sconicTextActor = vtk.vtkActor2D()
sconicTextActor.SetMapper(sconicTextMapper)
sconicTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
sconicTextActor.GetPositionCoordinate().SetValue(0, -6.5, 0)
# ------------------------------------------------------------
# Create Boy's surface
# ------------------------------------------------------------
boy = vtk.vtkParametricBoy()
boySource = vtk.vtkParametricFunctionSource()
boySource.SetParametricFunction(boy)
boySource.SetScalarModeToModulus()
boyMapper = vtk.vtkPolyDataMapper()
boyMapper.SetInputConnection(boySource.GetOutputPort())
boyMapper.SetScalarRange(0, 2)
boyActor = vtk.vtkActor()
boyActor.SetMapper(boyMapper)
boyActor.SetPosition(8, -4, 0)
boyActor.SetScale(1.5, 1.5, 1.5)
boyTextMapper = vtk.vtkTextMapper()
boyTextMapper.SetInput("Boy")
boyTextMapper.GetTextProperty().SetJustificationToCentered()
boyTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
boyTextMapper.GetTextProperty().SetColor(1, 0, 0)
boyTextMapper.GetTextProperty().SetFontSize(14)
boyTextActor = vtk.vtkActor2D()
boyTextActor.SetMapper(boyTextMapper)
boyTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
boyTextActor.GetPositionCoordinate().SetValue(8, -6.5, 0)
# ------------------------------------------------------------
# Create a cross cap
# ------------------------------------------------------------
crossCap = vtk.vtkParametricCrossCap()
crossCapSource = vtk.vtkParametricFunctionSource()
crossCapSource.SetParametricFunction(crossCap)
crossCapSource.SetScalarModeToY()
crossCapMapper = vtk.vtkPolyDataMapper()
crossCapMapper.SetInputConnection(crossCapSource.GetOutputPort())
crossCapActor = vtk.vtkActor()
crossCapActor.SetMapper(crossCapMapper)
crossCapActor.RotateX(65)
crossCapActor.SetPosition(16, -4, 0)
crossCapActor.SetScale(1.5, 1.5, 1.5)
crossCapTextMapper = vtk.vtkTextMapper()
crossCapTextMapper.SetInput("Cross.Cap")
crossCapTextMapper.GetTextProperty().SetJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetColor(1, 0, 0)
crossCapTextMapper.GetTextProperty().SetFontSize(14)
crossCapTextActor = vtk.vtkActor2D()
crossCapTextActor.SetMapper(crossCapTextMapper)
crossCapTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
crossCapTextActor.GetPositionCoordinate().SetValue(16, -6.5, 0)
# ------------------------------------------------------------
# Create Dini's surface
# ------------------------------------------------------------
dini = vtk.vtkParametricDini()
diniSource = vtk.vtkParametricFunctionSource()
diniSource.SetScalarModeToDistance()
diniSource.SetParametricFunction(dini)
diniMapper = vtk.vtkPolyDataMapper()
diniMapper.SetInputConnection(diniSource.GetOutputPort())
diniActor = vtk.vtkActor()
diniActor.SetMapper(diniMapper)
diniActor.RotateX(-90)
diniActor.SetPosition(24, -3, 0)
diniActor.SetScale(1.5, 1.5, 0.5)
diniTextMapper = vtk.vtkTextMapper()
diniTextMapper.SetInput("Dini")
diniTextMapper.GetTextProperty().SetJustificationToCentered()
diniTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
diniTextMapper.GetTextProperty().SetColor(1, 0, 0)
diniTextMapper.GetTextProperty().SetFontSize(14)
diniTextActor = vtk.vtkActor2D()
diniTextActor.SetMapper(diniTextMapper)
diniTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
diniTextActor.GetPositionCoordinate().SetValue(24, -6.5, 0)
# ------------------------------------------------------------
# Create Enneper's surface
# ------------------------------------------------------------
enneper = vtk.vtkParametricEnneper()
enneperSource = vtk.vtkParametricFunctionSource()
enneperSource.SetParametricFunction(enneper)
enneperSource.SetScalarModeToQuadrant()
enneperMapper = vtk.vtkPolyDataMapper()
enneperMapper.SetInputConnection(enneperSource.GetOutputPort())
enneperMapper.SetScalarRange(1, 4)
enneperActor = vtk.vtkActor()
enneperActor.SetMapper(enneperMapper)
enneperActor.SetPosition(0, -12, 0)
enneperActor.SetScale(0.25, 0.25, 0.25)
enneperTextMapper = vtk.vtkTextMapper()
enneperTextMapper.SetInput("Enneper")
enneperTextMapper.GetTextProperty().SetJustificationToCentered()
enneperTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
enneperTextMapper.GetTextProperty().SetColor(1, 0, 0)
enneperTextMapper.GetTextProperty().SetFontSize(14)
enneperTextActor = vtk.vtkActor2D()
enneperTextActor.SetMapper(enneperTextMapper)
enneperTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
enneperTextActor.GetPositionCoordinate().SetValue(0, -14.5, 0)
# ------------------------------------------------------------
# Create an ellipsoidal surface
# ------------------------------------------------------------
ellipsoid = vtk.vtkParametricEllipsoid()
ellipsoid.SetXRadius(1)
ellipsoid.SetYRadius(0.75)
ellipsoid.SetZRadius(0.5)
ellipsoidSource = vtk.vtkParametricFunctionSource()
ellipsoidSource.SetParametricFunction(ellipsoid)
ellipsoidSource.SetScalarModeToZ()
ellipsoidMapper = vtk.vtkPolyDataMapper()
ellipsoidMapper.SetInputConnection(ellipsoidSource.GetOutputPort())
ellipsoidMapper.SetScalarRange(-0.5, 0.5)
ellipsoidActor = vtk.vtkActor()
ellipsoidActor.SetMapper(ellipsoidMapper)
ellipsoidActor.SetPosition(8, -12, 0)
ellipsoidActor.SetScale(1.5, 1.5, 1.5)
ellipsoidTextMapper = vtk.vtkTextMapper()
ellipsoidTextMapper.SetInput("Ellipsoid")
ellipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
ellipsoidTextMapper.GetTextProperty().SetFontSize(14)
ellipsoidTextActor = vtk.vtkActor2D()
ellipsoidTextActor.SetMapper(ellipsoidTextMapper)
ellipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
ellipsoidTextActor.GetPositionCoordinate().SetValue(8, -14.5, 0)
# ------------------------------------------------------------
# Create a surface with random hills on it.
# Note that for testing, we will disable the
# random generation of the surfaces. This is
# because random number generators do not
# return the same result on different operating
# systems.
# ------------------------------------------------------------
randomHills = vtk.vtkParametricRandomHills()
randomHills.AllowRandomGenerationOff()
randomHills.GenerateTheHills()
randomHillsSource = vtk.vtkParametricFunctionSource()
randomHillsSource.SetParametricFunction(randomHills)
randomHillsSource.GenerateTextureCoordinatesOn()
randomHillsMapper = vtk.vtkPolyDataMapper()
randomHillsMapper.SetInputConnection(randomHillsSource.GetOutputPort())
randomHillsActor = vtk.vtkActor()
randomHillsActor.SetMapper(randomHillsMapper)
randomHillsActor.SetPosition(16, -14, 0)
randomHillsActor.SetScale(0.2, 0.2, 0.2)
randomHillsActor.SetTexture(texture)
randomHillsTextMapper = vtk.vtkTextMapper()
randomHillsTextMapper.SetInput("Random.Hills")
randomHillsTextMapper.GetTextProperty().SetJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetColor(1, 0, 0)
randomHillsTextMapper.GetTextProperty().SetFontSize(14)
randomHillsTextActor = vtk.vtkActor2D()
randomHillsTextActor.SetMapper(randomHillsTextMapper)
randomHillsTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
randomHillsTextActor.GetPositionCoordinate().SetValue(16, -14.5, 0)
# ------------------------------------------------------------
# Create Steiner's Roman Surface.
# ------------------------------------------------------------
roman = vtk.vtkParametricRoman()
roman.SetRadius(1.5)
romanSource = vtk.vtkParametricFunctionSource()
romanSource.SetParametricFunction(roman)
romanSource.SetScalarModeToX()
romanMapper = vtk.vtkPolyDataMapper()
romanMapper.SetInputConnection(romanSource.GetOutputPort())
romanActor = vtk.vtkActor()
romanActor.SetMapper(romanMapper)
romanActor.SetPosition(24, -12, 0)
romanTextMapper = vtk.vtkTextMapper()
romanTextMapper.SetInput("Roman")
romanTextMapper.GetTextProperty().SetJustificationToCentered()
romanTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
romanTextMapper.GetTextProperty().SetColor(1, 0, 0)
romanTextMapper.GetTextProperty().SetFontSize(14)
romanTextActor = vtk.vtkActor2D()
romanTextActor.SetMapper(romanTextMapper)
romanTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
romanTextActor.GetPositionCoordinate().SetValue(24, -14.5, 0)
# ------------------------------------------------------------
# Create the RenderWindow, Renderer and both Actors
# ------------------------------------------------------------
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# add actors
ren.AddViewProp(torusActor)
ren.AddViewProp(kleinActor)
ren.AddViewProp(klein2Actor)
ren.AddViewProp(toroidActor)
ren.AddViewProp(superEllipsoidActor)
ren.AddViewProp(mobiusActor)
ren.AddViewProp(splineActor)
ren.AddViewProp(spline2Actor)
ren.AddViewProp(sconicActor)
ren.AddViewProp(boyActor)
ren.AddViewProp(crossCapActor)
ren.AddViewProp(diniActor)
ren.AddViewProp(enneperActor)
ren.AddViewProp(ellipsoidActor)
ren.AddViewProp(randomHillsActor)
ren.AddViewProp(romanActor)
#add text actors
ren.AddViewProp(torusTextActor)
ren.AddViewProp(kleinTextActor)
ren.AddViewProp(fig8KleinTextActor)
ren.AddViewProp(mobiusTextActor)
ren.AddViewProp(superToroidTextActor)
ren.AddViewProp(superEllipsoidTextActor)
ren.AddViewProp(splineTextActor)
ren.AddViewProp(spline2TextActor)
ren.AddViewProp(sconicTextActor)
ren.AddViewProp(boyTextActor)
ren.AddViewProp(crossCapTextActor)
ren.AddViewProp(diniTextActor)
ren.AddViewProp(enneperTextActor)
ren.AddViewProp(ellipsoidTextActor)
ren.AddViewProp(randomHillsTextActor)
ren.AddViewProp(romanTextActor)
ren.SetBackground(0.7, 0.8, 1)
renWin.SetSize(500, 500)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.3)
iren.Initialize()
renWin.Render()
#iren.Start()
img_file = "TestParametricFunctions.png"
vtk.test.Testing.compareImage(iren.GetRenderWindow(),vtk.test.Testing.getAbsImagePath(img_file),threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestParametricFunctions, 'test')])
|
aashish24/VTK-old
|
Common/ComputationalGeometry/Testing/Python/TestParametricFunctions.py
|
Python
|
bsd-3-clause
| 25,521
|
[
"VTK"
] |
758ca412fed9592df6eefb97c4edbebb31cbba71944f68e32bf048a145b6e77b
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#
# Impressive, a fancy presentation tool
# Copyright (C) 2005-2008 Martin J. Fiedler <[email protected]>
# portions Copyright (C) 2005 Rob Reid <[email protected]>
# portions Copyright (C) 2006 Ronan Le Hy <[email protected]>
# portions Copyright (C) 2007 Luke Campagnola <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__title__ = "Impressive (dual head)"
__version__ = "0.10.2"
__author__ = "Martin J. Fiedler"
__email__ = "[email protected]"
__website__ = "http://impressive.sourceforge.net/"
import sys
def greet(): print >>sys.stderr, "Welcome to", __title__, "version", __version__
if __name__ == "__main__": greet()
TopLeft, BottomLeft, TopRight, BottomRight, TopCenter, BottomCenter = range(6)
NoCache, MemCache, FileCache, PersistentCache = range(4) # for CacheMode
Off, First, Last = range(3) # for AutoOverview
# You may change the following lines to modify the default settings
Fullscreen = True
Scaling = False
Supersample = None
BackgroundRendering = True
UseGhostScript = False
UseAutoScreenSize = True
ScreenWidth = 1024
ScreenHeight = 768
TransitionDuration = 1000
MouseHideDelay = 3000
BoxFadeDuration = 100
ZoomDuration = 250
BlankFadeDuration = 250
MeshResX = 48
MeshResY = 36
MarkColor = (1.0, 0.0, 0.0, 0.1)
BoxEdgeSize = 4
SpotRadius = 64
SpotDetail = 16
CacheMode = FileCache
OverviewBorder = 3
OverviewLogoBorder = 24
AutoOverview = Off
InitialPage = None
Wrap = False
AutoAdvance = None
RenderToDirectory = None
Rotation = 0
AllowExtensions = True
DAR = None
PAR = 1.0
PollInterval = 0
PageRangeStart = 0
PageRangeEnd = 999999
FontSize = 14
FontTextureWidth = 512
FontTextureHeight = 256
Gamma = 1.0
BlackLevel = 0
GammaStep = 1.1
BlackLevelStep = 8
EstimatedDuration = None
ProgressBarSize = 16
ProgressBarAlpha = 128
CursorImage = None
CursorHotspot = (0, 0)
MinutesOnly = False
OSDMargin = 16
OSDAlpha = 1.0
OSDTimePos = TopRight
OSDTitlePos = BottomLeft
OSDPagePos = BottomRight
OSDStatusPos = TopLeft
# import basic modules
import random, getopt, os, types, re, codecs, tempfile, glob, StringIO, md5
import traceback
from math import *
# initialize some platform-specific settings
if os.name == "nt":
root = os.path.split(sys.argv[0])[0] or "."
pdftoppmPath = os.path.join(root, "pdftoppm.exe")
GhostScriptPath = os.path.join(root, "gs\\gswin32c.exe")
GhostScriptPlatformOptions = ["-I" + os.path.join(root, "gs")]
try:
import win32api
MPlayerPath = os.path.join(root, "mplayer.exe")
def GetScreenSize():
dm = win32api.EnumDisplaySettings(None, -1) #ENUM_CURRENT_SETTINGS
return (int(dm.PelsWidth), int(dm.PelsHeight))
def RunURL(url):
win32api.ShellExecute(0, "open", url, "", "", 0)
except ImportError:
MPlayerPath = ""
def GetScreenSize():
if True: # DualHead is not set at this point
return ProjectionFrame.size()
else:
return pygame.display.list_modes()[0]
def RunURL(url): print "Error: cannot run URL `%s'" % url
MPlayerPlatformOptions = [ "-colorkey", "0x000000" ]
MPlayerColorKey = True
pdftkPath = os.path.join(root, "pdftk.exe")
FileNameEscape = '"'
spawn = os.spawnv
if getattr(sys, "frozen", None):
sys.path.append(root)
FontPath = []
FontList = ["Verdana.ttf", "Arial.ttf"]
else:
pdftoppmPath = "pdftoppm"
GhostScriptPath = "gs"
GhostScriptPlatformOptions = []
MPlayerPath = "mplayer"
MPlayerPlatformOptions = [ "-vo", "gl" ]
MPlayerColorKey = False
pdftkPath = "pdftk"
spawn = os.spawnvp
FileNameEscape = ""
FontPath = ["/usr/share/fonts", "/usr/local/share/fonts", "/usr/X11R6/lib/X11/fonts/TTF"]
FontList = ["DejaVuSans.ttf", "Vera.ttf", "Verdana.ttf"]
def RunURL(url):
try:
spawn(os.P_NOWAIT, "xdg-open", ["xdg-open", url])
except OSError:
print >>sys.stderr, "Error: cannot open URL `%s'" % url
def GetScreenSize():
if True: # HACK: DualHead is not set at this point
return ProjectionFrame.size()
res_re = re.compile(r'\s*(\d+)x(\d+)\s+\d+\.\d+\*')
# parse string like
# LVDS connected 1280x800+1920+0 (normal left inverted right x axis y axis) 287mm x 180mm
todo_res_re = re.compile(r"""([^\s]+) # monitor name, e.g. LVDS or VGA
\s+connected\s+
([^s]+) # geometry expression""", re.VERBOSE)
# use different regex, do not match for strings ending with asterisk,
# search for strings with 'connected' and parse the complete
# geometry expression 1280x800+1920+0 for **every** monitor,
# use LVDS as prompter monitor and prefer VGA as projection monitor
for path in os.getenv("PATH").split(':'):
fullpath = os.path.join(path, "xrandr")
if os.path.exists(fullpath):
res = None
try:
for line in os.popen(fullpath, "r"):
m = res_re.match(line)
if m:
# m.group(1) - screen width
# m.group(2) - screen height
print "xrandr found matching line " + line
res = tuple(map(int, m.groups()))
print "xrandr found match " + str(res)
except OSError:
pass
if res:
return res
return pygame.display.list_modes()[0]
# import special modules
try:
from OpenGL.GL import *
import pygame
from pygame.locals import *
import Image, ImageDraw, ImageFont, ImageFilter
from PIL import TiffImagePlugin, BmpImagePlugin, JpegImagePlugin, PngImagePlugin, PpmImagePlugin
except (ValueError, ImportError), err:
print >>sys.stderr, "Oops! Cannot load necessary modules:", err
print >>sys.stderr, """To use Impressive, you need to install the following Python modules:
- PyOpenGL [python-opengl] http://pyopengl.sourceforge.net/
- PyGame [python-pygame] http://www.pygame.org/
- PIL [python-imaging] http://www.pythonware.com/products/pil/
- PyWin32 (OPTIONAL, Win32) http://starship.python.net/crew/mhammond/win32/
Additionally, please be sure to have pdftoppm or GhostScript installed if you
intend to use PDF input."""
sys.exit(1)
try:
import thread
EnableBackgroundRendering = True
def create_lock(): return thread.allocate_lock()
except ImportError:
EnableBackgroundRendering = False
class pseudolock:
def __init__(self): self.state = False
def acquire(self, dummy=0): self.state = True
def release(self): self.state = False
def locked(self): return self.state
def create_lock(): return pseudolock()
##### TOOL CODE ################################################################
# initialize private variables
FileName = ""
FileList = []
InfoScriptPath = None
Marking = False
Tracing = False
Panning = False
FileProps = {}
PageProps = {}
PageCache = {}
CacheFile = None
CacheFileName = None
CacheFilePos = 0
CacheMagic = ""
MPlayerPID = 0
VideoPlaying = False
MouseDownX = 0
MouseDownY = 0
MarkUL = (0, 0)
MarkLR = (0, 0)
ZoomX0 = 0.0
ZoomY0 = 0.0
ZoomArea = 1.0
ZoomMode = False
IsZoomed = False
ZoomWarningIssued = False
TransitionRunning = False
CurrentCaption = 0
OverviewNeedUpdate = False
FileStats = None
OSDFont = None
CurrentOSDCaption = ""
CurrentOSDPage = ""
CurrentOSDStatus = ""
CurrentOSDComment = ""
Lrender = create_lock()
Lcache = create_lock()
Loverview = create_lock()
RTrunning = False
RTrestart = False
StartTime = 0
CurrentTime = 0
PageEnterTime = 0
TimeDisplay = False
TimeTracking = False
FirstPage = True
ProgressBarPos = 0
CursorVisible = True
OverviewMode = False
LastPage = 0
WantStatus = False
# tool constants (used in info scripts)
FirstTimeOnly = 2
# event constants
USEREVENT_HIDE_MOUSE = USEREVENT
USEREVENT_PAGE_TIMEOUT = USEREVENT + 1
USEREVENT_POLL_FILE = USEREVENT + 2
USEREVENT_TIMER_UPDATE = USEREVENT + 3
# read and write the PageProps and FileProps meta-dictionaries
def GetProp(prop_dict, key, prop, default=None):
if not key in prop_dict: return default
if type(prop) == types.StringType:
return prop_dict[key].get(prop, default)
for subprop in prop:
try:
return prop_dict[key][subprop]
except KeyError:
pass
return default
def SetProp(prop_dict, key, prop, value):
if not key in prop_dict:
prop_dict[key] = {prop: value}
else:
prop_dict[key][prop] = value
def GetPageProp(page, prop, default=None):
global PageProps
return GetProp(PageProps, page, prop, default)
def SetPageProp(page, prop, value):
global PageProps
SetProp(PageProps, page, prop, value)
def GetTristatePageProp(page, prop, default=0):
res = GetPageProp(page, prop, default)
if res != FirstTimeOnly: return res
return (GetPageProp(page, '_shown', 0) == 1)
def GetFileProp(page, prop, default=None):
global FileProps
return GetProp(FileProps, page, prop, default)
def SetFileProp(page, prop, value):
global FileProps
SetProp(FileProps, page, prop, value)
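# Illustrative usage of the property helpers above (a sketch, not executed here):
#   SetPageProp(3, 'skip', True)         # mark page 3 as skipped
#   GetPageProp(3, 'skip', False)        # -> True
#   GetPageProp(4, 'timeout', 0)         # -> 0 (default, property not set)
# Property names starting with an underscore (e.g. '_title', '_box') are treated
# as internal and are stripped again before the info script is written back
# (see GetPublicProps() further below).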
# the Impressive logo (256x64 pixels grayscale PNG)
LOGO = '\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x01\x00\x00\x00\x00@\x08\x00\x00\x00\x00\xd06\xf6b\x00\x00\x0b\xf1IDATx\xda\xed[y|OW\x16\x7f\xbf\xfc\x12\x12K\x13\xb1\xc4R\x11\xbbN,c\xadHF\xa8\xd6RK\xa7Cf\x08>\xaa\xed\xa0\xa5\x8a\xd8\xe9Hc\x9dRK\x19'+\
'\xb4b\xd4V{\x8d}\xcd\xa0\x944\xb6PF\xc6RU\x82\xa4\x96HD\xf6\xbc\xfe\xee9\xf7\xdew\xee{?Lc>\x9fL\xe6\xe7\xfe\x11\xf7\x9c\xbb\x9c{\xcf\xbb\xf7\x9c\xef9\xf7G\xd3\x9e\x97\xe7\xa5\xa8\x97\x12#7\xdfN:2\xbc\x98\xab\xee\xbf\xd2\t\x1dJB\t\xd7\xdc\x7f\xe9\xeb:/'+\
'\x13\x9fa\x96\xe0O"\xeb\x16Q\x05\xf4\x12\xfb\xd7\xbf)\xf8$C\xf3u=\xa3C\xd1T\xc0F\xa9\x80\x1b\x05\x9e\xc3\'\x93\x8d\xbfZ4-`\xbaT\xc0\x99\x02O\xd2\n\'(U\x14\x15\xd0X\xee__W\xe0I*\xe6\xb3\xf1?\x17\xc9\x13\xd0\xd5P\xc0\xc7\x05\x9fe\xa6cx~\xbf\x82\x8e\x8e'+\
'\\\xeb(S\x0bI\x01\xef\x19\n\xe8\xf5\x0c\xd3\xbc\xb5u\xedk\x05\x1e|\x8dI\xdfTH\n\x98j(\xa0q!-\xa1x\x1e\x93>\xa3\x90\xa4/\x97\xfb\xcf/, T\x0f\xc4\xbf[H\xd2\xf7J\x05\xfcXXf\xa8\x0b\x88\x0f-$\xe9\xdfI\x05l-,\x05\x0c\x03\xf1\x95\x0bI\xfa\x05\xa9\x80\x91'+\
'\x85\xa5\x80\xf9L\xfaCV+\xe3\xfd\xab\xedG\xf9\xc7a3/\xa7\xec\x92\xa5\xcd\x9c\x9bR\x01\xcd\xfec\xa9\x1e~6\x95\x11\xd4\xc6Q\xeaa\xbd.\xab\x87`\xbd\xc1\x90\xd9_M\xedCv\xe5\xd19b\xf1\xd2\x0fB\xdc\x94\xd1\xbb\x98\xf4ko}\xba\xc7\xb1\x96\xcc3\x7f\xa9c\x92'+\
'\xe6\xcd&l\xe3\xeb\xa8\x15\xeb3k\xd5\xc4v\xb2\xa1\xfc\x07\xdf\xde\xd5\xf5\xa4\xed\x91\xadM#~\xbb\xe4p\x92\x9ewi\xf3\x94\xf6\n\xbb\xda\xbc\x98\xeb\xf9\xfa\xb5\x9d3\xc3\xec\x84\xfbP\xec\xff\x01pC\x98\xb0\xea\nT\x04\xf9U\x05\xf9B\xff\xfd\xc9\xf9\xfa'+\
'\xfd}\xd3^7\xba\xb8\x01\x12\xfe\x14\x89m\xac~\xd1Q\xb1\xf59\x863\xdf\xec!\xc6\x8e\xe2\x81\xd7\xfeJT\xc2%])y\x9f\xab_\xb5;p\x9bhZ\xe8UV\x89\x17\xeb\x9a\x99#\x87\xcc\xf5 \xfd\xcb.\xca\x93\r\xb1\x86\n\xbc"\x1fI\xf6\xbf\xc3\xe5\'\xb0K\xe6\x0e\xa0OZ '+\
'\xe18X\xd4KH\xb8\x8f1\x90\xf3Z\x89|\xab\x01\xfd\x1e\x12\t\xac\xbeM\xd3\x02b\x8c=\xa1\x06\xda\x1a\xa7-\xf97\x86\x00\xf7\x1c\xddTn7\xa2\x0b\x18\xc3av\xdd\xfb:Q@\xcb+t\xc4\xd1\x17e\xf7\xcaI\xca\\\x87\xf9\xf1\xf0:\xab\xb0\xcf\x8b\x8fRF\xb2F\x01\xbd'+\
'\xc0\xec\x0fJ\xdfe\x9c\xd5\xfcx\x9f\xa6\x93\\\x08\xe4}\xda\x01\x89@\xc8\x9e\xc5\xea\xb3\xb4\xb04\xd2\xf3\xe7\n\x8e\x86\xa8<\xc2\xd9mH\xa8\xa1[\xca\xfd\x96d\x05K\x18\'Q+\xcb\x0f\n* $M\x1d\x91\\\x81\xf7\xb6\xed5\xcd\x15O\x0c\r)\x99\xbc\x7f\x80'+\
'\xe44\x07:\x1c\xea~\x86\xf8\x89\x8c\xce\xc5X\xbf\xceMu\x92\x87\\\x03\x03\x81\xc2\x8bS\x1d\x9d\xfa\xbb\xb0\xdb\xeb\xbbn`\xcf\xf1\x9a\xdbj\xf6o\xce\xd9\x1d\x89\xc8\xe9(\xef\x0f"\x91Ss\xfe\xe8_;l\xeayl\xbdEVp\x801\xfe\xe9q\x90n(\x08\xf7\x9f\xb1k\xc6'+\
'\xb8u8\xe1\xe7\xbc\xf7\x87\xbc\xdb\x9dTE\x01\x1d\xc5E\xbfgR@C\xb1\x99T;Y\x7f7\xc3\x02\xc1\x80\x15\xd8\x86\xbb\xc9\x8d\x993j\x05\x1e\xc0=F$\xa0g\xe3\x04\xafA\xc3&\xf6g}s\x9bf\x8b\x04\xfa\xa0\xb6\x90\xadw\xaec_\xf6E\xc0Y..\xc0W(?\x80[\xf5Y\x10W\xe9{'+\
'\r\x05\x80\xddX\xb4\x94~Qo\xb4%G\xe0\xbb\x94\xde\x0c\xabj\x80\xbdOA\xcb\x02\x7f\xcd\xdet\xd8\xa6t\xa9\x00\x94\xb2\xae\x9ef\x0b\x1c\xb8\xea\x0eQ\xc0\xef\xc4\xbc;9\xe36#\x8c\xc0d\x12L^\x0b\x96\x8a\x90\xe1\\\x0b0\xe7\xb8\r\xb4\x84\x9b\x85\xdds\x94\xf7'+\
'\xc5\x84\xd9C\x90q\x1c\x889\xacGm`x&\xc3\xe2\xb9\x80\xc5\xd8;KZ\xa5W\xc0\xa2\xea\x9d\x05\xed\t\x1aa\xe1\xc2\x95\xd9\x1d\xab\x84\xaf\x8e\x91\xf0u\x97\x1b=\xf5\xfbP\x9f\x99t\xfd\xfe_\x0b\x05\xc0\xc9Z/\xee\xfd\xc2<\xa9\x80\xceb\xbd\xa39\x036\xb3_z\xd3'+\
'\x14F.3t\xa1\xc7{\xd1\xaby\xc1\x9dU\xbf\'\x1a\x9c\xc3\xe7K\x14\xd7x\x944Ge9g\x15\x18:\xac\xf7\xe0\x8d\t\xe6\xe8D(H\x0b\x14\xe3\xcfJw\xda\x16\x91\xab\xaf\xa0[\x02\rv\xb5\xbe\tTu\xba\x0c\n\xf0\xcce\xecW$\xbbY\x9cP@\xb8\x98\xfee\xce\x18\r7E|\x8f(\xb8'+\
'\xb85\xc0\x00\x80\xb1N\xa9)\xe6\xf0\xcf\x12G\xc0\x06\xfe\xe53\xe2\x05u\xfd\xae4\xf3=\x85\xd3(.8\xd3\x80\x06\x11U\xef\xeb{/2j;\xc1*x\xbe\xabq\xf2\r>\xfe\xa7*\xb2\xc7v\xd3=\xc5/\xd0\xdd\xb0\xc7\xc1F\x93go\xf6\xb7\n\xb0\xdf!\x9e\xbb?\xaf\x0c\xe2\xd3'+\
'\xa7\xb9+w\x82/\xdf\xf7\x01#\xa2\r\xff\xa0\x0f5\xe6\x80\xadF\xc8\xd9\x87\x12/\xa8\xa7\x1bf\xfc\x0f\xdc\xec\xdb\xd5O\x0c\xc8O\xfb\xbb\x1e\x83)\xa9\xb9\xc4\xec\x0f\x80\x01\x9d8\x15\x81\xe3\xef\x19\x8e\xb36\\\x8a\xcb\x04M\xfd\x831\xc6\x19\x1ey\x93\t'+\
'\xa8i6\x10r\xba\xb8\x15\xd4\x8d\xe1\n\xd8%\xf1B6#\xfb\x93\xa5f\x83}\xf2\x06\xbb\xfb\x80 \xc9\xb9\xc2\xf8\x86\x92K\x8b^0\xbb\xa39\xe1p\x81\xc0\xc1/\xe1\x83\xc2Vr\x0f\x95\xa8\x0c\xedC,I\xaa\x08N-2y\r~,\xf5\x0f5\xd3R\xbe\x84\x9d\xa2O\xd8^\xc6\xb4Op%'+\
'\xfa\x89\x80\xc7\xa6\x03\xc6J\x0e\x18\xad\xc5\x88\xa9R\r\x07\xf36t\x9bc\x8ea\x0e:*\xef@S]\xe2E\xc6\x92n\x1f\x03\xa7!\xe1\x1c\x02\x8c\xc6j\xb3N\x95\xd2Z\x9b\xf7\xa7\x95\x02\xceRN\xed\x03\xeak\xd2~XwZ\x8eA\xe3x$~h\xa2\xee\x93\x9f\xc3{\xaf\x9b\x15'+\
'\xb0\x80\x8f6\x8e\xecg\x86\xf3\x9c\x01\xf6\xd9\x1f\xea\xcb\x9cK\xbd\xe5h\x9a\x0eX\x11\x1f\x96\xda\x03\xb7-\x91\x00&\xef\x11=\x93\xf0\x91V\xb2\xda\x0e\x7f\xa1\xd9Z\x9a\xb9\xc31N\x00\xfe\xcd$\xe8\xdc+\xcb\xf9R\xd0\x8e\xfa\xfe\x84T\x86\x9a_\xb0'+\
'\xc7\xf1\xa4\xc7d5\x0e\xd1VpH\xe3\xae\xbe\x13\xe4\xb2\xe4Hy\x88\x13\x16"\xfb\xb2s\xa9\xcc\x98n 9q\xf4\x82U\x89\x84X\xc6\xf8\x9e\x06d\xd0e\x12\x843\xc2$\xe6\xb8\xd3E\xc5\x117Q\x0c\x10\xd5<\xd2\xdaR\x7f\xd2\t\xd0\x1a\n\x04\xb4L\x89\x07+^\xe3'+\
'\xec}j\xa4\xb1E\xa7\x88\xc6\xc0\x86\xad\x05\xbe\xc9D\x94\xed\xa3?\xfe\x04\x9c6\xdc0z\xc1\x0c\xfa\xbd\xef\x98Op#\x18\xd8[\x90\xeb\x19uEY\x14\x80\xaf\x0b\x1c}C\xef\xbe\x964nb\x82\xb9|\xc1\xdb;\x88\xf8\xeeLm:i}\x01co\x04[\x8d\x03ZP\x1a ;"\x03?'+\
'\xb0\x9c\xf3\xb9\xe5E\xc4m\x91\xcaB\xa84\xc3j\xa0\x87:Of\xc3`\xe3\xaf\x96\xe8N\xb8]\x84n{\xe8\x9a\xd0\xab\x1c\xa0@%\x88\xe6_-F\xc3T\x02/\n\xdc\xdb\x85\xb2+\x1d\xe1\xec\x9c\xc1\x84\x8b\xc8Q\x11\x000v\xa3\xa6\xcd\x86\x8fY\x99\x9e\xbbAN\x1f'+\
'\x05h:\x05\xbc\xe0\x16\xd2\xda\xdc\x92\xf0\x1b\x0b\x1c\x89b\xe0\xc4\xfe\x8dN\x88\xb8}\rM\x17\xd1c;\xfc\xa9\x19\\\xef\xad|\xab\x19AJ\x1aC\x18\xbc|\x92\xff\xae^\x0f\n\xcd\x10\x8c\x840F\xab\xf8\x88\xfa\xe7N0\xf2Mg\xe2B\xa0\xe9\xf7J,\xa8\xa9'+\
'&EI\xf8E\x839\x16\x94\x1f\xb4\x0f\xd7\xcc\x0b\x10,X\xf4\x03\xda\xdcW\xc1IN\x8b-*\x9f\x07\x89JjC\xeb\xcf\xedgf\xf0\x93F\x07#\x9a\x9c\x07\xd6\xbb\xa2\xf2!\x9d&.\xf1H\xd6\'^\x90\x1e\x94\x8f,\t?\xf0\x82q\xaa\xb4\xaet\xc2\x18\x8a\xc5v\xb3\xfa'+\
'I\xda\xfc24\xb7zr\xd2\xaa\x077\x04\x1bL\xa9\xab[\x1cVK+U\xb9k\xdf@\xbb\xda\xc9\x13\xa0\xd0\x90\x0c\x92\xe5q\x9c*\x18\x17\xeeL\xd6\x14\x92SG/\xf8\xaa\xd9\xcd<\xb4$\xe1V\x0b\xaa\x1f\x8cx\xc9b#\xaek\xc4\xfb(\x19\x1a_h\x7f\xfb)i\xbbFW1\xbddJ'+\
'\xb0U9\xae\xd3X+`?\xa4\xac\xba9I\x14\x83\x05L\xaf \x99\x10\xc2E9\x13\xb5\x16\x13\x16P\x06\xd3\xd0\x16\xcaQ\x9a\xc62\xbc\xa0|\x86\x9b\x0c\xcb$\x18\xb5$:\xf2H\x9a.R\x9f\xcd E\xf3\xc9\xd3\x12\x97\xe5b\x9d\xa6z=\xf15\x1c\xe1}\x90H\xab\xa8\x02'+\
'\xe6J\'G\xa4|K\xe3I\xa5\xc0\x0fL\x0e\x11/\x98E\xb1F\xb2\xf9 6R\xfd\xda\x1a\x08vI\xfbt4\xe0>H\xd5r\xf2\xb9!\xd5x\xda\xcd\xd9Zh\xce\xb7\x83\xb1S\xca\x0e\xc8\x97\xc1\xa6\xd7E\xf9(\xa4\xe4U\xff$3>\xc4\xf8\x02\x12\xbcY\xd2\x89\xd0\x14\x02\\\xb7'+\
'\x9bB[~u\xa6\xd1\xdb\xa9\xba\x1d8\x921\xc4\x02\xa1\x9d\x9a\xacx\x045\xed\x0b\x81\xb8>@]EU\xc8#\xc6D\x19\xbft\xb2\xdf\x96\xce\xe4\x8b\xa5\xde&$}\xda8\xaf\x18\xab\xd3\xb9\xfc\x05w:aFXv\xc2\x82\x05\x87)*G\x81D\x82)\xb4\xd5\x9a\xea$\xb6"^\xb0'+\
'\x9c \xef\xd3|\x96\xc3\xedc\xea\xf6\x9cx\xa6\x1b\xe2\xe4\xd1\xa4\x05\xe6\xbc|\xe9\xc1\xfe(\xbd\x1d~\x0c\xcc\xd7\x18\x87o\x1c^\xea\xc4\xae\xaa\x02\x96\xab\xcf\x82\xfa#\xbb\x05\x8b\xebzjY\xc2\xf3\x83/\x93E\xc1\x95\xfd\xfd\xbb\x7f\x16\x08!'+\
'\x0c9\xd9\xe6\x88\tOS\x08\xe1@n+E\xaa\x90$d\x1d\xd4|\xcc\x10\xa7\xd6U\xaec\xba\xe9\xcc!\xa2\x89\x0f\x94\x8c7\x1d\x16\xefE\x1e\x0c\xe7\xce\xf4\xa2G\x8dE\xd5n\xcc\xa0\xad\xe6\xbbi\x92-\x9dl\x1c\x81\xb45\xa8\x80\r\xc8\x9b\xa2H]\t\xbc\xab\xc6^B'+\
'\xcf\x80_\xec#\xd2\xf62\xc1K\x81\xd6\x04s\x92U\xfb\x06\xe2R\xd5\xa7\x01\xbe(\xd6~~\n\n\xce\xed\xae\xe6>\xce\x9a\x14\xd0\x1c\xd5\x941\x82\xcd\xeb\xd9j\x04\xcb\x97\xa6\xb1\x86n\x98\x8c\xd98\xa8\xb6\xe63\x1c\x0c\\\x0e\xebR\x07/\xf4\xce\x88F\xb6'+\
'\x92\xbdo\x1a^t\x8d\xb1\xff,\xe5G\x82#\xd0\x0e\xa91u\xb5\x07\xe8W\xa6\xb1H\xc7\xa3\xe9`@[\x954\r\xb3\x9e/\x10/H\x7f,\x05\xa6#\xd5\xe2\x05\xd7\n\xaa7f\xe9cc\xcf%\xe5\xca\xe3\xf8\x86\xd1;\xc1\x1cI\x10p\xc1"\xa6\xbdq\xd9X;)S\xd8\x98\xe0\xe1\xff'+\
'F\xd2\xbc\x9bC\t"=E\xf6\xa9+\xb8\x04\xbd\x83\xee\xcc\xe7\xf5\x15\x9d\xef\x1d8\xcc\x1fY\xd2D\xb8\x9bL\xbd`M\xf3i=i\xf1\x82\xc2\xc6q\xf5)\xe5.\xc18\x88,-.\xcf-\xda2j&\xa4\xed\xb6\x9a\xb8\xc7!\xca\xf4\x8b\xceU\xd9\x89h?|nHN\x17\xf5\xc5\x91\x89M'+\
'\xf1y\xac\xdee\xd9&\xc2\xdd\xa3\xc4\x0b*\xa1\xedm\xe5{K/\xd8O\xc9\x16\xd0\x92\xb3\xa8\xa2f\x0eM\x07X]\xcf\x8c|e\xd4Q\x81QC\x8fS\xf6%aK\xea\xefT^Q\xda\x08\x1f#\x1e\xa5\x96\x98\xa6?&\x02v\xb5\x0cTS\x11\xff\xean\x13\xe1\xeeJrc/Q\xbf\xac~oy\x1c'+\
'\x83\x95l\x01\xf9\xfa\xcbU\xe4\xf6*p\xdb9q\xbe-\x8e\x19\xa3\xce\x12$m\xeb\xf5\x83\xbc\xd7\x93=\r~\xbbS\xd2\xe7G\x1b\xfe\xa3q<\\Q\x8b\x86\x1d\x81\xe0=g/\xd5\xb5\xc8\x11\xbb\xda\x0f=GqOG\xe1\x1f\xbd\x18\xab+\xe6\x841<\xa9\x8b\xb1\x03\xc7\xa6d'+\
'\x0b2SRR\x92\xce\x1f\xda8\xb9\x95\xdd|\xd6\xd5\xdeJi0~g\xfc\xads\x1b\xa2\xc2\x1b\xab\x98\xc8V3l\xda\xee\xeb\xdfE\x0f3\xa3\xe0\xae\x93\xb6\xfcxj\xc5\xe8\xe6J\xa6\xa8\xd9\xe0\t\xed\xad[\rZ\xbc\xf81\xbf\x98\xaa>l\xcb\x89\x1b\x17\xb7\xcc\xe8\xd7'+\
'\xc2\xe3\xbf\xf1\xd3\x00\xcc\xb3L\xd0\\\xb64\x03\x05\xf4t]\x05\xf4\xfc\x95?\xcd\xf8\xbf+\xe8\xb8}\\W\x01\xf0Fr\xc7u\xf7\x8fAv\xac\x0b+\x00~\xce\xb2\xcau\xf7_\x9a&\x7f\\\xb14\xb6\xbcz\xb8X\t\xb3<J\xb8X\x19gy\xf5p\xb1\xb2\xd4\xf2\xea\xe1b\xe5'+\
'\x90\xe5\xd5\xc3\xc5J\xe2\xb3\xfdW\xa5\xe7\xe5y\xf9\x1f(\xbf\x00\x8e\xf2\xeb\x86\xaa\xb6u\xc1\x00\x00\x00\x00IEND\xaeB`\x82'
# determine the next power of two
def npot(x):
res = 1
while res < x: res <<= 1
return res
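# e.g. npot(640) == 1024 and npot(512) == 512; presumably used elsewhere in the
# file to round screen dimensions up to power-of-two OpenGL texture sizes.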
# convert boolean value to string
def b2s(b):
if b: return "Y"
return "N"
# extract a number at the beginning of a string
def num(s):
s = s.strip()
r = ""
    while s and (s[0] in "0123456789"):
r += s[0]
s = s[1:]
try:
return int(r)
except ValueError:
return -1
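# e.g. num("12 0 R") == 12 and num("abc") == -1; used by analyze_pdf() below to
# pull page counts out of raw PDF data.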
# get a representative subset of file statistics
def my_stat(filename):
try:
s = os.stat(filename)
except OSError:
return None
return (s.st_size, s.st_mtime, s.st_ctime, s.st_mode)
# determine (pagecount,width,height) of a PDF file
def analyze_pdf(filename):
f = file(filename,"rb")
pdf = f.read()
f.close()
box = map(float, pdf.split("/MediaBox",1)[1].split("]",1)[0].split("[",1)[1].strip().split())
return (max(map(num, pdf.split("/Count")[1:])), box[2]-box[0], box[3]-box[1])
# unescape { literals in PDF files
re_unescape = re.compile(r'&#[0-9]+;')
def decode_literal(m):
try:
return chr(int(m.group(0)[2:-1]))
except ValueError:
return '?'
def unescape_pdf(s):
return re_unescape.sub(decode_literal, s)
# parse pdftk output
def pdftkParse(filename, page_offset=0):
f = file(filename, "r")
InfoKey = None
BookmarkTitle = None
Title = None
Pages = 0
for line in f.xreadlines():
try:
key, value = [item.strip() for item in line.split(':', 1)]
        except (IndexError, ValueError):
continue
key = key.lower()
if key == "numberofpages":
Pages = int(value)
elif key == "infokey":
InfoKey = value.lower()
elif (key == "infovalue") and (InfoKey == "title"):
Title = unescape_pdf(value)
InfoKey = None
elif key == "bookmarktitle":
BookmarkTitle = unescape_pdf(value)
elif key == "bookmarkpagenumber" and BookmarkTitle:
try:
page = int(value)
if not GetPageProp(page + page_offset, '_title'):
SetPageProp(page + page_offset, '_title', BookmarkTitle)
except ValueError:
pass
BookmarkTitle = None
f.close()
if AutoOverview:
SetPageProp(page_offset + 1, '_overview', True)
for page in xrange(page_offset + 2, page_offset + Pages):
SetPageProp(page, '_overview', \
not(not(GetPageProp(page + AutoOverview - 1, '_title'))))
SetPageProp(page_offset + Pages, '_overview', True)
return (Title, Pages)
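# pdftkParse() reads the output of `pdftk <file> dump_data`, which consists of
# "key: value" lines roughly of the form (illustrative excerpt, not a full spec):
#   InfoKey: Title
#   InfoValue: My Presentation
#   NumberOfPages: 42
#   BookmarkTitle: Introduction
#   BookmarkPageNumber: 3
# Bookmark titles end up as per-page '_title' properties; with AutoOverview they
# also drive the '_overview' flags set above.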
# translate pixel coordinates to normalized screen coordinates
def MouseToScreen(mousepos):
return (ZoomX0 + mousepos[0] * ZoomArea / ScreenWidth,
ZoomY0 + mousepos[1] * ZoomArea / ScreenHeight)
# normalize rectangle coordinates so that the upper-left point comes first
def NormalizeRect(X0, Y0, X1, Y1):
return (min(X0, X1), min(Y0, Y1), max(X0, X1), max(Y0, Y1))
# check if a point is inside a box (or a list of boxes)
def InsideBox(x, y, box):
return (x >= box[0]) and (y >= box[1]) and (x < box[2]) and (y < box[3])
def FindBox(x, y, boxes):
for i in xrange(len(boxes)):
if InsideBox(x, y, boxes[i]):
return i
raise ValueError
# zoom an image size to a destination size, preserving the aspect ratio
def ZoomToFit(size, dest=None):
if not dest:
dest = (ScreenWidth, ScreenHeight)
newx = dest[0]
newy = size[1] * newx / size[0]
if newy > dest[1]:
newy = dest[1]
newx = size[0] * newy / size[1]
return (newx, newy)
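# e.g. with a 1024x768 destination, ZoomToFit((400, 300)) -> (1024, 768) and
# ZoomToFit((1600, 600)) -> (1024, 384): the result always fits inside `dest`
# and keeps the source aspect ratio (integer math, so expect slight rounding).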
# get the overlay grid screen coordinates for a specific page
def OverviewPos(page):
return ( \
int(page % OverviewGridSize) * OverviewCellX + OverviewOfsX, \
int(page / OverviewGridSize) * OverviewCellY + OverviewOfsY \
)
def StopMPlayer():
global MPlayerPID, VideoPlaying
if not MPlayerPID: return
try:
if os.name == 'nt':
win32api.TerminateProcess(MPlayerPID, 0)
else:
os.kill(MPlayerPID, 2)
MPlayerPID = 0
except:
pass
VideoPlaying = False
def FormatTime(t, minutes=False):
if minutes and (t < 3600):
return "%d min" % (t / 60)
elif minutes:
return "%d:%02d" % (t / 3600, (t / 60) % 60)
elif t < 3600:
return "%d:%02d" % (t / 60, t % 60)
else:
ms = t % 3600
return "%d:%02d:%02d" % (t / 3600, ms / 60, ms % 60)
def SafeCall(func, args=[], kwargs={}):
if not func: return None
try:
return func(*args, **kwargs)
except:
print >>sys.stderr, "----- Exception in user function ----"
traceback.print_exc(file=sys.stderr)
print >>sys.stderr, "----- End of traceback -----"
def Quit(code=0):
print >>sys.stderr, "Total presentation time: %s." % \
FormatTime((pygame.time.get_ticks() - StartTime) / 1000)
sys.exit(code)
##### RENDERING TOOL CODE ######################################################
class FrameCoordinates:
RESOLUTION_REGEX = re.compile(
r"(\d+)x(\d+)")
GEOMETRY_REGEX = re.compile(
r"""(\d+)x(\d+) # resolution
\+(\d+)\+(\d+) # offset""", re.VERBOSE)
# offset_x, offset_y, width, height
def __init__(self, width=None, height=None, offset_x=0, offset_y=0, size_tuple=None, full_tuple=None):
if full_tuple:
self.__init__(full_tuple[0], full_tuple[1], full_tuple[2], full_tuple[3])
elif size_tuple:
self.__init__(size_tuple[0], size_tuple[1], offset_x, offset_y)
elif width and height:
self.width = width
self.height = height
self.offset_x = offset_x
self.offset_y = offset_y
else:
raise ValueError, "no size given"
@classmethod
def parse(cls, geometry):
"""Creates new FrameCoordinates object by parsing a X11 like geometry string.
Example: 1024x768+1280+0"""
parsed = FrameCoordinates.GEOMETRY_REGEX.match(geometry)
if parsed:
return cls(full_tuple=[int(elem) for elem in parsed.groups()])
else:
parsed = FrameCoordinates.RESOLUTION_REGEX.match(geometry)
if parsed:
return cls(size_tuple=[int(elem) for elem in parsed.groups()])
else:
raise ValueError, "Geometry string '%s' could not be parsed" % geometry
def __repr__(self):
return "size %d,%d offset %d,%d" % (self.width, self.height, self.offset_x, self.offset_y)
def as_tuple(self):
return (self.width, self.height, self.offset_x, self.offset_y)
def size(self):
return (self.width, self.height)
def offset(self):
return (self.offset_x, self.offset_y)
def divide_padding(self, amount, ratio):
sum = ratio[0] + ratio[1]
if sum > 0:
return amount * ratio[0] / sum
else:
return 0
def adjust_to_aspect_ratio(self, aspect_ratio, vertical_padding_ratio=(1,1), horizontal_padding_ratio=(1,1)):
"""The new box will completely fit into the old one, have desired aspect ratio,
and will be padded with white space in required proportion.
Ratio parameters are two-element-tuples
(1,1) for padding means 'center!'
(1,2) for horizontal_padding means one third left and two thirds right
vertical ratio is bottom to top (upside down), horizontal is left to right """
new_width = self.height * aspect_ratio[0] / aspect_ratio[1]
new_height = self.width * aspect_ratio[1] / aspect_ratio[0]
if new_width <= self.width:
self.offset_x += self.divide_padding(self.width-new_width, horizontal_padding_ratio)
self.width = new_width
else:
self.offset_y += self.divide_padding(self.height-new_height, vertical_padding_ratio)
self.height = new_height
def glViewport(self):
glViewport(self.offset_x, self.offset_y, self.width, self.height)
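# Illustrative use of FrameCoordinates (a sketch; the real frames are built from
# the command-line geometry options elsewhere in the file):
#   frame = FrameCoordinates.parse("1024x768+1280+0")  # size 1024x768, offset +1280+0
#   frame.adjust_to_aspect_ratio((4, 3))               # center a 4:3 area inside it
#   frame.glViewport()                                 # restrict GL drawing to that area
# Plain resolutions like "800x600" are accepted too (the offset defaults to 0,0).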
# draw a fullscreen quad
def DrawFullQuad():
glBegin(GL_QUADS)
glTexCoord2d( 0.0, 0.0); glVertex2i(0, 0)
glTexCoord2d(TexMaxS, 0.0); glVertex2i(1, 0)
glTexCoord2d(TexMaxS, TexMaxT); glVertex2i(1, 1)
glTexCoord2d( 0.0, TexMaxT); glVertex2i(0, 1)
glEnd()
def DrawFullQuadProbe(): # TODO: probe for rendering twice
glBegin(GL_QUADS)
glTexCoord2d( 0.0, 0.0); glVertex2d(0, 0)
glTexCoord2d(TexMaxS, 0.0); glVertex2d(0.5, 0)
glTexCoord2d(TexMaxS, TexMaxT); glVertex2d(0.5, 0.5)
glTexCoord2d( 0.0, TexMaxT); glVertex2d(0, 0.5)
glTexCoord2d( 0.0, 0.0); glVertex2d(0.5, 0)
glTexCoord2d(TexMaxS, 0.0); glVertex2d(1, 0)
glTexCoord2d(TexMaxS, TexMaxT); glVertex2d(1, 0.5)
glTexCoord2d( 0.0, TexMaxT); glVertex2d(0.5, 0.5)
glEnd()
# draw a generic 2D quad
def DrawQuad(x0=0.0, y0=0.0, x1=1.0, y1=1.0):
glBegin(GL_QUADS)
glTexCoord2d( 0.0, 0.0); glVertex2d(x0, y0)
glTexCoord2d(TexMaxS, 0.0); glVertex2d(x1, y0)
glTexCoord2d(TexMaxS, TexMaxT); glVertex2d(x1, y1)
glTexCoord2d( 0.0, TexMaxT); glVertex2d(x0, y1)
glEnd()
# helper function: draw a translated fullscreen quad
def DrawTranslatedFullQuad(dx, dy, i, a):
glColor4d(i, i, i, a)
glPushMatrix()
glTranslated(dx, dy, 0.0)
DrawFullQuad()
glPopMatrix()
# draw a vertex in normalized screen coordinates,
# setting texture coordinates appropriately
def DrawPoint(x, y):
glTexCoord2d(x *TexMaxS, y * TexMaxT)
glVertex2d(x, y)
def DrawPointEx(x, y, a):
glColor4d(1.0, 1.0, 1.0, a)
glTexCoord2d(x * TexMaxS, y * TexMaxT)
glVertex2d(x, y)
# a mesh transformation function: it gets the relative transition time (in the
# [0.0,1.0) interval) and the normalized 2D screen coordinates, and returns a
# 7-tuple containing the desired 3D screen coordinates, 2D texture coordinates,
# and intensity/alpha color values.
def meshtrans_null(t, u, v):
return (u, v, 0.0, u, v, 1.0, t)
# (x, y, z, s, t, i, a)
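# Illustrative (hypothetical, not used by any built-in transition): a transform
# that ripples the page in depth while fading it in could look roughly like
#   def meshtrans_ripple(t, u, v):
#       return (u, v, 0.05 * sin(10.0 * u + 6.0 * t), u, v, 1.0, t)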
# draw a quad, applying a mesh transformation function
def DrawMeshQuad(time=0.0, f=meshtrans_null):
line0 = [f(time, u * MeshStepX, 0.0) for u in xrange(MeshResX + 1)]
for v in xrange(1, MeshResY + 1):
line1 = [f(time, u * MeshStepX, v * MeshStepY) for u in xrange(MeshResX + 1)]
glBegin(GL_QUAD_STRIP)
for col in zip(line0, line1):
for x, y, z, s, t, i, a in col:
glColor4d(i, i, i, a)
glTexCoord2d(s * TexMaxS, t * TexMaxT)
glVertex3d(x, y, z)
glEnd()
line0 = line1
def GenerateSpotMesh():
global SpotMesh
rx0 = SpotRadius * PixelX
ry0 = SpotRadius * PixelY
rx1 = (SpotRadius + BoxEdgeSize) * PixelX
ry1 = (SpotRadius + BoxEdgeSize) * PixelY
steps = max(6, int(2.0 * pi * SpotRadius / SpotDetail / ZoomArea))
SpotMesh=[(rx0 * sin(a), ry0 * cos(a), rx1 * sin(a), ry1 * cos(a)) for a in \
[i * 2.0 * pi / steps for i in range(steps + 1)]]
##### TRANSITIONS ##############################################################
# Each transition is represented by a class derived from impressive.Transition
# The interface consists of only two methods: the __init__ method may perform
# some transition-specific initialization, and render() finally renders a frame
# of the transition, using the global texture identifiers Tcurrent and Tnext.
# Transition itself is an abstract class
class AbstractError(StandardError):
pass
class Transition:
def __init__(self):
pass
def render(self, t):
raise AbstractError
# an array containing all possible transition classes
AllTransitions=[]
# a helper function doing the common task of directly blitting a background page
def DrawPageDirect(tex):
glDisable(GL_BLEND)
glBindTexture(TextureTarget, tex)
glColor3d(1, 1, 1)
DrawFullQuad()
# a helper function that enables alpha blending
def EnableAlphaBlend():
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Crossfade: one of the simplest transitions you can think of :)
class Crossfade(Transition):
"""simple crossfade"""
def render(self,t):
DrawPageDirect(Tcurrent)
EnableAlphaBlend()
glBindTexture(TextureTarget, Tnext)
glColor4d(1, 1, 1, t)
DrawFullQuad()
AllTransitions.append(Crossfade)
# Slide: a class of transitions that simply slide the new page in from one side
# after an idea from Joachim B Haga
class Slide(Transition):
def origin(self, t):
raise AbstractError
def render(self, t):
cx, cy, nx, ny = self.origin(t)
glBindTexture(TextureTarget, Tcurrent)
DrawQuad(cx, cy, cx+1.0, cy+1.0)
glBindTexture(TextureTarget, Tnext)
DrawQuad(nx, ny, nx+1.0, ny+1.0)
class SlideLeft(Slide):
"""Slide to the left"""
def origin(self, t): return (-t, 0.0, 1.0-t, 0.0)
class SlideRight(Slide):
"""Slide to the right"""
def origin(self, t): return (t, 0.0, t-1.0, 0.0)
class SlideUp(Slide):
"""Slide upwards"""
def origin(self, t): return (0.0, -t, 0.0, 1.0-t)
class SlideDown(Slide):
"""Slide downwards"""
def origin(self, t): return (0.0, t, 0.0, t-1.0)
AllTransitions.extend([SlideLeft, SlideRight, SlideUp, SlideDown])
# Squeeze: a class of transitions that squeeze the new page in from one side
class Squeeze(Transition):
def params(self, t):
raise AbstractError
def inv(self): return 0
def render(self, t):
cx1, cy1, nx0, ny0 = self.params(t)
if self.inv():
t1, t2 = (Tnext, Tcurrent)
else:
t1, t2 = (Tcurrent, Tnext)
glBindTexture(TextureTarget, t1)
DrawQuad(0.0, 0.0, cx1, cy1)
glBindTexture(TextureTarget, t2)
DrawQuad(nx0, ny0, 1.0, 1.0)
class SqueezeHorizontal(Squeeze):
def split(self, t): raise AbstractError
def params(self, t):
t = self.split(t)
return (t, 1.0, t, 0.0)
class SqueezeVertical(Squeeze):
def split(self, t): raise AbstractError
def params(self, t):
t = self.split(t)
return (1.0, t, 0.0, t)
class SqueezeLeft(SqueezeHorizontal):
"""Squeeze to the left"""
def split(self, t): return 1.0 - t
class SqueezeRight(SqueezeHorizontal):
"""Squeeze to the right"""
def split(self, t): return t
def inv(self): return 1
class SqueezeUp(SqueezeVertical):
"""Squeeze upwards"""
def split(self, t): return 1.0 - t
class SqueezeDown(SqueezeVertical):
"""Squeeze downwards"""
def split(self, t): return t
def inv(self): return 1
AllTransitions.extend([SqueezeLeft, SqueezeRight, SqueezeUp, SqueezeDown])
# Wipe: a class of transitions that softly "wipe" the new image over the old
# one along a path specified by a gradient function that maps normalized screen
# coordinates to a number in the range [0.0,1.0]
WipeWidth = 0.25
class Wipe(Transition):
def grad(self, u, v):
raise AbstractError
def afunc(self, g):
pos = (g - self.Wipe_start) / WipeWidth
return max(min(pos, 1.0), 0.0)
def render(self, t):
DrawPageDirect(Tnext)
EnableAlphaBlend()
glBindTexture(TextureTarget, Tcurrent)
self.Wipe_start = t * (1.0 + WipeWidth) - WipeWidth
DrawMeshQuad(t, lambda t, u, v: \
(u, v, 0.0, u,v, 1.0, self.afunc(self.grad(u, v))))
class WipeDown(Wipe):
"""wipe downwards"""
def grad(self, u, v): return v
class WipeUp(Wipe):
"""wipe upwards"""
def grad(self, u, v): return 1.0 - v
class WipeRight(Wipe):
"""wipe from left to right"""
def grad(self, u, v): return u
class WipeLeft(Wipe):
"""wipe from right to left"""
def grad(self, u, v): return 1.0 - u
class WipeDownRight(Wipe):
"""wipe from the upper-left to the lower-right corner"""
def grad(self, u, v): return 0.5 * (u + v)
class WipeUpLeft(Wipe):
"""wipe from the lower-right to the upper-left corner"""
def grad(self, u, v): return 1.0 - 0.5 * (u + v)
class WipeCenterOut(Wipe):
"""wipe from the center outwards"""
def grad(self, u, v):
u -= 0.5
v -= 0.5
return sqrt(u * u * 1.777 + v * v) / 0.833
class WipeCenterIn(Wipe):
"""wipe from the edges inwards"""
def grad(self, u, v):
u -= 0.5
v -= 0.5
return 1.0 - sqrt(u * u * 1.777 + v * v) / 0.833
AllTransitions.extend([WipeDown, WipeUp, WipeRight, WipeLeft, \
WipeDownRight, WipeUpLeft, WipeCenterOut, WipeCenterIn])
class WipeBlobs(Wipe):
"""wipe using nice \"blob\"-like patterns"""
def __init__(self):
self.uscale = (5.0 + random.random() * 15.0) * 1.333
self.vscale = 5.0 + random.random() * 15.0
self.uofs = random.random() * 6.2
self.vofs = random.random() * 6.2
def grad(self,u,v):
return 0.5 + 0.25 * (cos(self.uofs + u * self.uscale) \
+ cos(self.vofs + v * self.vscale))
AllTransitions.append(WipeBlobs)
class PagePeel(Transition):
"""an unrealistic, but nice page peel effect"""
def render(self,t):
glDisable(GL_BLEND)
glBindTexture(TextureTarget, Tnext)
DrawMeshQuad(t, lambda t, u, v: \
(u, v, 0.0, u, v, 1.0 - 0.5 * (1.0 - u) * (1.0 - t), 1.0))
EnableAlphaBlend()
glBindTexture(TextureTarget, Tcurrent)
DrawMeshQuad(t, lambda t, u, v: \
(u * (1.0 - t), 0.5 + (v - 0.5) * (1.0 + u * t) * (1.0 + u * t), 0.0,
u, v, 1.0 - u * t * t, 1.0))
AllTransitions.append(PagePeel)
### additional transition by Ronan Le Hy <[email protected]> ###
class PageTurn(Transition):
"""another page peel effect, slower but more realistic than PagePeel"""
alpha = 2.
alpha_square = alpha * alpha
sqrt_two = sqrt(2.)
inv_sqrt_two = 1. / sqrt(2.)
def warp(self, t, u, v):
# distance from the 2d origin to the folding line
dpt = PageTurn.sqrt_two * (1.0 - t)
# distance from the 2d origin to the projection of (u,v) on the folding line
d = PageTurn.inv_sqrt_two * (u + v)
dmdpt = d - dpt
# the smaller rho is, the closer to asymptotes are the x(u) and y(v) curves
# ie, smaller rho => neater fold
rho = 0.001
common_sq = sqrt(4. - 8 * t - 4.*(u+v) + 4.*t*(t + v + u) + (u+v)*(u+v) + 4 * rho) / 2.
x = 1. - t + 0.5 * (u - v) - common_sq
y = 1. - t + 0.5 * (v - u) - common_sq
z = - 0.5 * (PageTurn.alpha * dmdpt + sqrt(PageTurn.alpha_square * dmdpt*dmdpt + 4))
if dmdpt < 0:
# part of the sheet still flat on the screen: lit and opaque
i = 1.0
alpha = 1.0
else:
# part of the sheet in the air, after the fold: shadowed and transparent
# z goes from -0.8 to -2 approximately
i = -0.5 * z
alpha = 0.5 * z + 1.5
# the corner of the page that you hold between your fingers
dthumb = 0.6 * u + 1.4 * v - 2 * 0.95
if dthumb > 0:
z -= dthumb
x += dthumb
y += dthumb
i = 1.0
alpha = 1.0
return (x,y,z, u,v, i, alpha)
def render(self, t):
glDisable(GL_BLEND)
glBindTexture(TextureTarget, Tnext)
DrawMeshQuad(t,lambda t, u, v: \
(u, v, 0.0, u, v, 1.0 - 0.5 * (1.0 - u) * (1.0 - t), 1.0))
EnableAlphaBlend()
glBindTexture(TextureTarget, Tcurrent)
DrawMeshQuad(t, self.warp)
AllTransitions.append(PageTurn)
##### some additional transitions by Rob Reid <[email protected]> #####
class ZoomOutIn(Transition):
"""zooms the current page out, and the next one in."""
def render(self, t):
glColor3d(0.0, 0.0, 0.0)
DrawFullQuad()
if t < 0.5:
glBindTexture(TextureTarget, Tcurrent)
scalfact = 1.0 - 2.0 * t
DrawMeshQuad(t, lambda t, u, v: (0.5 + scalfact * (u - 0.5), \
0.5 + scalfact * (v - 0.5), 0.0, \
u, v, 1.0, 1.0))
else:
glBindTexture(TextureTarget, Tnext)
scalfact = 2.0 * t - 1.0
EnableAlphaBlend()
DrawMeshQuad(t, lambda t, u, v: (0.5 + scalfact * (u - 0.5), \
0.5 + scalfact * (v - 0.5), 0.0, \
u, v, 1.0, 1.0))
AllTransitions.append(ZoomOutIn)
class SpinOutIn(Transition):
"""spins the current page out, and the next one in."""
def render(self, t):
glColor3d(0.0, 0.0, 0.0)
DrawFullQuad()
if t < 0.5:
glBindTexture(TextureTarget, Tcurrent)
scalfact = 1.0 - 2.0 * t
else:
glBindTexture(TextureTarget, Tnext)
scalfact = 2.0 * t - 1.0
sa = scalfact * sin(16.0 * t)
ca = scalfact * cos(16.0 * t)
DrawMeshQuad(t,lambda t, u, v: (0.5 + ca * (u - 0.5) - 0.75 * sa * (v - 0.5),\
0.5 + 1.333 * sa * (u - 0.5) + ca * (v - 0.5),\
0.0, u, v, 1.0, 1.0))
AllTransitions.append(SpinOutIn)
class SpiralOutIn(Transition):
"""flushes the current page away to have the next one overflow"""
def render(self, t):
glColor3d(0.0, 0.0, 0.0)
DrawFullQuad()
if t < 0.5:
glBindTexture(TextureTarget,Tcurrent)
scalfact = 1.0 - 2.0 * t
else:
glBindTexture(TextureTarget,Tnext)
scalfact = 2.0 * t - 1.0
sa = scalfact * sin(16.0 * t)
ca = scalfact * cos(16.0 * t)
DrawMeshQuad(t, lambda t, u, v: (0.5 + sa + ca * (u - 0.5) - 0.75 * sa * (v - 0.5),\
0.5 + ca + 1.333 * sa * (u - 0.5) + ca * (v - 0.5),\
0.0, u, v, 1.0, 1.0))
AllTransitions.append(SpiralOutIn)
# the AvailableTransitions array contains a list of all transition classes that
# can be randomly assigned to pages
AvailableTransitions=[ # from coolest to lamest
# PagePeel, # deactivated: too intrusive
# WipeBlobs,
# WipeCenterOut,WipeCenterIn,
# WipeDownRight,WipeUpLeft,WipeDown,WipeUp,WipeRight,WipeLeft,
Crossfade
]
##### OSD FONT RENDERER ########################################################
# force a string or sequence of ordinals into a unicode string
def ForceUnicode(s, charset='iso8859-15'):
if type(s) == types.UnicodeType:
return s
if type(s) == types.StringType:
return unicode(s, charset, 'ignore')
if type(s) in (types.TupleType, types.ListType):
return u''.join(map(unichr, s))
raise TypeError, "string argument not convertible to Unicode"
# search a system font path for a font file
def SearchFont(root, name):
if not os.path.isdir(root):
return None
infix = ""
fontfile = []
while (len(infix) < 10) and (len(fontfile) != 1):
fontfile = filter(os.path.isfile, glob.glob(root + infix + name))
infix += "*/"
if len(fontfile) != 1:
return None
else:
return fontfile[0]
# load a system font
def LoadFont(dirs, name, size):
# first try to load the font directly
try:
return ImageFont.truetype(name, size, encoding='unic')
except:
pass
# no need to search further on Windows
if os.name == 'nt':
return None
# start search for the font
for dir in dirs:
fontfile = SearchFont(dir + "/", name)
if fontfile:
try:
return ImageFont.truetype(fontfile, size, encoding='unic')
except:
pass
return None
# alignment constants
Left = 0
Right = 1
Center = 2
Down = 0
Up = 1
Auto = -1
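# Note: Right (1) and Center (2) double as divisors in GLFont.AlignTextEx() below
# (x - text_width / align), which is why Left has to stay 0 ("no shift").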
# font renderer class
class GLFont:
def __init__(self, width, height, name, size, search_path=[], default_charset='iso8859-15', extend=1, blur=1):
self.width = width
self.height = height
self._i_extend = range(extend)
self._i_blur = range(blur)
self.feather = extend + blur + 1
self.current_x = 0
self.current_y = 0
self.max_height = 0
self.boxes = {}
self.widths = {}
self.line_height = 0
self.default_charset = default_charset
if type(name) == types.StringType:
self.font = LoadFont(search_path, name, size)
else:
for check_name in name:
self.font = LoadFont(search_path, check_name, size)
if self.font: break
if not self.font:
raise IOError, "font file not found"
self.img = Image.new('LA', (width, height))
self.alpha = Image.new('L', (width, height))
self.extend = ImageFilter.MaxFilter()
self.blur = ImageFilter.Kernel((3, 3), [1,2,1,2,4,2,1,2,1])
self.tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.tex)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
self.AddString(range(32, 128))
def AddCharacter(self, c):
w, h = self.font.getsize(c)
self.line_height = max(self.line_height, h)
size = (w + 2 * self.feather, h + 2 * self.feather)
glyph = Image.new('L', size)
draw = ImageDraw.Draw(glyph)
draw.text((self.feather, self.feather), c, font=self.font, fill=255)
del draw
box = self.AllocateGlyphBox(*size)
self.img.paste(glyph, (box.orig_x, box.orig_y))
for i in self._i_extend: glyph = glyph.filter(self.extend)
for i in self._i_blur: glyph = glyph.filter(self.blur)
self.alpha.paste(glyph, (box.orig_x, box.orig_y))
self.boxes[c] = box
self.widths[c] = w
del glyph
def AddString(self, s, charset=None, fail_silently=False):
update_count = 0
try:
for c in ForceUnicode(s, self.GetCharset(charset)):
if c in self.widths:
continue
self.AddCharacter(c)
update_count += 1
except ValueError:
if fail_silently:
pass
else:
raise
if not update_count: return
self.img.putalpha(self.alpha)
glBindTexture(GL_TEXTURE_2D, self.tex)
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, \
self.width, self.height, 0, \
GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, self.img.tostring())
def AllocateGlyphBox(self, w, h):
if self.current_x + w > self.width:
self.current_x = 0
self.current_y += self.max_height
self.max_height = 0
if self.current_y + h > self.height:
raise ValueError, "bitmap too small for all the glyphs"
box = self.GlyphBox()
box.orig_x = self.current_x
box.orig_y = self.current_y
box.size_x = w
box.size_y = h
box.x0 = self.current_x / float(self.width)
box.y0 = self.current_y / float(self.height)
box.x1 = (self.current_x + w) / float(self.width)
box.y1 = (self.current_y + h) / float(self.height)
box.dsx = w * PixelX
box.dsy = h * PixelY
self.current_x += w
self.max_height = max(self.max_height, h)
return box
def GetCharset(self, charset=None):
if charset: return charset
return self.default_charset
def SplitText(self, s, charset=None):
return ForceUnicode(s, self.GetCharset(charset)).split(u'\n')
def GetLineHeight(self):
return self.line_height
def GetTextWidth(self, s, charset=None):
return max([self.GetTextWidthEx(line) for line in self.SplitText(s, charset)])
def GetTextHeight(self, s, charset=None):
return len(self.SplitText(s, charset)) * self.line_height
def GetTextSize(self, s, charset=None):
lines = self.SplitText(s, charset)
return (max([self.GetTextWidthEx(line) for line in lines]), len(lines) * self.line_height)
def GetTextWidthEx(self, u):
if u: return sum([self.widths.get(c, 0) for c in u])
else: return 0
def GetTextHeightEx(self, u=[]):
return self.line_height
def AlignTextEx(self, x, u, align=Left):
if not align: return x
return x - (self.GetTextWidthEx(u) / align)
def Draw(self, origin, text, charset=None, align=Left, color=(1.0, 1.0, 1.0), alpha=1.0, beveled=True):
lines = self.SplitText(text, charset)
x0, y0 = origin
x0 -= self.feather
y0 -= self.feather
glEnable(GL_TEXTURE_2D)
glEnable(GL_BLEND)
glBindTexture(GL_TEXTURE_2D, self.tex)
if beveled:
glBlendFunc(GL_ZERO, GL_ONE_MINUS_SRC_ALPHA)
glColor4d(0.0, 0.0, 0.0, alpha)
self.DrawLinesEx(x0, y0, lines, align)
glBlendFunc(GL_ONE, GL_ONE)
glColor3d(color[0] * alpha, color[1] * alpha, color[2] * alpha)
self.DrawLinesEx(x0, y0, lines, align)
glDisable(GL_BLEND)
glDisable(GL_TEXTURE_2D)
def DrawLinesEx(self, x0, y, lines, align=Left):
global PixelX, PixelY
glBegin(GL_QUADS)
for line in lines:
sy = y * PixelY
x = self.AlignTextEx(x0, line, align)
for c in line:
if not c in self.widths: continue
self.boxes[c].render(x * PixelX, sy)
x += self.widths[c]
y += self.line_height
glEnd()
class GlyphBox:
def render(self, sx=0.0, sy=0.0):
glTexCoord2d(self.x0, self.y0); glVertex2d(sx, sy)
glTexCoord2d(self.x0, self.y1); glVertex2d(sx, sy+self.dsy)
glTexCoord2d(self.x1, self.y1); glVertex2d(sx+self.dsx, sy+self.dsy)
glTexCoord2d(self.x1, self.y0); glVertex2d(sx+self.dsx, sy)
# high-level draw function
def DrawOSD(x, y, text, halign=Auto, valign=Auto, alpha=1.0):
if not(OSDFont) or not(text) or (alpha <= 0.004): return
if alpha > 1.0: alpha = 1.0
if halign == Auto:
if x < 0:
x += ScreenWidth
halign = Right
else:
halign = Left
if valign == Auto:
if y < 0:
y += ScreenHeight
valign = Up
else:
valign = Down
if valign != Down:
y -= OSDFont.GetLineHeight() / valign
if TextureTarget != GL_TEXTURE_2D:
glDisable(TextureTarget)
OSDFont.Draw((x, y), text, align=halign, alpha=alpha)
# very high-level draw function
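# The `position` argument packs the alignment into an integer: bit 0 selects the
# vertical edge (0 = top, 1 = bottom) and the remaining bits the horizontal
# placement (0 = left, 1 = right, >= 2 = centered), so e.g. 3 means bottom right.
# The concrete OSD*Pos values are presumably defined in the configuration part
# of the file, above this section.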
def DrawOSDEx(position, text, alpha_factor=1.0):
xpos = position >> 1
y = (1 - 2 * (position & 1)) * OSDMargin
if xpos < 2:
x = (1 - 2 * xpos) * OSDMargin
halign = Auto
else:
x = ScreenWidth / 2
halign = Center
DrawOSD(x, y, text, halign, alpha = OSDAlpha * alpha_factor)
##### PDF PARSER ###############################################################
class PDFError(Exception):
pass
class PDFref:
def __init__(self, ref):
self.ref = ref
def __repr__(self):
return "PDFref(%d)" % self.ref
re_pdfstring = re.compile(r'\(\)|\(.*?[^\\]\)')
pdfstringrepl = [("\\"+x[0], x[1:]) for x in "(( )) n\n r\r t\t".split(" ")]
def pdf_maskstring(s):
s = s[1:-1]
for a, b in pdfstringrepl:
s = s.replace(a, b)
return " <" + "".join(["%02X"%ord(c) for c in s]) + "> "
def pdf_mask_all_strings(s):
return re_pdfstring.sub(lambda x: pdf_maskstring(x.group(0)), s)
def pdf_unmaskstring(s):
return "".join([chr(int(s[i:i+2], 16)) for i in xrange(1, len(s)-1, 2)])
class PDFParser:
def __init__(self, filename):
self.f = file(filename, "rb")
# find the first cross-reference table
self.f.seek(0, 2)
filesize = self.f.tell()
self.f.seek(filesize - 128)
trailer = self.f.read()
i = trailer.rfind("startxref")
if i < 0:
raise PDFError, "cross-reference table offset missing"
try:
offset = int(trailer[i:].split("\n")[1].strip())
except (IndexError, ValueError):
raise PDFError, "malformed cross-reference table offset"
# follow the trailer chain
self.xref = {}
while offset:
newxref = self.xref
self.xref, rootref, offset = self.parse_trailer(offset)
self.xref.update(newxref)
# scan the page tree
self.obj2page = {}
self.page2obj = {}
self.annots = {}
self.page_count = 0
self.box = {}
root = self.getobj(rootref, 'Catalog')
try:
self.scan_page_tree(root['Pages'].ref)
except KeyError:
raise PDFError, "root page tree node missing"
def getline(self):
while True:
line = self.f.readline().strip()
if line: return line
def find_length(self, tokens, begin, end):
level = 1
for i in xrange(1, len(tokens)):
if tokens[i] == begin: level += 1
if tokens[i] == end: level -= 1
if not level: break
return i + 1
def parse_tokens(self, tokens, want_list=False):
res = []
while tokens:
t = tokens[0]
v = t
tlen = 1
if (len(tokens) >= 3) and (tokens[2] == 'R'):
v = PDFref(int(t))
tlen = 3
elif t == "<<":
tlen = self.find_length(tokens, "<<", ">>")
v = self.parse_tokens(tokens[1 : tlen - 1], True)
v = dict(zip(v[::2], v[1::2]))
elif t == "[":
tlen = self.find_length(tokens, "[", "]")
v = self.parse_tokens(tokens[1 : tlen - 1], True)
            elif not(t) or (t == "null"):
v = None
elif (t[0] == '<') and (t[-1] == '>'):
v = pdf_unmaskstring(t)
elif t[0] == '/':
v = t[1:]
elif t == 'null':
v = None
else:
try:
v = float(t)
v = int(t)
except ValueError:
pass
res.append(v)
del tokens[:tlen]
if want_list:
return res
if not res:
return None
if len(res) == 1:
return res[0]
return res
def parse(self, data):
data = pdf_mask_all_strings(data)
data = data.replace("<<", " << ").replace("[", " [ ").replace("(", " (")
data = data.replace(">>", " >> ").replace("]", " ] ").replace(")", ") ")
data = data.replace("/", " /")
return self.parse_tokens(filter(None, data.split()))
def getobj(self, obj, force_type=None):
offset = self.xref.get(obj, 0)
if not offset:
raise PDFError, "referenced non-existing PDF object"
self.f.seek(offset)
header = self.getline().split(None, 2)
if (header[-1] != "obj") or (header[0] != str(obj)):
raise PDFError, "object does not start where it's supposed to"
data = []
while True:
line = self.getline()
if line in ("endobj", "stream"): break
data.append(line)
data = self.parse(" ".join(data))
if force_type:
try:
t = data['Type']
except (KeyError, IndexError, ValueError):
t = None
if t != force_type:
raise PDFError, "object does not match the intended type"
return data
def parse_xref_section(self, start, count):
xref = {}
for obj in xrange(start, start + count):
line = self.getline()
if line[-1] == 'f':
xref[obj] = 0
else:
xref[obj] = int(line[:10], 10)
return xref
def parse_trailer(self, offset):
self.f.seek(offset)
xref = {}
rootref = 0
offset = 0
if self.getline() != "xref":
raise PDFError, "cross-reference table does not start where it's supposed to"
# parse xref sections
while True:
line = self.getline()
if line == "trailer": break
start, count = map(int, line.split())
xref.update(self.parse_xref_section(start, count))
# parse trailer
while True:
line = self.getline()
if line in ("startxref", "%%EOF"): break
if line[0] != '/': continue
parts = line[1:].split()
if parts[0] == 'Prev':
offset = int(parts[1])
if parts[0] == 'Root':
if (len(parts) != 4) or (parts[3] != 'R'):
raise PDFError, "root catalog entry is not a reference"
rootref = int(parts[1])
return (xref, rootref, offset)
def scan_page_tree(self, obj, mbox=None, cbox=None):
node = self.getobj(obj)
if node['Type'] == 'Pages':
for kid in node['Kids']:
self.scan_page_tree(kid.ref, node.get('MediaBox', mbox), node.get('CropBox', cbox))
else:
page = self.page_count + 1
self.page_count = page
self.obj2page[obj] = page
self.page2obj[page] = obj
self.annots[page] = [a.ref for a in node.get('Annots', [])]
self.box[page] = node.get('CropBox', cbox) or node.get('MediaBox', mbox)
def dest2page(self, dest):
if type(dest) != types.ListType:
return dest
elif dest[0].__class__ == PDFref:
return self.obj2page.get(dest[0].ref, None)
else:
return dest[0]
def get_href(self, obj):
node = self.getobj(obj, 'Annot')
if node['Subtype'] != 'Link': return None
dest = None
if 'Dest' in node:
dest = self.dest2page(node['Dest'])
elif 'A' in node:
action = node['A']['S']
if action == 'URI':
dest = node['A'].get('URI', None)
elif action == 'GoTo':
dest = self.dest2page(node['A'].get('D', None))
if dest:
return tuple(node['Rect'] + [dest])
def GetHyperlinks(self):
res = {}
for page in self.annots:
a = filter(None, map(self.get_href, self.annots[page]))
if a: res[page] = a
return res
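# AddHyperlink() converts a link rectangle from PDF page coordinates into
# coordinates normalized to the page box (0.0-1.0 on both axes, with y flipped
# so that 0.0 is the top edge); FixHyperlinks() later maps these onto the
# rendered page's '_box' in screen pixels once the page has been rasterized.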
def AddHyperlink(page_offset, page, target, linkbox, pagebox):
page += page_offset
if type(target) == types.IntType:
target += page_offset
w = 1.0 / (pagebox[2] - pagebox[0])
h = 1.0 / (pagebox[3] - pagebox[1])
x0 = (linkbox[0] - pagebox[0]) * w
y0 = (pagebox[3] - linkbox[3]) * h
x1 = (linkbox[2] - pagebox[0]) * w
y1 = (pagebox[3] - linkbox[1]) * h
href = (0, target, x0, y0, x1, y1)
if GetPageProp(page, '_href'):
PageProps[page]['_href'].append(href)
else:
SetPageProp(page, '_href', [href])
def FixHyperlinks(page):
if not(GetPageProp(page, '_box')) or not(GetPageProp(page, '_href')):
return # no hyperlinks or unknown page size
bx0, by0, bx1, by1 = GetPageProp(page, '_box')
bdx = bx1 - bx0
bdy = by1 - by0
href = []
for fixed, target, x0, y0, x1, y1 in GetPageProp(page, '_href'):
if fixed:
href.append((1, target, x0, y0, x1, y1))
else:
href.append((1, target, \
int(bx0 + bdx * x0), int(by0 + bdy * y0), \
int(bx0 + bdx * x1), int(by0 + bdy * y1)))
SetPageProp(page, '_href', href)
def ParsePDF(filename):
try:
assert 0 == spawn(os.P_WAIT, pdftkPath, \
["pdftk", FileNameEscape + filename + FileNameEscape, \
"output", FileNameEscape + TempFileName + ".pdf" + FileNameEscape,
"uncompress"])
except OSError:
print >>sys.stderr, "Note: pdftk not found, hyperlinks disabled."
return
except AssertionError:
print >>sys.stderr, "Note: pdftk failed, hyperlinks disabled."
return
count = 0
try:
try:
pdf = PDFParser(TempFileName + ".pdf")
for page, annots in pdf.GetHyperlinks().iteritems():
for page_offset in FileProps[filename]['offsets']:
for a in annots:
AddHyperlink(page_offset, page, a[4], a[:4], pdf.box[page])
count += len(annots)
FixHyperlinks(page)
del pdf
return count
except IOError:
print >>sys.stderr, "Note: file produced by pdftk not readable, hyperlinks disabled."
except PDFError, e:
print >>sys.stderr, "Note: error in file produced by pdftk, hyperlinks disabled."
print >>sys.stderr, " PDF parser error message:", e
finally:
try:
os.remove(TempFileName + ".pdf")
except OSError:
pass
##### PAGE CACHE MANAGEMENT ####################################################
# helper class that allows PIL to write and read image files with an offset
class IOWrapper:
def __init__(self, f, offset=0):
self.f = f
self.offset = offset
self.f.seek(offset)
def read(self, count=None):
if count is None:
return self.f.read()
else:
return self.f.read(count)
def write(self, data):
self.f.write(data)
def seek(self, pos, whence=0):
assert(whence in (0, 1))
if whence:
self.f.seek(pos, 1)
else:
self.f.seek(pos + self.offset)
def tell(self):
return self.f.tell() - self.offset
# generate a "magic number" that is used to identify persistent cache files
def UpdateCacheMagic():
global CacheMagic
pool = [PageCount, ScreenWidth, ScreenHeight, b2s(Scaling), b2s(Supersample), b2s(Rotation)]
flist = list(FileProps.keys())
flist.sort(lambda a,b: cmp(a.lower(), b.lower()))
for f in flist:
pool.append(f)
pool.extend(list(GetFileProp(f, 'stat', [])))
CacheMagic = md5.new("\0".join(map(str, pool))).hexdigest()
# set the persistent cache file position to the current end of the file
def UpdatePCachePos():
global CacheFilePos
CacheFile.seek(0, 2)
CacheFilePos = CacheFile.tell()
# rewrite the header of the persistent cache
def WritePCacheHeader(reset=False):
pages = ["%08x" % PageCache.get(page, 0) for page in range(1, PageCount+1)]
CacheFile.seek(0)
CacheFile.write(CacheMagic + "".join(pages))
if reset:
CacheFile.truncate()
UpdatePCachePos()
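# Persistent cache file layout, as written by the functions above (roughly):
#   bytes 0..31          the 32-character hex cache magic
#   next PageCount * 8   hexadecimal per-page file offsets ("00000000" = not cached)
#   remainder            the cached page images, appended one after another as PPM data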
# return an image from the persistent cache or None if none is available
def GetPCacheImage(page):
if CacheMode != PersistentCache:
return # not applicable if persistent cache isn't used
Lcache.acquire()
try:
if page in PageCache:
img = Image.open(IOWrapper(CacheFile, PageCache[page]))
img.load()
return img
finally:
Lcache.release()
# returns an image from the non-persistent cache or None if none is available
def GetCacheImage(page):
if CacheMode in (NoCache, PersistentCache):
return # not applicable in uncached or persistent-cache mode
Lcache.acquire()
try:
if page in PageCache:
if CacheMode == FileCache:
CacheFile.seek(PageCache[page])
return CacheFile.read(TexSize)
else:
return PageCache[page]
finally:
Lcache.release()
# adds an image to the persistent cache
def AddToPCache(page, img):
if CacheMode != PersistentCache:
return # not applicable if persistent cache isn't used
Lcache.acquire()
try:
if page in PageCache:
return # page is already cached and we can't update it safely
# -> stop here (the new image will be identical to the old
# one anyway)
img.save(IOWrapper(CacheFile, CacheFilePos), "ppm")
PageCache[page] = CacheFilePos
WritePCacheHeader()
finally:
Lcache.release()
# adds an image to the non-persistent cache
def AddToCache(page, data):
global CacheFilePos
if CacheMode in (NoCache, PersistentCache):
return # not applicable in uncached or persistent-cache mode
Lcache.acquire()
try:
if CacheMode == FileCache:
if not(page in PageCache):
PageCache[page] = CacheFilePos
CacheFilePos += len(data)
CacheFile.seek(PageCache[page])
CacheFile.write(data)
else:
PageCache[page] = data
finally:
Lcache.release()
# invalidates the whole cache
def InvalidateCache():
global PageCache, CacheFilePos
Lcache.acquire()
try:
PageCache = {}
if CacheMode == PersistentCache:
UpdateCacheMagic()
WritePCacheHeader(True)
else:
CacheFilePos = 0
finally:
Lcache.release()
# initialize the persistent cache
def InitPCache():
global CacheFile, CacheMode
# try to open the pre-existing cache file
try:
CacheFile = file(CacheFileName, "rb+")
except IOError:
CacheFile = None
# check the cache magic
UpdateCacheMagic()
if CacheFile and (CacheFile.read(32) != CacheMagic):
print >>sys.stderr, "Cache file mismatch, recreating cache."
CacheFile.close()
CacheFile = None
if CacheFile:
# if the magic was valid, import cache data
print >>sys.stderr, "Using already existing persistent cache file."
for page in range(1, PageCount+1):
offset = int(CacheFile.read(8), 16)
if offset:
PageCache[page] = offset
UpdatePCachePos()
else:
# if the magic was invalid or the file didn't exist, (re-)create it
try:
CacheFile = file(CacheFileName, "wb+")
except IOError:
print >>sys.stderr, "Error: cannot write the persistent cache file (`%s')" % CacheFileName
print >>sys.stderr, "Falling back to temporary file cache."
CacheMode = FileCache
WritePCacheHeader()
##### PAGE RENDERING ###########################################################
# generate a dummy image
def DummyPage():
img = Image.new('RGB', (ScreenWidth, ScreenHeight))
img.paste(LogoImage, ((ScreenWidth - LogoImage.size[0]) / 2,
(ScreenHeight - LogoImage.size[1]) / 2))
return img
def RenderPDF(page, MayAdjustResolution, ZoomMode):
"""loads a page from a PDF file and returns an Image object"""
global UseGhostScript
UseGhostScriptOnce = False
SourceFile = GetPageProp(page, '_file')
Resolution = GetFileProp(SourceFile, 'res', 96)
RealPage = GetPageProp(page, '_page')
if Supersample and not(ZoomMode):
UseRes = int(0.5 + Resolution) * Supersample
AlphaBits = 1
else:
UseRes = int(0.5 + Resolution)
AlphaBits = 4
if ZoomMode:
UseRes = 2 * UseRes
# call pdftoppm to generate the page image
if not UseGhostScript:
renderer = "pdftoppm"
try:
assert 0 == spawn(os.P_WAIT, \
pdftoppmPath, ["pdftoppm", "-q"] + [ \
"-f", str(RealPage), "-l", str(RealPage),
"-r", str(int(UseRes)),
FileNameEscape + SourceFile + FileNameEscape,
TempFileName])
# determine output filename
digits = GetFileProp(SourceFile, 'digits', 6)
imgfile = TempFileName + ("-%%0%dd.ppm" % digits) % RealPage
if not os.path.exists(imgfile):
for digits in xrange(6, 0, -1):
imgfile = TempFileName + ("-%%0%dd.ppm" % digits) % RealPage
if os.path.exists(imgfile): break
SetFileProp(SourceFile, 'digits', digits)
except OSError, (errcode, errmsg):
print >>sys.stderr, "Warning: Cannot start pdftoppm -", errmsg
print >>sys.stderr, "Falling back to GhostScript (permanently)."
UseGhostScript = True
except AssertionError:
print >>sys.stderr, "There was an error while rendering page %d" % page
print >>sys.stderr, "Falling back to GhostScript for this page."
UseGhostScriptOnce = True
# fallback to GhostScript
if UseGhostScript or UseGhostScriptOnce:
imgfile = TempFileName + ".tif"
renderer = "GhostScript"
try:
assert 0 == spawn(os.P_WAIT, \
GhostScriptPath, ["gs", "-q"] + GhostScriptPlatformOptions + [ \
"-dBATCH", "-dNOPAUSE", "-sDEVICE=tiff24nc", "-dUseCropBox",
"-sOutputFile=" + imgfile, \
"-dFirstPage=%d" % RealPage, "-dLastPage=%d" % RealPage,
"-r%dx%d" % (UseRes, int(UseRes * PAR)), \
"-dTextAlphaBits=%d" % AlphaBits, \
"-dGraphicsAlphaBits=%s" % AlphaBits, \
FileNameEscape + SourceFile + FileNameEscape])
except OSError, (errcode, errmsg):
print >>sys.stderr, "Error: Cannot start GhostScript -", errmsg
return DummyPage()
except AssertionError:
print >>sys.stderr, "There was an error while rendering page %d" % page
return DummyPage()
# open the page image file with PIL
try:
img = Image.open(imgfile)
except:
print >>sys.stderr, "Error: %s produced an unreadable file (page %d)" % (renderer, page)
return DummyPage()
# try to delete the file again (this constantly fails on Win32 ...)
try:
os.remove(imgfile)
except OSError:
pass
# apply rotation
rot = GetPageProp(page, 'rotate')
if rot is None:
rot = Rotation
if rot:
img = img.rotate(90 * (4 - rot))
# determine real display size (don't care for ZoomMode, DisplayWidth and
# DisplayHeight are only used for Supersample and AdjustResolution anyway)
if Supersample:
DisplayWidth = img.size[0] / Supersample
DisplayHeight = img.size[1] / Supersample
else:
DisplayWidth = img.size[0]
DisplayHeight = img.size[1]
# if the image size is strange, re-adjust the rendering resolution
if MayAdjustResolution \
and not DualHead \
and ((abs(ScreenWidth - DisplayWidth) > 4) \
or (abs(ScreenHeight - DisplayHeight) > 4)):
newsize = ZoomToFit((DisplayWidth,DisplayHeight))
NewResolution = newsize[0] * Resolution/DisplayWidth
if abs(1.0 - NewResolution / Resolution) > 0.05:
# only modify anything if the resolution deviation is large enough
SetFileProp(SourceFile, 'res', NewResolution)
return RenderPDF(page, False, ZoomMode)
# downsample a supersampled image
if Supersample and not(ZoomMode):
return img.resize((DisplayWidth, DisplayHeight), Image.ANTIALIAS)
return img
# load a page from an image file
def LoadImage(page, ZoomMode):
# open the image file with PIL
try:
img = Image.open(GetPageProp(page, '_file'))
except:
print >>sys.stderr, "Image file `%s' is broken." % (FileList[page - 1])
return DummyPage()
# apply rotation
rot = GetPageProp(page, 'rotate')
if rot is None:
rot = Rotation
if rot:
img = img.rotate(90 * (4 - rot))
# determine destination size
newsize = ZoomToFit(img.size)
# don't scale if the source size is too close to the destination size
if abs(newsize[0] - img.size[0]) < 2: newsize = img.size
# don't scale if the source is smaller than the destination
if not(Scaling) and (newsize > img.size): newsize = img.size
# zoom up (if wanted)
if ZoomMode: newsize=(2 * newsize[0], 2 * newsize[1])
# skip processing if there was no change
if newsize == img.size: return img
# select a nice filter and resize the image
if newsize > img.size:
filter = Image.BICUBIC
else:
filter = Image.ANTIALIAS
return img.resize(newsize, filter)
# generate the image data for a page (a raw RGB string, or a PIL image in RenderMode)
def PageImage(page, ZoomMode=False, RenderMode=False):
global OverviewNeedUpdate
EnableCacheRead = not(ZoomMode or RenderMode)
EnableCacheWrite = EnableCacheRead and \
(page >= PageRangeStart) and (page <= PageRangeEnd)
# check for the image in the cache
if EnableCacheRead:
data = GetCacheImage(page)
if data: return data
# if it's not in the temporary cache, render it
Lrender.acquire()
try:
# retrieve the image from the persistent cache or fully re-render it
if EnableCacheRead:
img = GetPCacheImage(page)
else:
img = None
if not img:
if GetPageProp(page, '_page'):
img = RenderPDF(page, not(ZoomMode), ZoomMode)
else:
img = LoadImage(page, ZoomMode)
if EnableCacheWrite:
AddToPCache(page, img)
# create black background image to paste real image onto
if ZoomMode:
TextureImage = Image.new('RGB', (2 * TexWidth, 2 * TexHeight))
TextureImage.paste(img, ((2 * ScreenWidth - img.size[0]) / 2, \
(2 * ScreenHeight - img.size[1]) / 2))
else:
TextureImage = Image.new('RGB', (TexWidth, TexHeight))
x0 = (ProjectionFrame.width - img.size[0]) / 2
y0 = (ProjectionFrame.height - img.size[1]) / 2
TextureImage.paste(img, (x0, y0))
SetPageProp(page, '_box', (x0, y0, x0 + img.size[0], y0 + img.size[1]))
FixHyperlinks(page)
# paste thumbnail into overview image
if GetPageProp(page, ('overview', '_overview'), True) \
and (page >= PageRangeStart) and (page <= PageRangeEnd) \
and not(GetPageProp(page, '_overview_rendered')) \
and not(RenderMode):
pos = OverviewPos(OverviewPageMapInv[page])
Loverview.acquire()
try:
# first, fill the underlying area with black (i.e. remove the dummy logo)
blackness = Image.new('RGB', (OverviewCellX - OverviewBorder, \
OverviewCellY - OverviewBorder))
OverviewImage.paste(blackness, (pos[0] + OverviewBorder / 2, \
pos[1] + OverviewBorder))
del blackness
# then, scale down the original image and paste it
img.thumbnail((OverviewCellX - 2 * OverviewBorder, \
OverviewCellY - 2 * OverviewBorder), \
Image.ANTIALIAS)
OverviewImage.paste(img, \
(pos[0] + (OverviewCellX - img.size[0]) / 2, \
pos[1] + (OverviewCellY - img.size[1]) / 2))
finally:
Loverview.release()
SetPageProp(page, '_overview_rendered', True)
OverviewNeedUpdate = True
del img
# return texture data
if RenderMode:
return TextureImage
data=TextureImage.tostring()
del TextureImage
finally:
Lrender.release()
# finally add it back into the cache and return it
if EnableCacheWrite:
AddToCache(page, data)
return data
# render a page to an OpenGL texture
def RenderPage(page, target):
glBindTexture(TextureTarget ,target)
try:
glTexImage2D(TextureTarget, 0, 3, TexWidth, TexHeight, 0,\
GL_RGB, GL_UNSIGNED_BYTE, PageImage(page))
except GLerror:
print >>sys.stderr, "I'm sorry, but your graphics card is not capable of rendering presentations"
print >>sys.stderr, "in this resolution. Either the texture memory is exhausted, or there is no"
print >>sys.stderr, "support for large textures (%dx%d). Please try to run Impressive in a" % (TexWidth, TexHeight)
print >>sys.stderr, "smaller resolution using the -g command-line option."
sys.exit(1)
# background rendering thread
def RenderThread(p1, p2):
global RTrunning, RTrestart
RTrunning = True
RTrestart = True
while RTrestart:
RTrestart = False
for pdf in FileProps:
if not pdf.lower().endswith(".pdf"): continue
if RTrestart: break
ParsePDF(pdf)
if RTrestart: continue
for page in xrange(1, PageCount + 1):
if RTrestart: break
if (page != p1) and (page != p2) \
and (page >= PageRangeStart) and (page <= PageRangeEnd):
PageImage(page)
RTrunning = False
if CacheMode >= FileCache:
print >>sys.stderr, "Background rendering finished, used %.1f MiB of disk space." %\
(CacheFilePos / 1048576.0)
##### RENDER MODE ##############################################################
def DoRender():
global TexWidth, TexHeight
if DualHead:
TexWidth = ProjectionFrame.width
TexHeight = ProjectionFrame.height
else:
TexWidth = ScreenWidth
TexHeight = ScreenHeight
if os.path.exists(RenderToDirectory):
print >>sys.stderr, "Destination directory `%s' already exists," % RenderToDirectory
print >>sys.stderr, "refusing to overwrite anything."
return 1
try:
os.mkdir(RenderToDirectory)
except OSError, e:
print >>sys.stderr, "Cannot create destination directory `%s':" % RenderToDirectory
print >>sys.stderr, e.strerror
return 1
print >>sys.stderr, "Rendering presentation into `%s'" % RenderToDirectory
for page in xrange(1, PageCount + 1):
PageImage(page, RenderMode=True).save("%s/page%04d.png" % (RenderToDirectory, page))
sys.stdout.write("[%d] " % page)
sys.stdout.flush()
print >>sys.stderr
print >>sys.stderr, "Done."
return 0
##### INFO SCRIPT I/O ##########################################################
# info script reader
def LoadInfoScript():
global PageProps
try:
OldPageProps = PageProps
execfile(InfoScriptPath, globals())
NewPageProps = PageProps
PageProps = OldPageProps
del OldPageProps
for page in NewPageProps:
for prop in NewPageProps[page]:
SetPageProp(page, prop, NewPageProps[page][prop])
del NewPageProps
except IOError:
pass
except:
print >>sys.stderr, "----- Exception in info script ----"
traceback.print_exc(file=sys.stderr)
print >>sys.stderr, "----- End of traceback -----"
# we can't save lambda expressions, so we need to warn the user
# in every possible way
ScriptTainted = False
LambdaWarning = False
def here_was_a_lambda_expression_that_could_not_be_saved():
global LambdaWarning
if not LambdaWarning:
print >>sys.stderr, "WARNING: The info script for the current file contained lambda expressions that"
print >>sys.stderr, " were removed during the a save operation."
LambdaWarning = True
# "clean" a PageProps entry so that only 'public' properties are left
def GetPublicProps(props):
props = props.copy()
# delete private (underscore) props
for prop in list(props.keys()):
if str(prop)[0] == '_':
del props[prop]
# clean props to default values
if props.get('overview', False):
del props['overview']
if not props.get('skip', True):
del props['skip']
if ('boxes' in props) and not(props['boxes']):
del props['boxes']
return props
# Generate a string representation of a property value. Mainly this converts
# classes or instances to the name of the class.
def PropValueRepr(value):
global ScriptTainted
if type(value) == types.FunctionType:
if value.__name__ != "<lambda>":
return value.__name__
if not ScriptTainted:
print >>sys.stderr, "WARNING: The info script contains lambda expressions, which cannot be saved"
print >>sys.stderr, " back. The modifed script will be written into a separate file to"
print >>sys.stderr, " minimize data loss."
ScriptTainted = True
return "here_was_a_lambda_expression_that_could_not_be_saved"
elif type(value) == types.ClassType:
return value.__name__
elif type(value) == types.InstanceType:
return value.__class__.__name__
elif type(value) == types.DictType:
return "{ " + ", ".join([PropValueRepr(k) + ": " + PropValueRepr(value[k]) for k in value]) + " }"
else:
return repr(value)
# generate a nicely formatted string representation of a page's properties
def SinglePagePropRepr(page):
props = GetPublicProps(PageProps[page])
if not props: return None
return "\n%3d: {%s\n }" % (page, \
",".join(["\n " + repr(prop) + ": " + PropValueRepr(props[prop]) for prop in props]))
# generate a nicely formatted string representation of all page properties
def PagePropRepr():
pages = PageProps.keys()
pages.sort()
return "PageProps = {%s\n}" % (",".join(filter(None, map(SinglePagePropRepr, pages))))
# count the characters of a python dictionary source code, correctly handling
# embedded strings and comments, and nested dictionaries
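# Given an index just past an opening '{', the function returns the index of
# the matching closing '}', ignoring braces inside strings and '#' comments
# (used to locate the end of the PageProps block in an info script).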
def CountDictChars(s, start=0):
context = None
level = 0
for i in xrange(start, len(s)):
c = s[i]
if context is None:
if c == '{': level += 1
if c == '}': level -= 1
if c == '#': context = '#'
if c == '"': context = '"'
if c == "'": context = "'"
elif context[0] == "\\":
context=context[1]
elif context == '#':
if c in "\r\n": context = None
elif context == '"':
if c == "\\": context = "\\\""
if c == '"': context = None
elif context == "'":
if c == "\\": context = "\\'"
if c == "'": context = None
if level < 0: return i
raise ValueError, "the dictionary never ends"
# modify and save a file's info script
def SaveInfoScript(filename):
# read the old info script
try:
f = file(filename, "r")
script = f.read()
f.close()
except IOError:
script = ""
if not script:
script = "# -*- coding: iso-8859-1 -*-\n"
# replace the PageProps of the old info script with the current ones
try:
m = re.search(r"^.*(PageProps)\s*=\s*(\{).*$", script, re.MULTILINE)
if m:
script = script[:m.start(1)] + PagePropRepr() + \
script[CountDictChars(script, m.end(2)) + 1 :]
else:
script += "\n" + PagePropRepr() + "\n"
except (AttributeError, ValueError):
pass
if ScriptTainted:
filename += ".modified"
# write the script back
try:
f = file(filename, "w")
f.write(script)
f.close()
except:
print >>sys.stderr, "Oops! Could not write info script!"
##### OPENGL RENDERING #########################################################
# draw OSD overlays
def DrawOverlays():
reltime = pygame.time.get_ticks() - StartTime
if EstimatedDuration and (OverviewMode or GetPageProp(Pcurrent, 'progress', True)):
rel = (0.001 * reltime) / EstimatedDuration
x = int(ScreenWidth * rel)
y = 1.0 - ProgressBarSize * PixelY
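# bar color: green while within the estimated duration, then fading through
# yellow to red as the presentation overruns (r ramps up over the first 256
# pixels past the end of the bar, g ramps down over the next 256)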
a = min(255, max(0, x - ScreenWidth))
b = min(255, max(0, x - ScreenWidth - 256))
r = a
g = 255 - b
b = 0
glDisable(TextureTarget)
glDisable(GL_TEXTURE_2D)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBegin(GL_QUADS)
glColor4ub(r, g, b, 0)
glVertex2d(0, y)
glVertex2d(rel, y)
glColor4ub(r, g, b, ProgressBarAlpha)
glVertex2d(rel, 1.0)
glVertex2d(0, 1.0)
glEnd()
glDisable(GL_BLEND)
if WantStatus:
DrawOSDEx(OSDStatusPos, CurrentOSDStatus)
if TimeDisplay:
t = reltime / 1000
DrawOSDEx(OSDTimePos, FormatTime(t, MinutesOnly))
if CurrentOSDComment and (OverviewMode or not(TransitionRunning)):
DrawOSD(ScreenWidth/2, \
ScreenHeight - 3*OSDMargin - FontSize, \
CurrentOSDComment, Center, Up)
if CursorImage and CursorVisible:
x, y = pygame.mouse.get_pos()
x -= CursorHotspot[0]
y -= CursorHotspot[1]
X0 = x * PixelX
Y0 = y * PixelY
X1 = X0 + CursorSX
Y1 = Y0 + CursorSY
glDisable(TextureTarget)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, CursorTexture)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glColor4ub(255, 255, 255, 255)
glBegin(GL_QUADS)
glTexCoord2d(0.0, 0.0); glVertex2d(X0, Y0)
glTexCoord2d(CursorTX, 0.0); glVertex2d(X1, Y0)
glTexCoord2d(CursorTX, CursorTY); glVertex2d(X1, Y1)
glTexCoord2d(0.0, CursorTY); glVertex2d(X0, Y1)
glEnd()
glDisable(GL_BLEND)
glDisable(GL_TEXTURE_2D)
def ClearScreen():
WholeWindow.glViewport()
# debugging aid: glClearColor(0, 255, 0, 0) would clear the screen to green
glClear(GL_COLOR_BUFFER_BIT)
# draw the complete image of the current page
def DrawCurrentPage(dark=1.0, do_flip=True):
if VideoPlaying: return
if DualHead:
ProjectionFrame.glViewport()
DrawPageWorker(Tcurrent, dark, do_flip)
PrompterCurrentFrame.glViewport()
DrawPageWorker(Tcurrent, dark, do_flip)
WholeWindow.glViewport()
else:
DrawPageWorker(Tcurrent, dark, do_flip)
def DrawPageWorker(T, dark=1.0, do_flip=True):
boxes = GetPageProp(Pcurrent, 'boxes')
# pre-transform for zoom
glLoadIdentity()
glOrtho(ZoomX0, ZoomX0 + ZoomArea, ZoomY0 + ZoomArea, ZoomY0, -10.0, 10.0)
# background layer -- the page's image, darkened if it has boxes
glDisable(GL_BLEND)
glEnable(TextureTarget)
glBindTexture(TextureTarget, T)
if boxes or Tracing:
light = 1.0 - 0.25 * dark
else:
light = 1.0
glColor3d(light, light, light)
DrawFullQuad()
if boxes or Tracing:
# alpha-blend the same image some times to blur it
EnableAlphaBlend()
DrawTranslatedFullQuad(+PixelX * ZoomArea, 0.0, light, dark / 2)
DrawTranslatedFullQuad(-PixelX * ZoomArea, 0.0, light, dark / 3)
DrawTranslatedFullQuad(0.0, +PixelY * ZoomArea, light, dark / 4)
DrawTranslatedFullQuad(0.0, -PixelY * ZoomArea, light, dark / 5)
if boxes:
# draw outer box fade
EnableAlphaBlend()
for X0, Y0, X1, Y1 in boxes:
glBegin(GL_QUAD_STRIP)
DrawPointEx(X0, Y0, 1); DrawPointEx(X0 - EdgeX, Y0 - EdgeY, 0)
DrawPointEx(X1, Y0, 1); DrawPointEx(X1 + EdgeX, Y0 - EdgeY, 0)
DrawPointEx(X1, Y1, 1); DrawPointEx(X1 + EdgeX, Y1 + EdgeY, 0)
DrawPointEx(X0, Y1, 1); DrawPointEx(X0 - EdgeX, Y1 + EdgeY, 0)
DrawPointEx(X0, Y0, 1); DrawPointEx(X0 - EdgeX, Y0 - EdgeY, 0)
glEnd()
# draw boxes
glDisable(GL_BLEND)
glBegin(GL_QUADS)
for X0, Y0, X1, Y1 in boxes:
DrawPoint(X0, Y0)
DrawPoint(X1, Y0)
DrawPoint(X1, Y1)
DrawPoint(X0, Y1)
glEnd()
if Tracing:
x, y = MouseToScreen(pygame.mouse.get_pos())
# outer spot fade
EnableAlphaBlend()
glBegin(GL_TRIANGLE_STRIP)
for x0, y0, x1, y1 in SpotMesh:
DrawPointEx(x + x0, y + y0, 1)
DrawPointEx(x + x1, y + y1, 0)
glEnd()
# inner spot
glDisable(GL_BLEND)
glBegin(GL_TRIANGLE_FAN)
DrawPoint(x, y)
for x0, y0, x1, y1 in SpotMesh:
DrawPoint(x + x0, y + y0)
glEnd()
if Marking:
# soft alpha-blended rectangle
glDisable(TextureTarget)
glColor4d(*MarkColor)
EnableAlphaBlend()
glBegin(GL_QUADS)
glVertex2d(MarkUL[0], MarkUL[1])
glVertex2d(MarkLR[0], MarkUL[1])
glVertex2d(MarkLR[0], MarkLR[1])
glVertex2d(MarkUL[0], MarkLR[1])
glEnd()
# bright red frame
glDisable(GL_BLEND)
glBegin(GL_LINE_STRIP)
glVertex2d(MarkUL[0], MarkUL[1])
glVertex2d(MarkLR[0], MarkUL[1])
glVertex2d(MarkLR[0], MarkLR[1])
glVertex2d(MarkUL[0], MarkLR[1])
glVertex2d(MarkUL[0], MarkUL[1])
glEnd()
glEnable(TextureTarget)
# unapply the zoom transform
glLoadIdentity()
glOrtho(0.0, 1.0, 1.0, 0.0, -10.0, 10.0)
# Done.
DrawOverlays()
if do_flip:
pygame.display.flip()
# draw a black screen with the Impressive logo at the center
def DrawLogo():
glClear(GL_COLOR_BUFFER_BIT)
glColor3ub(255, 255, 255)
if TextureTarget != GL_TEXTURE_2D:
glDisable(TextureTarget)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, LogoTexture)
glBegin(GL_QUADS)
glTexCoord2d(0, 0); glVertex2d(0.5 - 128.0 / ScreenWidth, 0.5 - 32.0 / ScreenHeight)
glTexCoord2d(1, 0); glVertex2d(0.5 + 128.0 / ScreenWidth, 0.5 - 32.0 / ScreenHeight)
glTexCoord2d(1, 1); glVertex2d(0.5 + 128.0 / ScreenWidth, 0.5 + 32.0 / ScreenHeight)
glTexCoord2d(0, 1); glVertex2d(0.5 - 128.0 / ScreenWidth, 0.5 + 32.0 / ScreenHeight)
glEnd()
if OSDFont:
OSDFont.Draw((ScreenWidth / 2, ScreenHeight / 2 + 48), \
__version__, align=Center, alpha=0.25)
glDisable(GL_TEXTURE_2D)
# draw the prerender progress bar
def DrawProgress(position):
glDisable(TextureTarget)
x0 = 0.1
x2 = 1.0 - x0
x1 = position * x2 + (1.0 - position) * x0
y1 = 0.9
y0 = y1 - 16.0 / ScreenHeight
glBegin(GL_QUADS)
glColor3ub( 64, 64, 64); glVertex2d(x0, y0); glVertex2d(x2, y0)
glColor3ub(128, 128, 128); glVertex2d(x2, y1); glVertex2d(x0, y1)
glColor3ub( 64, 128, 255); glVertex2d(x0, y0); glVertex2d(x1, y0)
glColor3ub( 8, 32, 128); glVertex2d(x1, y1); glVertex2d(x0, y1)
glEnd()
glEnable(TextureTarget)
# fade mode
def DrawFadeMode(intensity, alpha):
if VideoPlaying: return
DrawCurrentPage(do_flip=False)
glDisable(TextureTarget)
EnableAlphaBlend()
glColor4d(intensity, intensity, intensity, alpha)
DrawFullQuad()
glEnable(TextureTarget)
pygame.display.flip()
def FadeMode(intensity):
t0 = pygame.time.get_ticks()
while True:
if pygame.event.get([KEYDOWN,MOUSEBUTTONUP]): break
t = (pygame.time.get_ticks() - t0) * 1.0 / BlankFadeDuration
if t >= 1.0: break
DrawFadeMode(intensity, t)
DrawFadeMode(intensity, 1.0)
while True:
event = pygame.event.wait()
if event.type == QUIT:
PageLeft()
Quit()
elif event.type == VIDEOEXPOSE:
DrawFadeMode(intensity, 1.0)
elif event.type == MOUSEBUTTONUP:
break
elif event.type == KEYDOWN:
if event.unicode == u'q':
pygame.event.post(pygame.event.Event(QUIT))
else:
break
t0 = pygame.time.get_ticks()
while True:
if pygame.event.get([KEYDOWN,MOUSEBUTTONUP]): break
t = (pygame.time.get_ticks() - t0) * 1.0 / BlankFadeDuration
if t >= 1.0: break
DrawFadeMode(intensity, 1.0 - t)
DrawCurrentPage()
# gamma control
def SetGamma(new_gamma=None, new_black=None, force=False):
global Gamma, BlackLevel
if new_gamma is None: new_gamma = Gamma
if new_gamma < 0.1: new_gamma = 0.1
if new_gamma > 10.0: new_gamma = 10.0
if new_black is None: new_black = BlackLevel
if new_black < 0: new_black = 0
if new_black > 254: new_black = 254
if not(force) and (abs(Gamma - new_gamma) < 0.01) and (new_black == BlackLevel):
return
Gamma = new_gamma
BlackLevel = new_black
scale = 1.0 / (255 - BlackLevel)
power = 1.0 / Gamma
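# build a 256-entry gamma ramp: inputs below BlackLevel are clamped to zero,
# the remainder is rescaled to full range and raised to 1/Gamma, then mapped
# to the 16-bit values expected by pygame.display.set_gamma_ramp()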
ramp = [int(65535.0 * ((max(0, x - BlackLevel) * scale) ** power)) for x in range(256)]
return pygame.display.set_gamma_ramp(ramp, ramp, ramp)
# cursor image
def PrepareCustomCursor(cimg):
global CursorTexture, CursorSX, CursorSY, CursorTX, CursorTY
w, h = cimg.size
tw, th = map(npot, cimg.size)
if (tw > 256) or (th > 256):
print >>sys.stderr, "Custom cursor is rediculously large, reverting to normal one."
return False
img = Image.new('RGBA', (tw, th))
img.paste(cimg, (0, 0))
CursorTexture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, CursorTexture)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tw, th, 0, GL_RGBA, GL_UNSIGNED_BYTE, img.tostring())
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
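# CursorSX/SY hold the cursor size in normalized screen coordinates,
# CursorTX/TY the fraction of the (power-of-two padded) texture that the
# cursor image actually covers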
CursorSX = w * PixelX
CursorSY = h * PixelY
CursorTX = w / float(tw)
CursorTY = h / float(th)
return True
##### CONTROL AND NAVIGATION ###################################################
# update the applications' title bar
def UpdateCaption(page=0, force=False):
global CurrentCaption, CurrentOSDCaption, CurrentOSDPage, CurrentOSDStatus
global CurrentOSDComment
if (page == CurrentCaption) and not(force):
return
CurrentCaption = page
caption = __title__
if DocumentTitle:
caption += " - " + DocumentTitle
if page < 1:
CurrentOSDCaption = ""
CurrentOSDPage = ""
CurrentOSDStatus = ""
CurrentOSDComment = ""
pygame.display.set_caption(caption, __title__)
return
CurrentOSDPage = "%d/%d" % (page, PageCount)
caption = "%s (%s)" % (caption, CurrentOSDPage)
title = GetPageProp(page, 'title') or GetPageProp(page, '_title')
if title:
caption += ": %s" % title
CurrentOSDCaption = title
else:
CurrentOSDCaption = ""
status = []
if GetPageProp(page, 'skip', False):
status.append("skipped: yes")
if not GetPageProp(page, ('overview', '_overview'), True):
status.append("on overview page: no")
CurrentOSDStatus = ", ".join(status)
CurrentOSDComment = GetPageProp(page, 'comment')
pygame.display.set_caption(caption, __title__)
# get next/previous page
def GetNextPage(page, direction):
try_page = page
while True:
try_page += direction
if try_page == page:
return 0 # tried all pages, but none found
if Wrap:
if try_page < 1: try_page = PageCount
if try_page > PageCount: try_page = 1
else:
if try_page < 1 or try_page > PageCount:
return 0 # start or end of presentation
if not GetPageProp(try_page, 'skip', False):
return try_page
# pre-load the following page into Pnext/Tnext
def PreloadNextPage(page):
global Pnext, Tnext
if (page < 1) or (page > PageCount):
Pnext = 0
return 0
if page == Pnext:
return 1
RenderPage(page, Tnext)
Pnext = page
return 1
# perform box fading; the fade animation time is mapped through func()
def BoxFade(func):
t0 = pygame.time.get_ticks()
while 1:
if pygame.event.get([KEYDOWN,MOUSEBUTTONUP]): break
t = (pygame.time.get_ticks() - t0) * 1.0 / BoxFadeDuration
if t >= 1.0: break
DrawCurrentPage(func(t))
DrawCurrentPage(func(1.0))
return 0
# reset the timer
def ResetTimer():
global StartTime, PageEnterTime
if TimeTracking and not(FirstPage):
print "--- timer was reset here ---"
StartTime = pygame.time.get_ticks()
PageEnterTime = 0
# start video playback
def PlayVideo(video):
global MPlayerPID, VideoPlaying
if not video: return
StopMPlayer()
try:
MPlayerPID = spawn(os.P_NOWAIT, \
MPlayerPath, [MPlayerPath, "-quiet", \
"-monitorpixelaspect", "1:1", "-autosync", "100"] + \
MPlayerPlatformOptions + [ "-slave", \
"-wid", str(pygame.display.get_wm_info()['window']), \
FileNameEscape + video + FileNameEscape])
if MPlayerColorKey:
glClear(GL_COLOR_BUFFER_BIT)
pygame.display.flip()
VideoPlaying = True
except OSError:
MPlayerPID = 0
# called each time a page is entered
def PageEntered(update_time=True):
global PageEnterTime, MPlayerPID, IsZoomed, WantStatus
if update_time:
PageEnterTime = pygame.time.get_ticks() - StartTime
IsZoomed = False # no, we don't have a pre-zoomed image right now
WantStatus = False # don't show status unless it's changed interactively
timeout = AutoAdvance
shown = GetPageProp(Pcurrent, '_shown', 0)
if not shown:
timeout = GetPageProp(Pcurrent, 'timeout', timeout)
video = GetPageProp(Pcurrent, 'video')
sound = GetPageProp(Pcurrent, 'sound')
PlayVideo(video)
if sound and not(video):
StopMPlayer()
try:
MPlayerPID = spawn(os.P_NOWAIT, \
MPlayerPath, [MPlayerPath, "-quiet", "-really-quiet", \
FileNameEscape + sound + FileNameEscape])
except OSError:
MPlayerPID = 0
SafeCall(GetPageProp(Pcurrent, 'OnEnterOnce'))
SafeCall(GetPageProp(Pcurrent, 'OnEnter'))
if timeout: pygame.time.set_timer(USEREVENT_PAGE_TIMEOUT, timeout)
SetPageProp(Pcurrent, '_shown', shown + 1)
# called each time a page is left
def PageLeft(overview=False):
global FirstPage, LastPage, WantStatus
WantStatus = False
if not overview:
if GetTristatePageProp(Pcurrent, 'reset'):
ResetTimer()
FirstPage = False
LastPage = Pcurrent
if GetPageProp(Pcurrent, '_shown', 0) == 1:
SafeCall(GetPageProp(Pcurrent, 'OnLeaveOnce'))
SafeCall(GetPageProp(Pcurrent, 'OnLeave'))
if TimeTracking:
t1 = pygame.time.get_ticks() - StartTime
dt = (t1 - PageEnterTime + 500) / 1000
if overview:
p = "over"
else:
p = "%4d" % Pcurrent
print "%s%9s%9s%9s" % (p, FormatTime(dt), \
FormatTime(PageEnterTime / 1000), \
FormatTime(t1 / 1000))
# perform a transition to a specified page
def TransitionTo(page):
global Pcurrent, Pnext, Tcurrent, Tnext
global PageCount, Marking, Tracing, Panning, TransitionRunning
# first, stop the auto-timer
pygame.time.set_timer(USEREVENT_PAGE_TIMEOUT, 0)
# invalid page? go away
if not PreloadNextPage(page):
return 0
# notify that the page has been left
PageLeft()
ProjectionFrame.glViewport()
# box fade-out
if GetPageProp(Pcurrent, 'boxes') or Tracing:
skip = BoxFade(lambda t: 1.0 - t)
else:
skip = 0
# some housekeeping
Marking = False
Tracing = False
UpdateCaption(page)
# check if the transition is valid
tpage = min(Pcurrent, Pnext)
if 'transition' in PageProps[tpage]:
tkey = 'transition'
else:
tkey = '_transition'
trans = PageProps[tpage][tkey]
if trans is None:
transtime = 0
else:
transtime = GetPageProp(tpage, 'transtime', TransitionDuration)
try:
dummy = trans.__class__
except AttributeError:
# ah, gotcha! the transition is not yet instantiated!
trans = trans()
PageProps[tpage][tkey] = trans
# backward motion? then swap page buffers now
backward = (Pnext < Pcurrent)
if backward:
Pcurrent, Pnext = (Pnext, Pcurrent)
Tcurrent, Tnext = (Tnext, Tcurrent)
# transition animation
if not(skip) and transtime:
transtime = 1.0 / transtime
TransitionRunning = True
t0 = pygame.time.get_ticks()
while not(VideoPlaying):
if pygame.event.get([KEYDOWN,MOUSEBUTTONUP]):
skip = 1
break
t = (pygame.time.get_ticks() - t0) * transtime
if t >= 1.0: break
if backward: t = 1.0 - t
glEnable(TextureTarget)
trans.render(t)
DrawOverlays()
pygame.display.flip()
TransitionRunning = False
# forward motion => swap page buffers now
if not backward:
Pcurrent, Pnext = (Pnext, Pcurrent)
Tcurrent, Tnext = (Tnext, Tcurrent)
# box fade-in
if not(skip) and GetPageProp(Pcurrent, 'boxes'): BoxFade(lambda t: t)
# finally update the screen and preload the next page
DrawCurrentPage()
PageEntered()
if not PreloadNextPage(GetNextPage(Pcurrent, 1)):
PreloadNextPage(GetNextPage(Pcurrent, -1))
DrawPreviewNextSlide()
return 1
def DrawPreviewNextSlide():
if DualHead:
PrompterNextFrame.glViewport()
DrawPageWorker(Tnext, 1.0, True)
WholeWindow.glViewport()
# zoom mode animation
def ZoomAnimation(targetx, targety, func):
global ZoomX0, ZoomY0, ZoomArea
t0 = pygame.time.get_ticks()
while True:
if pygame.event.get([KEYDOWN,MOUSEBUTTONUP]): break
t = (pygame.time.get_ticks() - t0)* 1.0 / ZoomDuration
if t >= 1.0: break
t = func(t)
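# ease-out mapping: t * (2 - t) starts fast and decelerates smoothly to 1.0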
t = (2.0 - t) * t
ZoomX0 = targetx * t
ZoomY0 = targety * t
ZoomArea = 1.0 - 0.5 * t
DrawCurrentPage()
t = func(1.0)
ZoomX0 = targetx * t
ZoomY0 = targety * t
ZoomArea = 1.0 - 0.5 * t
GenerateSpotMesh()
DrawCurrentPage()
# enter zoom mode
def EnterZoomMode(targetx, targety):
global ZoomMode, IsZoomed, ZoomWarningIssued
ZoomAnimation(targetx, targety, lambda t: t)
ZoomMode = True
if TextureTarget != GL_TEXTURE_2D:
if not ZoomWarningIssued:
print >>sys.stderr, "Sorry, but I can't increase the detail level in zoom mode any further when"
print >>sys.stderr, "GL_ARB_texture_rectangle is used. Please try running Impressive with the"
print >>sys.stderr, "'-e' parameter. If a modern nVidia or ATI graphics card is used, a driver"
print >>sys.stderr, "update may also fix the problem."
ZoomWarningIssued = True
return
if not IsZoomed:
glBindTexture(TextureTarget, Tcurrent)
try:
glTexImage2D(TextureTarget, 0, 3, TexWidth * 2, TexHeight * 2, 0, \
GL_RGB, GL_UNSIGNED_BYTE, PageImage(Pcurrent, True))
except GLerror:
if not ZoomWarningIssued:
print >>sys.stderr, "Sorry, but I can't increase the detail level in zoom mode any further, because"
print >>sys.stderr, "your OpenGL implementation does not support that. Either the texture memory is"
print >>sys.stderr, "exhausted, or there is no support for large textures (%dx%d). If you really" % (TexWidth * 2, TexHeight * 2)
print >>sys.stderr, "need high-res zooming, please try to run Impressive in a smaller resolution"
print >>sys.stderr, "using the -g command-line option."
ZoomWarningIssued = True
return
DrawCurrentPage()
IsZoomed = True
# leave zoom mode (if enabled)
def LeaveZoomMode():
global ZoomMode
if not ZoomMode: return
ZoomAnimation(ZoomX0, ZoomY0, lambda t: 1.0 - t)
ZoomMode = False
Panning = False
# increment/decrement spot radius
def IncrementSpotSize(delta):
global SpotRadius
if not Tracing:
return
SpotRadius = max(SpotRadius + delta, 8)
GenerateSpotMesh()
DrawCurrentPage()
# post-initialize the page transitions
def PrepareTransitions():
Unspecified = 0xAFFED00F
# STEP 1: randomly assign transitions where the user didn't specify them
cnt = sum([1 for page in xrange(1, PageCount + 1) \
if GetPageProp(page, 'transition', Unspecified) == Unspecified])
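# build a pool with one transition per unassigned page by repeating the list
# of available transitions as often as needed, then shuffle it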
newtrans = ((cnt / len(AvailableTransitions) + 1) * AvailableTransitions)[:cnt]
random.shuffle(newtrans)
for page in xrange(1, PageCount + 1):
if GetPageProp(page, 'transition', Unspecified) == Unspecified:
SetPageProp(page, '_transition', newtrans.pop())
# STEP 2: instantiate transitions
for page in PageProps:
for key in ('transition', '_transition'):
if not key in PageProps[page]:
continue
trans = PageProps[page][key]
if trans is not None:
PageProps[page][key] = trans()
# update timer values and screen timer
def TimerTick():
global CurrentTime, ProgressBarPos
redraw = False
newtime = (pygame.time.get_ticks() - StartTime) * 0.001
if EstimatedDuration:
newpos = int(ScreenWidth * newtime / EstimatedDuration)
if newpos != ProgressBarPos:
redraw = True
ProgressBarPos = newpos
newtime = int(newtime)
if TimeDisplay and (CurrentTime != newtime):
redraw = True
CurrentTime = newtime
return redraw
# set cursor visibility
def SetCursor(visible):
global CursorVisible
CursorVisible = visible
if not CursorImage:
pygame.mouse.set_visible(visible)
# shortcut handling
def IsValidShortcutKey(key):
return ((key >= K_a) and (key <= K_z)) \
or ((key >= K_0) and (key <= K_9)) \
or ((key >= K_F1) and (key <= K_F12))
def FindShortcut(shortcut):
for page, props in PageProps.iteritems():
try:
check = props['shortcut']
if type(check) != types.StringType:
check = int(check)
elif (len(check) > 1) and (check[0] in "Ff"):
check = K_F1 - 1 + int(check[1:])
else:
check = ord(check.lower())
except (KeyError, TypeError, ValueError):
continue
if check == shortcut:
return page
return None
def AssignShortcut(page, key):
old_page = FindShortcut(key)
if old_page:
del PageProps[old_page]['shortcut']
if key < 127:
shortcut = chr(key)
elif (key >= K_F1) and (key <= K_F15):
shortcut = "F%d" % (key - K_F1 + 1)
else:
shortcut = int(key)
SetPageProp(page, 'shortcut', shortcut)
##### OVERVIEW MODE ############################################################
def UpdateOverviewTexture():
global OverviewNeedUpdate
glBindTexture(TextureTarget, Tnext)
Loverview.acquire()
try:
glTexImage2D(TextureTarget, 0, 3, TexWidth, TexHeight, 0, \
GL_RGB, GL_UNSIGNED_BYTE, OverviewImage.tostring())
finally:
Loverview.release()
OverviewNeedUpdate = False
# draw the overview page
def DrawOverview():
if VideoPlaying: return
glClear(GL_COLOR_BUFFER_BIT)
glDisable(GL_BLEND)
glEnable(TextureTarget)
glBindTexture(TextureTarget, Tnext)
glColor3ub(192, 192, 192)
DrawFullQuad()
pos = OverviewPos(OverviewSelection)
X0 = PixelX * pos[0]
Y0 = PixelY * pos[1]
X1 = PixelX * (pos[0] + OverviewCellX)
Y1 = PixelY * (pos[1] + OverviewCellY)
glColor3d(1.0, 1.0, 1.0)
glBegin(GL_QUADS)
DrawPoint(X0, Y0)
DrawPoint(X1, Y0)
DrawPoint(X1, Y1)
DrawPoint(X0, Y1)
glEnd()
DrawOSDEx(OSDTitlePos, CurrentOSDCaption)
DrawOSDEx(OSDPagePos, CurrentOSDPage)
DrawOSDEx(OSDStatusPos, CurrentOSDStatus)
DrawOverlays()
pygame.display.flip()
# overview zoom effect, time mapped through func
def OverviewZoom(func):
global TransitionRunning
pos = OverviewPos(OverviewSelection)
X0 = PixelX * (pos[0] + OverviewBorder)
Y0 = PixelY * (pos[1] + OverviewBorder)
X1 = PixelX * (pos[0] - OverviewBorder + OverviewCellX)
Y1 = PixelY * (pos[1] - OverviewBorder + OverviewCellY)
TransitionRunning = True
t0 = pygame.time.get_ticks()
while not(VideoPlaying):
t = (pygame.time.get_ticks() - t0) * 1.0 / ZoomDuration
if t >= 1.0: break
t = func(t)
t1 = t * t
t = 1.0 - t1
zoom = (t * (X1 - X0) + t1) / (X1 - X0)
OX = t * X0 - zoom * X0
OY = t * Y0 - zoom * Y0
glDisable(GL_BLEND)
glEnable(TextureTarget)
glBindTexture(TextureTarget, Tnext)
glBegin(GL_QUADS)
glColor3ub(192, 192, 192)
glTexCoord2d( 0.0, 0.0); glVertex2d(OX, OY)
glTexCoord2d(TexMaxS, 0.0); glVertex2d(OX + zoom, OY)
glTexCoord2d(TexMaxS, TexMaxT); glVertex2d(OX + zoom, OY + zoom)
glTexCoord2d( 0.0, TexMaxT); glVertex2d(OX, OY + zoom)
glColor3ub(255, 255, 255)
glTexCoord2d(X0 * TexMaxS, Y0 * TexMaxT); glVertex2d(OX + X0*zoom, OY + Y0 * zoom)
glTexCoord2d(X1 * TexMaxS, Y0 * TexMaxT); glVertex2d(OX + X1*zoom, OY + Y0 * zoom)
glTexCoord2d(X1 * TexMaxS, Y1 * TexMaxT); glVertex2d(OX + X1*zoom, OY + Y1 * zoom)
glTexCoord2d(X0 * TexMaxS, Y1 * TexMaxT); glVertex2d(OX + X0*zoom, OY + Y1 * zoom)
glEnd()
EnableAlphaBlend()
glBindTexture(TextureTarget, Tcurrent)
glColor4d(1.0, 1.0, 1.0, 1.0 - t * t * t)
glBegin(GL_QUADS)
glTexCoord2d( 0.0, 0.0); glVertex2d(t * X0, t * Y0)
glTexCoord2d(TexMaxS, 0.0); glVertex2d(t * X1 + t1, t * Y0)
glTexCoord2d(TexMaxS, TexMaxT); glVertex2d(t * X1 + t1, t * Y1 + t1)
glTexCoord2d( 0.0, TexMaxT); glVertex2d(t * X0, t * Y1 + t1)
glEnd()
DrawOSDEx(OSDTitlePos, CurrentOSDCaption, alpha_factor=t)
DrawOSDEx(OSDPagePos, CurrentOSDPage, alpha_factor=t)
DrawOSDEx(OSDStatusPos, CurrentOSDStatus, alpha_factor=t)
DrawOverlays()
pygame.display.flip()
TransitionRunning = False
# overview keyboard navigation
def OverviewKeyboardNav(delta):
global OverviewSelection
dest = OverviewSelection + delta
if (dest >= OverviewPageCount) or (dest < 0):
return
OverviewSelection = dest
x, y = OverviewPos(OverviewSelection)
pygame.mouse.set_pos((x + (OverviewCellX / 2), y + (OverviewCellY / 2)))
# overview mode PageProp toggle
def OverviewTogglePageProp(prop, default):
if (OverviewSelection < 0) or (OverviewSelection >= len(OverviewPageMap)):
return
page = OverviewPageMap[OverviewSelection]
SetPageProp(page, prop, not(GetPageProp(page, prop, default)))
UpdateCaption(page, force=True)
DrawOverview()
# overview event handler
def HandleOverviewEvent(event):
global OverviewSelection, TimeDisplay
if event.type == QUIT:
PageLeft(overview=True)
Quit()
elif event.type == VIDEOEXPOSE:
DrawOverview()
elif event.type == KEYDOWN:
if (event.key == K_ESCAPE) or (event.unicode == u'q'):
pygame.event.post(pygame.event.Event(QUIT))
elif event.unicode == u'f':
SetFullscreen(not Fullscreen)
elif event.unicode == u't':
TimeDisplay = not(TimeDisplay)
DrawOverview()
elif event.unicode == u'r':
ResetTimer()
if TimeDisplay: DrawOverview()
elif event.unicode == u's':
SaveInfoScript(InfoScriptPath)
elif event.unicode == u'o':
OverviewTogglePageProp('overview', GetPageProp(Pcurrent, '_overview', True))
elif event.unicode == u'i':
OverviewTogglePageProp('skip', False)
elif event.key == K_UP: OverviewKeyboardNav(-OverviewGridSize)
elif event.key == K_LEFT: OverviewKeyboardNav(-1)
elif event.key == K_RIGHT: OverviewKeyboardNav(+1)
elif event.key == K_DOWN: OverviewKeyboardNav(+OverviewGridSize)
elif event.key == K_TAB:
OverviewSelection = -1
return 0
elif event.key in (K_RETURN, K_KP_ENTER):
return 0
elif IsValidShortcutKey(event.key):
if event.mod & KMOD_SHIFT:
try:
AssignShortcut(OverviewPageMap[OverviewSelection], event.key)
except IndexError:
pass # no valid page selected
else:
# load shortcut
page = FindShortcut(event.key)
if page:
OverviewSelection = OverviewPageMapInv[page]
x, y = OverviewPos(OverviewSelection)
pygame.mouse.set_pos((x + (OverviewCellX / 2), \
y + (OverviewCellY / 2)))
DrawOverview()
elif event.type == MOUSEBUTTONUP:
if event.button == 1:
return 0
elif event.button in (2, 3):
OverviewSelection = -1
return 0
elif event.type == MOUSEMOTION:
pygame.event.clear(MOUSEMOTION)
# mouse move in fullscreen mode -> show mouse cursor and reset mouse timer
if Fullscreen:
pygame.time.set_timer(USEREVENT_HIDE_MOUSE, MouseHideDelay)
SetCursor(True)
# determine highlighted page
OverviewSelection = \
int((event.pos[0] - OverviewOfsX) / OverviewCellX) + \
int((event.pos[1] - OverviewOfsY) / OverviewCellY) * OverviewGridSize
if (OverviewSelection < 0) or (OverviewSelection >= len(OverviewPageMap)):
UpdateCaption(0)
else:
UpdateCaption(OverviewPageMap[OverviewSelection])
DrawOverview()
elif event.type == USEREVENT_HIDE_MOUSE:
# mouse timer event -> hide fullscreen cursor
pygame.time.set_timer(USEREVENT_HIDE_MOUSE, 0)
SetCursor(False)
DrawOverview()
return 1
# overview mode entry/loop/exit function
def DoOverview():
global Pcurrent, Pnext, Tcurrent, Tnext, Tracing, OverviewSelection
global PageEnterTime, OverviewMode
if DualHead:
return # disable until it works in combination with dual head
pygame.time.set_timer(USEREVENT_PAGE_TIMEOUT, 0)
PageLeft()
UpdateOverviewTexture()
if GetPageProp(Pcurrent, 'boxes') or Tracing:
BoxFade(lambda t: 1.0 - t)
Tracing = False
OverviewSelection = OverviewPageMapInv[Pcurrent]
OverviewMode = True
OverviewZoom(lambda t: 1.0 - t)
DrawOverview()
PageEnterTime = pygame.time.get_ticks() - StartTime
while True:
event = pygame.event.poll()
if event.type == NOEVENT:
force_update = OverviewNeedUpdate
if OverviewNeedUpdate:
UpdateOverviewTexture()
if TimerTick() or force_update:
DrawOverview()
pygame.time.wait(20)
elif not HandleOverviewEvent(event):
break
PageLeft(overview=True)
if (OverviewSelection < 0) or (OverviewSelection >= OverviewPageCount):
OverviewSelection = OverviewPageMapInv[Pcurrent]
Pnext = Pcurrent
else:
Pnext = OverviewPageMap[OverviewSelection]
if Pnext != Pcurrent:
Pcurrent = Pnext
RenderPage(Pcurrent, Tcurrent)
UpdateCaption(Pcurrent)
OverviewZoom(lambda t: t)
OverviewMode = False
DrawCurrentPage()
if GetPageProp(Pcurrent, 'boxes'):
BoxFade(lambda t: t)
PageEntered()
if not PreloadNextPage(GetNextPage(Pcurrent, 1)):
PreloadNextPage(GetNextPage(Pcurrent, -1))
##### EVENT HANDLING ###########################################################
# set fullscreen mode
def SetFullscreen(fs, do_init=True):
global Fullscreen
# let pygame do the real work
if do_init:
if fs == Fullscreen: return
if not pygame.display.toggle_fullscreen(): return
Fullscreen = fs
# redraw the current page (pygame is too lazy to send an expose event ...)
ClearScreen()
DrawCurrentPage()
# show cursor and set auto-hide timer
if fs:
pygame.time.set_timer(USEREVENT_HIDE_MOUSE, MouseHideDelay)
else:
pygame.time.set_timer(USEREVENT_HIDE_MOUSE, 0)
SetCursor(True)
# PageProp toggle
def TogglePageProp(prop, default):
global WantStatus
SetPageProp(Pcurrent, prop, not(GetPageProp(Pcurrent, prop, default)))
UpdateCaption(Pcurrent, force=True)
WantStatus = True
DrawCurrentPage()
# main event handling function
def HandleEvent(event):
global HaveMark, ZoomMode, Marking, Tracing, Panning, SpotRadius, FileStats
global MarkUL, MarkLR, MouseDownX, MouseDownY, PanAnchorX, PanAnchorY
global ZoomX0, ZoomY0, RTrunning, RTrestart, StartTime, PageEnterTime
global CurrentTime, TimeDisplay, TimeTracking, ProgressBarPos
if event.type == QUIT:
PageLeft()
Quit()
elif event.type == VIDEOEXPOSE:
DrawCurrentPage()
elif event.type == KEYDOWN:
if VideoPlaying:
StopMPlayer()
DrawCurrentPage()
elif (event.key == K_ESCAPE) or (event.unicode == u'q'):
pygame.event.post(pygame.event.Event(QUIT))
elif event.unicode == u'f':
SetFullscreen(not Fullscreen)
elif (event.key == K_TAB) and (event.mod & KMOD_ALT) and Fullscreen:
SetFullscreen(False)
elif event.unicode == u's':
SaveInfoScript(InfoScriptPath)
elif event.unicode == u'z': # handle QWERTY and QWERTZ keyboards
if ZoomMode:
LeaveZoomMode()
else:
tx, ty = MouseToScreen(pygame.mouse.get_pos())
EnterZoomMode(0.5 * tx, 0.5 * ty)
elif event.unicode == u'b':
FadeMode(0.0)
elif event.unicode == u'w':
FadeMode(1.0)
elif event.unicode == u't':
TimeDisplay = not(TimeDisplay)
DrawCurrentPage()
if TimeDisplay and not(TimeTracking) and FirstPage:
print >>sys.stderr, "Time tracking mode enabled."
TimeTracking = True
print "page duration enter leave"
print "---- -------- -------- --------"
elif event.unicode == u'r':
ResetTimer()
if TimeDisplay: DrawCurrentPage()
elif event.unicode == u'l':
TransitionTo(LastPage)
elif event.unicode == u'o':
TogglePageProp('overview', GetPageProp(Pcurrent, '_overview', True))
elif event.unicode == u'i':
TogglePageProp('skip', False)
elif event.key == K_TAB:
LeaveZoomMode()
DoOverview()
elif event.key in (K_SPACE, K_DOWN, K_RIGHT, K_PAGEDOWN):
LeaveZoomMode()
TransitionTo(GetNextPage(Pcurrent, 1))
elif event.key in (K_BACKSPACE, K_UP, K_LEFT, K_PAGEUP):
LeaveZoomMode()
TransitionTo(GetNextPage(Pcurrent, -1))
elif event.key == K_HOME:
if Pcurrent != 1:
TransitionTo(1)
elif event.key == K_END:
if Pcurrent != PageCount:
TransitionTo(PageCount)
elif event.key in (K_RETURN, K_KP_ENTER):
if not(GetPageProp(Pcurrent, 'boxes')) and Tracing:
BoxFade(lambda t: 1.0 - t)
Tracing = not(Tracing)
if not(GetPageProp(Pcurrent, 'boxes')) and Tracing:
BoxFade(lambda t: t)
elif event.unicode == u'+':
IncrementSpotSize(+8)
elif event.unicode == u'-':
IncrementSpotSize(-8)
elif event.unicode == u'[':
SetGamma(new_gamma=Gamma / GammaStep)
elif event.unicode == u']':
SetGamma(new_gamma=Gamma * GammaStep)
elif event.unicode == u'{':
SetGamma(new_black=BlackLevel - BlackLevelStep)
elif event.unicode == u'}':
SetGamma(new_black=BlackLevel + BlackLevelStep)
elif event.unicode == u'\\':
SetGamma(1.0, 0)
else:
keyfunc = GetPageProp(Pcurrent, 'keys', {}).get(event.unicode, None)
if keyfunc:
SafeCall(keyfunc)
elif IsValidShortcutKey(event.key):
if event.mod & KMOD_SHIFT:
AssignShortcut(Pcurrent, event.key)
else:
# load keyboard shortcut
page = FindShortcut(event.key)
if page and (page != Pcurrent):
TransitionTo(page)
elif event.type == MOUSEBUTTONDOWN:
if VideoPlaying:
Marking = False
Panning = False
return
MouseDownX, MouseDownY = event.pos
if event.button == 1:
MarkUL = MarkLR = MouseToScreen(event.pos)
elif (event.button == 3) and ZoomMode:
PanAnchorX = ZoomX0
PanAnchorY = ZoomY0
elif event.button == 4:
IncrementSpotSize(+8)
elif event.button == 5:
IncrementSpotSize(-8)
elif event.type == MOUSEBUTTONUP:
if VideoPlaying:
StopMPlayer()
DrawCurrentPage()
Marking = False
Panning = False
return
if event.button == 2:
LeaveZoomMode()
DoOverview()
return
if event.button == 1:
if Marking:
# left mouse button released in marking mode -> stop box marking
Marking = False
# reject too small boxes
if (abs(MarkUL[0] - MarkLR[0]) > 0.04) \
and (abs(MarkUL[1] - MarkLR[1]) > 0.03):
boxes = GetPageProp(Pcurrent, 'boxes', [])
oldboxcount = len(boxes)
boxes.append(NormalizeRect(MarkUL[0], MarkUL[1], MarkLR[0], MarkLR[1]))
SetPageProp(Pcurrent, 'boxes', boxes)
if not(oldboxcount) and not(Tracing):
BoxFade(lambda t: t)
DrawCurrentPage()
else:
# left mouse button released, but no marking
LeaveZoomMode()
dest = GetNextPage(Pcurrent, 1)
x, y = event.pos
for valid, target, x0, y0, x1, y1 in GetPageProp(Pcurrent, '_href', []):
if valid and (x >= x0) and (x < x1) and (y >= y0) and (y < y1):
dest = target
break
if type(dest) == types.IntType:
TransitionTo(dest)
else:
RunURL(dest)
if (event.button == 3) and not(Panning):
# right mouse button -> check if a box has to be killed
boxes = GetPageProp(Pcurrent, 'boxes', [])
x, y = MouseToScreen(event.pos)
try:
# if a box is already present around the clicked position, kill it
idx = FindBox(x, y, boxes)
if (len(boxes) == 1) and not(Tracing):
BoxFade(lambda t: 1.0 - t)
del boxes[idx]
SetPageProp(Pcurrent, 'boxes', boxes)
DrawCurrentPage()
except ValueError:
# no box present -> go to previous page
LeaveZoomMode()
TransitionTo(GetNextPage(Pcurrent, -1))
Panning = False
elif event.type == MOUSEMOTION:
pygame.event.clear(MOUSEMOTION)
# mouse move in fullscreen mode -> show mouse cursor and reset mouse timer
if Fullscreen:
pygame.time.set_timer(USEREVENT_HIDE_MOUSE, MouseHideDelay)
SetCursor(True)
# don't react on mouse input during video playback
if VideoPlaying: return
# activate marking if mouse is moved away far enough
if event.buttons[0] and not(Marking):
x, y = event.pos
if (abs(x - MouseDownX) > 4) and (abs(y - MouseDownY) > 4):
Marking = True
# mouse move while marking -> update marking box
if Marking:
MarkLR = MouseToScreen(event.pos)
# mouse move while RMB is pressed -> panning
if event.buttons[2] and ZoomMode:
x, y = event.pos
if not(Panning) and (abs(x - MouseDownX) > 4) and (abs(y - MouseDownY) > 4):
Panning = True
ZoomX0 = PanAnchorX + (MouseDownX - x) * ZoomArea / ScreenWidth
ZoomY0 = PanAnchorY + (MouseDownY - y) * ZoomArea / ScreenHeight
ZoomX0 = min(max(ZoomX0, 0.0), 1.0 - ZoomArea)
ZoomY0 = min(max(ZoomY0, 0.0), 1.0 - ZoomArea)
# if anything changed, redraw the page
if Marking or Tracing or event.buttons[2] or (CursorImage and CursorVisible):
DrawCurrentPage()
elif event.type == USEREVENT_HIDE_MOUSE:
# mouse timer event -> hide fullscreen cursor
pygame.time.set_timer(USEREVENT_HIDE_MOUSE, 0)
SetCursor(False)
DrawCurrentPage()
elif event.type == USEREVENT_PAGE_TIMEOUT:
TransitionTo(GetNextPage(Pcurrent, 1))
elif event.type == USEREVENT_POLL_FILE:
dirty = False
for f in FileProps:
if my_stat(f) != GetFileProp(f, 'stat'):
dirty = True
break
if dirty:
# first, check if the new file is valid
if not os.path.isfile(GetPageProp(Pcurrent, '_file')):
return
# invalidate everything we used to know about the input files
InvalidateCache()
for props in PageProps.itervalues():
for prop in ('_overview_rendered', '_box', '_href'):
if prop in props: del props[prop]
LoadInfoScript()
# force a transition to the current page, reloading it
Pnext = -1
TransitionTo(Pcurrent)
# restart the background renderer thread. this is not completely safe,
# i.e. there's a small chance that we fail to restart the thread, but
# this isn't critical
if CacheMode and BackgroundRendering:
if RTrunning:
RTrestart = True
else:
RTrunning = True
thread.start_new_thread(RenderThread, (Pcurrent, Pnext))
elif event.type == USEREVENT_TIMER_UPDATE:
if TimerTick():
DrawCurrentPage()
##### FILE LIST GENERATION #####################################################
def IsImageFileName(name):
return os.path.splitext(name)[1].lower() in \
(".jpg", ".jpeg", ".png", ".tif", ".tiff", ".bmp", ".ppm", ".pgm")
def IsPlayable(name):
return IsImageFileName(name) or name.lower().endswith(".pdf") or os.path.isdir(name)
def AddFile(name, title=None):
global FileList, FileName
if os.path.isfile(name):
FileList.append(name)
if title: SetFileProp(name, 'title', title)
elif os.path.isdir(name):
images = [os.path.join(name, f) for f in os.listdir(name) if IsImageFileName(f)]
images.sort(lambda a, b: cmp(a.lower(), b.lower()))
if not images:
print >>sys.stderr, "Warning: no image files in directory `%s'" % name
for img in images: AddFile(img)
elif name.startswith('@') and os.path.isfile(name[1:]):
name = name[1:]
dirname = os.path.dirname(name)
try:
f = file(name, "r")
next_title = None
for line in f:
line = [part.strip() for part in line.split('#', 1)]
if len(line) == 1:
subfile = line[0]
title = None
else:
subfile, title = line
if subfile:
AddFile(os.path.normpath(os.path.join(dirname, subfile)), title)
f.close()
except IOError:
print >>sys.stderr, "Error: cannot read list file `%s'" % name
if not FileName:
FileName = name
else:
FileName = ""
else:
files = list(filter(IsPlayable, glob.glob(name)))
if files:
for f in files: AddFile(f)
else:
print >>sys.stderr, "Error: input file `%s' not found" % name
##### INITIALIZATION ###########################################################
def main():
global ScreenWidth, ScreenHeight, TexWidth, TexHeight, TexSize, LogoImage
global TexMaxS, TexMaxT, MeshStepX, MeshStepY, EdgeX, EdgeY, PixelX, PixelY
global OverviewGridSize, OverviewCellX, OverviewCellY
global OverviewOfsX, OverviewOfsY, OverviewImage, OverviewPageCount
global OverviewPageMap, OverviewPageMapInv, FileName, FileList, PageCount
global DocumentTitle, PageProps, LogoTexture, OSDFont
global Pcurrent, Pnext, Tcurrent, Tnext, InitialPage
global CacheFile, CacheFileName
global Extensions, AllowExtensions, TextureTarget, PAR, DAR, TempFileName
global BackgroundRendering, FileStats, RTrunning, RTrestart, StartTime
global CursorImage, CursorVisible, InfoScriptPath
# for dual head support
global DualHead, ProjectionFrame, PrompterNextFrame, PrompterCurrentFrame, PrompterWholeFrame, WholeWindow
# initialize graphics
pygame.init()
print "before detecting screen size"
print Fullscreen, UseAutoScreenSize
if Fullscreen and UseAutoScreenSize:
size = GetScreenSize()
if size:
ScreenWidth, ScreenHeight = size
print "Detected screen size: %dx%d pixels" % (ScreenWidth, ScreenHeight)
print >>sys.stderr, "Detected screen size: %dx%d pixels" % (ScreenWidth, ScreenHeight)
flags = OPENGL|DOUBLEBUF
if Fullscreen:
flags |= FULLSCREEN
try:
if Fullscreen and UseAutoScreenSize:
pygame.display.set_mode((0,0), flags) # do not change the resolution - it is already OK
else:
pygame.display.set_mode((ScreenWidth, ScreenHeight), flags)
except:
print >>sys.stderr, "FATAL: cannot create rendering surface in the desired resolution (%dx%d)" % (ScreenWidth, ScreenHeight)
sys.exit(1)
pygame.display.set_caption(__title__)
pygame.key.set_repeat(500, 30)
if Fullscreen:
pygame.mouse.set_visible(False)
CursorVisible = False
glOrtho(0.0, 1.0, 1.0, 0.0, -10.0, 10.0)
if (Gamma != 1.0) or (BlackLevel != 0):
SetGamma(force=True)
WholeWindow = FrameCoordinates(ScreenWidth, ScreenHeight)
# defaults for single head - whole screen
if not DualHead:
ProjectionFrame = FrameCoordinates(ScreenWidth, ScreenHeight)
# allocate temporary file
TempFileName = tempfile.mktemp(prefix="impressive-", suffix="_tmp")
# some input guesswork
DocumentTitle = os.path.splitext(os.path.split(FileName)[1])[0]
if FileName and not(FileList):
AddFile(FileName)
if not(FileName) and (len(FileList) == 1):
FileName = FileList[0]
# fill the page list
PageCount = 0
for name in FileList:
ispdf = name.lower().endswith(".pdf")
if ispdf:
# PDF input -> try to pre-parse the PDF file
pages = 0
# phase 1: internal PDF parser
try:
pages, pdf_width, pdf_height = analyze_pdf(name)
if Rotation & 1:
pdf_width, pdf_height = (pdf_height, pdf_width)
res = min(ProjectionFrame.width * 72.0 / pdf_width, \
ProjectionFrame.height * 72.0 / pdf_height)
except:
res = 72.0
# phase 2: use pdftk
try:
assert 0 == spawn(os.P_WAIT, pdftkPath, \
["pdftk", FileNameEscape + name + FileNameEscape, \
"dump_data", "output", TempFileName + ".txt"])
title, pages = pdftkParse(TempFileName + ".txt", PageCount)
if DocumentTitle and title: DocumentTitle = title
except:
pass
else:
# Image File
pages = 1
SetPageProp(PageCount + 1, '_title', os.path.split(name)[-1])
# validity check
if not pages:
print >>sys.stderr, "Warning: The input file `%s' could not be analyzed." % name
continue
# add pages and files into PageProps and FileProps
pagerange = list(range(PageCount + 1, PageCount + pages + 1))
for page in pagerange:
SetPageProp(page, '_file', name)
if ispdf: SetPageProp(page, '_page', page - PageCount)
title = GetFileProp(name, 'title')
if title: SetPageProp(page, '_title', title)
SetFileProp(name, 'pages', GetFileProp(name, 'pages', []) + pagerange)
SetFileProp(name, 'offsets', GetFileProp(name, 'offsets', []) + [PageCount])
if not GetFileProp(name, 'stat'): SetFileProp(name, 'stat', my_stat(name))
if ispdf: SetFileProp(name, 'res', res)
PageCount += pages
# no pages? strange ...
if not PageCount:
print >>sys.stderr, "The presentation doesn't have any pages, quitting."
sys.exit(1)
# if rendering is wanted, do it NOW
if RenderToDirectory:
sys.exit(DoRender())
# load and execute info script
if not InfoScriptPath:
InfoScriptPath = FileName + ".info"
LoadInfoScript()
# check if graphics are unaccelerated
renderer = glGetString(GL_RENDERER)
print >>sys.stderr, "OpenGL renderer:", renderer
if renderer.lower() in ("mesa glx indirect", "gdi generic"):
print >>sys.stderr, "WARNING: Using an OpenGL software renderer. Impressive will work, but it will"
print >>sys.stderr, " very likely be too slow to be usable."
# setup the OpenGL texture mode
Extensions = dict([(ext.split('_', 2)[-1], None) for ext in \
glGetString(GL_EXTENSIONS).split()])
if AllowExtensions and ("texture_non_power_of_two" in Extensions):
print >>sys.stderr, "Using GL_ARB_texture_non_power_of_two."
TextureTarget = GL_TEXTURE_2D
TexWidth = ProjectionFrame.width # was: ScreenWidth
TexHeight = ProjectionFrame.height # was: ScreenHeight
TexMaxS = 1.0
TexMaxT = 1.0
elif AllowExtensions and ("texture_rectangle" in Extensions):
print >>sys.stderr, "Using GL_ARB_texture_rectangle."
TextureTarget = 0x84F5 # GL_TEXTURE_RECTANGLE_ARB
TexWidth = ProjectionFrame.width # was: ScreenWidth
TexHeight = ProjectionFrame.height # was: ScreenHeight
TexMaxS = ScreenWidth
TexMaxT = ScreenHeight
else:
print >>sys.stderr, "Using conventional power-of-two textures with padding."
TextureTarget = GL_TEXTURE_2D
TexWidth = npot(ScreenWidth)
TexHeight = npot(ScreenHeight)
TexMaxS = ScreenWidth * 1.0 / TexWidth
TexMaxT = ScreenHeight * 1.0 / TexHeight
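# size of one raw page image in bytes (3 bytes per RGB pixel)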
TexSize = TexWidth * TexHeight * 3
# set up some variables
if DAR is not None:
PAR = DAR / float(ScreenWidth) * float(ScreenHeight)
MeshStepX = 1.0 / MeshResX
MeshStepY = 1.0 / MeshResY
PixelX = 1.0 / ScreenWidth
PixelY = 1.0 / ScreenHeight
EdgeX = BoxEdgeSize * 1.0 / ScreenWidth
EdgeY = BoxEdgeSize * 1.0 / ScreenHeight
if InitialPage is None:
InitialPage = GetNextPage(0, 1)
Pcurrent = InitialPage
# prepare logo image
LogoImage = Image.open(StringIO.StringIO(LOGO))
LogoTexture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, LogoTexture)
glTexImage2D(GL_TEXTURE_2D, 0, 1, 256, 64, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, LogoImage.tostring())
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
DrawLogo()
pygame.display.flip()
# initialize OSD font
try:
OSDFont = GLFont(FontTextureWidth, FontTextureHeight, FontList, FontSize, search_path=FontPath)
DrawLogo()
titles = []
for key in ('title', '_title'):
titles.extend([p[key] for p in PageProps.itervalues() if key in p])
if titles:
OSDFont.AddString("".join(titles))
except ValueError:
print >>sys.stderr, "The OSD font size is too large, the OSD will be rendered incompletely."
except IOError:
print >>sys.stderr, "Could not open OSD font file, disabling OSD."
except (NameError, AttributeError, TypeError):
print >>sys.stderr, "Your version of PIL is too old or incomplete, disabling OSD."
# initialize mouse cursor
if CursorImage:
try:
CursorImage = PrepareCustomCursor(Image.open(CursorImage))
except:
print >>sys.stderr, "Could not open the mouse cursor image, using standard cursor."
CursorImage = False
# set up page cache
if CacheMode == PersistentCache:
if not CacheFileName:
CacheFileName = FileName + ".cache"
InitPCache()
if CacheMode == FileCache:
CacheFile = tempfile.TemporaryFile(prefix="impressive-", suffix=".cache")
# initialize overview metadata
OverviewPageMap=[i for i in xrange(1, PageCount + 1) \
if GetPageProp(i, ('overview', '_overview'), True) \
and (i >= PageRangeStart) and (i <= PageRangeEnd)]
OverviewPageCount = max(len(OverviewPageMap), 1)
OverviewPageMapInv = {}
for page in xrange(1, PageCount + 1):
OverviewPageMapInv[page] = len(OverviewPageMap) - 1
for i in xrange(len(OverviewPageMap)):
if OverviewPageMap[i] >= page:
OverviewPageMapInv[page] = i
break
# initialize overview page geometry
OverviewGridSize = 1
while OverviewPageCount > OverviewGridSize * OverviewGridSize:
OverviewGridSize += 1
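# OverviewGridSize is now the smallest N with N*N >= OverviewPageCount;
# the cell sizes and offsets below center that grid on the screen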
OverviewCellX = int(ScreenWidth / OverviewGridSize)
OverviewCellY = int(ScreenHeight / OverviewGridSize)
OverviewOfsX = int((ScreenWidth - OverviewCellX * OverviewGridSize)/2)
OverviewOfsY = int((ScreenHeight - OverviewCellY * \
int((OverviewPageCount + OverviewGridSize - 1) / OverviewGridSize)) / 2)
OverviewImage = Image.new('RGB', (TexWidth, TexHeight))
# fill overlay "dummy" images
dummy = LogoImage.copy()
border = max(OverviewLogoBorder, 2 * OverviewBorder)
maxsize = (OverviewCellX - border, OverviewCellY - border)
if (dummy.size[0] > maxsize[0]) or (dummy.size[1] > maxsize[1]):
dummy.thumbnail(ZoomToFit(dummy.size, maxsize), Image.ANTIALIAS)
margX = int((OverviewCellX - dummy.size[0]) / 2)
margY = int((OverviewCellY - dummy.size[1]) / 2)
dummy = dummy.convert(mode='RGB')
for page in range(OverviewPageCount):
pos = OverviewPos(page)
OverviewImage.paste(dummy, (pos[0] + margX, pos[1] + margY))
del dummy
# set up background rendering
if not EnableBackgroundRendering:
print >>sys.stderr, "Background rendering isn't available on this platform."
BackgroundRendering = False
# if caching is enabled, pre-render all pages
if CacheMode and not(BackgroundRendering):
DrawLogo()
DrawProgress(0.0)
pygame.display.flip()
for pdf in FileProps:
if pdf.lower().endswith(".pdf"):
ParsePDF(pdf)
stop = False
progress = 0.0
for page in range(InitialPage, PageCount + 1) + range(1, InitialPage):
event = pygame.event.poll()
while event.type != NOEVENT:
if event.type == KEYDOWN:
if (event.key == K_ESCAPE) or (event.unicode == u'q'):
Quit()
stop = True
elif event.type == MOUSEBUTTONUP:
stop = True
event = pygame.event.poll()
if stop: break
if (page >= PageRangeStart) and (page <= PageRangeEnd):
PageImage(page)
DrawLogo()
progress += 1.0 / PageCount
DrawProgress(progress)
pygame.display.flip()
# create buffer textures
DrawLogo()
pygame.display.flip()
glEnable(TextureTarget)
Tcurrent = glGenTextures(1)
Tnext = glGenTextures(1)
for T in (Tcurrent, Tnext):
glBindTexture(TextureTarget, T)
glTexParameteri(TextureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(TextureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(TextureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP)
glTexParameteri(TextureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP)
# prebuffer current and next page
Pnext = 0
RenderPage(Pcurrent, Tcurrent)
PageEntered(update_time=False)
PreloadNextPage(GetNextPage(Pcurrent, 1))
# some other preparations
PrepareTransitions()
GenerateSpotMesh()
if PollInterval:
pygame.time.set_timer(USEREVENT_POLL_FILE, PollInterval * 1000)
# start the background rendering thread
if CacheMode and BackgroundRendering:
RTrunning = True
thread.start_new_thread(RenderThread, (Pcurrent, Pnext))
# start output and enter main loop
StartTime = pygame.time.get_ticks()
pygame.time.set_timer(USEREVENT_TIMER_UPDATE, 100)
if not(Fullscreen) and CursorImage:
pygame.mouse.set_visible(False)
DrawCurrentPage()
UpdateCaption(Pcurrent)
DrawPreviewNextSlide()
ClearScreen()
TransitionTo(1)
while True:
HandleEvent(pygame.event.wait())
# wrapper around main() that ensures proper uninitialization
def run_main():
global CacheFile
# HACK: exception handling commented out for better debugging
# try:
if True:
main()
# finally:
StopMPlayer()
# ensure that background rendering is halted
Lrender.acquire()
Lcache.acquire()
# remove all temp files
if 'CacheFile' in globals():
del CacheFile
for tmp in glob.glob(TempFileName + "*"):
try:
os.remove(tmp)
except OSError:
pass
pygame.quit()
##### COMMAND-LINE PARSER AND HELP #############################################
def if_op(cond, res_then, res_else):
if cond: return res_then
else: return res_else
def HelpExit(code=0):
print """A nice presentation tool.
Usage: """+os.path.basename(sys.argv[0])+""" [OPTION...] <INPUT(S)...>
You may play a PDF file, a directory containing image files, or
individual image files.
Input options:
-r, --rotate <n> rotate pages clockwise in 90-degree steps
--scale scale images to fit screen (not used in PDF mode)
--supersample use supersampling (only used in PDF mode)
-s shorthand for --supersample (PDF files) or --scale (image files)
-I, --script <path> set the path of the info script
-u, --poll <seconds> check periodically if the source file has been
updated and reload it if it did
-o, --output <dir> don't display the presentation, only render to .png
-h, --help show this help text and exit
Output options:
-f, --fullscreen """+if_op(Fullscreen,"do NOT ","")+"""start in fullscreen mode
-g, --geometry <WxH> set window size or fullscreen resolution
--dual-head <projection-geometry>,<prompter-geometry>
enables dual head operation:
* external output shows current slide
* on the notebook you can see the current and
the next slide to better prepare your thoughts
On a modern Linux system, dual head is driven by the xrandr
extension, which defines one big screen spanning both
outputs. Geometry strings follow the X11 convention of
size and offset, e.g. 800x600+1280+0
<projection-geometry> describes the area, which is seen
by your audience
<prompter-geometry> describes the area, which is seen
by you, e.g. your notebook
-A, --aspect <X:Y> adjust for a specific display aspect ratio (e.g. 5:4)
-G, --gamma <G[:BL]> specify startup gamma and black level
Page options:
-i, --initialpage <n> start with page <n>
-p, --pages <A-B> only cache pages in the specified range;
implicitly sets -i <A>
-w, --wrap go back to the first page after the last page
-a, --auto <seconds> automatically advance to next page after some seconds
-O, --autooverview <x> automatically derive page visibility on overview page
-O first = show pages with captions
-O last = show pages before pages with captions
Display options:
-t, --transition <trans[,trans2...]>
force a specific transition or set of transitions
-l, --listtrans print a list of available transitions and exit
-F, --font <file> use a specific TrueType font file for the OSD
-S, --fontsize <px> specify the OSD font size in pixels
-C, --cursor <F[:X,Y]> use a .png image as the mouse cursor
-L, --layout <spec> set the OSD layout (please read the documentation)
Timing options:
-M, --minutes display time in minutes, not seconds
-d, --duration <time> set the desired duration of the presentation and show
a progress bar at the bottom of the screen
-T, --transtime <ms> set transition duration in milliseconds
-D, --mousedelay <ms> set mouse hide delay for fullscreen mode (in ms)
-B, --boxfade <ms> set highlight box fade duration in milliseconds
-Z, --zoom <ms> set zoom duration in milliseconds
Advanced options:
-c, --cache <mode> set page cache mode:
-c none = disable caching completely
-c memory = store cache in RAM
-c disk = store cache on disk temporarily
-c persistent = store cache on disk persistently
--cachefile <path> set the persistent cache file path (implies -cp)
-b, --noback don't pre-render images in the background
-P, --gspath <path> set path to GhostScript or pdftoppm executable
-R, --meshres <XxY> set mesh resolution for effects (default: 48x36)
-e, --noext don't use OpenGL texture size extensions
For detailed information, visit""", __website__
sys.exit(code)
def ListTransitions():
print "Available transitions:"
standard = dict([(tc.__name__, None) for tc in AvailableTransitions])
trans = [(tc.__name__, tc.__doc__) for tc in AllTransitions]
trans.append(('None', "no transition"))
trans.sort()
maxlen = max([len(item[0]) for item in trans])
for name, desc in trans:
if name in standard:
star = '*'
else:
star = ' '
print star, name.ljust(maxlen), '-', desc
print "(transitions with * are enabled by default)"
sys.exit(0)
def TryTime(s, regexp, func):
m = re.match(regexp, s, re.I)
if not m: return 0
return func(map(int, m.groups()))
def ParseTime(s):
return TryTime(s, r'([0-9]+)s?$', lambda m: m[0]) \
or TryTime(s, r'([0-9]+)m$', lambda m: m[0] * 60) \
or TryTime(s, r'([0-9]+)[m:]([0-9]+)[ms]?$', lambda m: m[0] * 60 + m[1]) \
or TryTime(s, r'([0-9]+)[h:]([0-9]+)[hm]?$', lambda m: m[0] * 3600 + m[1] * 60) \
or TryTime(s, r'([0-9]+)[h:]([0-9]+)[m:]([0-9]+)s?$', lambda m: m[0] * 3600 + m[1] * 60 + m[2])
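# Illustrative results, derived from the TryTime patterns above (added here
# as documentation, not part of the original option parser):
#   ParseTime("90")      -> 90    (plain seconds)
#   ParseTime("5m")      -> 300   (minutes)
#   ParseTime("1:30")    -> 90    (M:SS)
#   ParseTime("2h15m")   -> 8100  (hours and minutes)
#   ParseTime("1:02:03") -> 3723  (H:MM:SS)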
def opterr(msg):
print >>sys.stderr, "command line parse error:", msg
print >>sys.stderr, "use `%s -h' to get help" % sys.argv[0]
print >>sys.stderr, "or visit", __website__, "for full documentation"
sys.exit(2)
def SetTransitions(list):
global AvailableTransitions
index = dict([(tc.__name__.lower(), tc) for tc in AllTransitions])
index['none'] = None
AvailableTransitions=[]
for trans in list.split(','):
try:
AvailableTransitions.append(index[trans.lower()])
except KeyError:
opterr("unknown transition `%s'" % trans)
def ParseLayoutPosition(value):
xpos = []
ypos = []
for c in value.strip().lower():
if c == 't': ypos.append(0)
elif c == 'b': ypos.append(1)
elif c == 'l': xpos.append(0)
elif c == 'r': xpos.append(1)
elif c == 'c': xpos.append(2)
else: opterr("invalid position specification `%s'" % value)
if not xpos: opterr("position `%s' lacks X component" % value)
if not ypos: opterr("position `%s' lacks Y component" % value)
if len(xpos)>1: opterr("position `%s' has multiple X components" % value)
if len(ypos)>1: opterr("position `%s' has multiple Y components" % value)
return (xpos[0] << 1) | ypos[0]
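# Worked examples of the packing above (the x component is shifted left by
# one bit, the y component sits in the low bit); these values are derived
# from the code, not taken from the original source:
#   ParseLayoutPosition("tl") -> 0   (left / top)
#   ParseLayoutPosition("br") -> 3   (right / bottom)
#   ParseLayoutPosition("tc") -> 4   (center / top)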
def SetLayoutSubSpec(key, value):
global OSDTimePos, OSDTitlePos, OSDPagePos, OSDStatusPos
global OSDAlpha, OSDMargin
lkey = key.strip().lower()
if lkey in ('a', 'alpha', 'opacity'):
try:
OSDAlpha = float(value)
except ValueError:
opterr("invalid alpha value `%s'" % value)
if OSDAlpha > 1.0:
OSDAlpha *= 0.01 # accept percentages, too
if (OSDAlpha < 0.0) or (OSDAlpha > 1.0):
opterr("alpha value %s out of range" % value)
elif lkey in ('margin', 'dist', 'distance'):
try:
OSDMargin = float(value)
except ValueError:
opterr("invalid margin value `%s'" % value)
if OSDMargin < 0:
opterr("margin value %s out of range" % value)
elif lkey in ('t', 'time'):
OSDTimePos = ParseLayoutPosition(value)
elif lkey in ('title', 'caption'):
OSDTitlePos = ParseLayoutPosition(value)
elif lkey in ('page', 'number'):
OSDPagePos = ParseLayoutPosition(value)
elif lkey in ('status', 'info'):
OSDStatusPos = ParseLayoutPosition(value)
else:
opterr("unknown layout element `%s'" % key)
def SetLayout(spec):
for sub in spec.replace(':', '=').split(','):
try:
key, value = sub.split('=')
except ValueError:
opterr("invalid layout spec `%s'" % sub)
SetLayoutSubSpec(key, value)
def ParseCacheMode(arg):
arg = arg.strip().lower()
if "none".startswith(arg): return NoCache
if "off".startswith(arg): return NoCache
if "memory".startswith(arg): return MemCache
if "disk".startswith(arg): return FileCache
if "file".startswith(arg): return FileCache
if "persistent".startswith(arg): return PersistentCache
opterr("invalid cache mode `%s'" % arg)
def ParseAutoOverview(arg):
arg = arg.strip().lower()
if "off".startswith(arg): return Off
if "first".startswith(arg): return First
if "last".startswith(arg): return Last
    try:
        i = int(arg)
        assert (i >= Off) and (i <= Last)
        return i
    except:
        opterr("invalid auto-overview mode `%s'" % arg)
def ParseOptions(argv):
global FileName, FileList, Fullscreen, Scaling, Supersample, CacheMode
global TransitionDuration, MouseHideDelay, BoxFadeDuration, ZoomDuration
global ScreenWidth, ScreenHeight, MeshResX, MeshResY, InitialPage, Wrap
global AutoAdvance, RenderToDirectory, Rotation, AllowExtensions, DAR
global BackgroundRendering, UseAutoScreenSize, PollInterval, CacheFileName
global PageRangeStart, PageRangeEnd, FontList, FontSize, Gamma, BlackLevel
global EstimatedDuration, CursorImage, CursorHotspot, MinutesOnly
global GhostScriptPath, pdftoppmPath, UseGhostScript, InfoScriptPath
global AutoOverview
global DualHead, ProjectionFrame, PrompterNextFrame, PrompterCurrentFrame, PrompterWholeFrame, WholeWindow
try: # unused short options: jknqvxyzEHJKNQUVWXY
opts, args = getopt.getopt(argv, \
"hfg:sc:i:wa:t:lo:r:T:D:B:Z:P:R:eA:mbp:u:F:S:G:d:C:ML:I:O:", \
["help", "fullscreen", "geometry=", "scale", "supersample", \
"nocache", "initialpage=", "wrap", "auto", "listtrans", "output=", \
"rotate=", "transition=", "transtime=", "mousedelay=", "boxfade=", \
"zoom=", "gspath=", "meshres=", "noext", "aspect", "memcache", \
"noback", "pages=", "poll=", "font=", "fontsize=", "gamma=",
"duration=", "cursor=", "minutes", "layout=", "script=", "cache=",
"cachefile=", "autooverview=", "dual-head="])
except getopt.GetoptError, message:
opterr(message)
for opt, arg in opts:
if opt in ("-h", "--help"):
HelpExit()
if opt in ("-l", "--listtrans"):
ListTransitions()
if opt in ("-f", "--fullscreen"):
Fullscreen = not(Fullscreen)
if opt in ("-e", "--noext"):
AllowExtensions = not(AllowExtensions)
if opt in ("-s", "--scale"):
Scaling = not(Scaling)
if opt in ("-s", "--supersample"):
Supersample = 2
if opt in ("-w", "--wrap"):
Wrap = not(Wrap)
if opt in ("-O", "--autooverview"):
AutoOverview = ParseAutoOverview(arg)
if opt in ("-c", "--cache"):
CacheMode = ParseCacheMode(arg)
if opt == "--nocache":
print >>sys.stderr, "Note: The `--nocache' option is deprecated, use `--cache none' instead."
CacheMode = NoCache
if opt in ("-m", "--memcache"):
print >>sys.stderr, "Note: The `--memcache' option is deprecated, use `--cache memory' instead."
CacheMode = MemCache
if opt == "--cachefile":
CacheFileName = arg
CacheMode = PersistentCache
if opt in ("-M", "--minutes"):
MinutesOnly = not(MinutesOnly)
if opt in ("-b", "--noback"):
BackgroundRendering = not(BackgroundRendering)
if opt in ("-t", "--transition"):
SetTransitions(arg)
if opt in ("-L", "--layout"):
SetLayout(arg)
if opt in ("-o", "--output"):
RenderToDirectory = arg
if opt in ("-I", "--script"):
InfoScriptPath = arg
if opt in ("-F", "--font"):
FontList = [arg]
if opt in ("-P", "--gspath"):
UseGhostScript = (arg.replace("\\", "/").split("/")[-1].lower().find("pdftoppm") < 0)
if UseGhostScript:
GhostScriptPath = arg
else:
pdftoppmPath = arg
if opt in ("-S", "--fontsize"):
try:
FontSize = int(arg)
assert FontSize > 0
except:
opterr("invalid parameter for --fontsize")
if opt in ("-i", "--initialpage"):
try:
InitialPage = int(arg)
assert InitialPage > 0
except:
opterr("invalid parameter for --initialpage")
if opt in ("-d", "--duration"):
try:
EstimatedDuration = ParseTime(arg)
assert EstimatedDuration > 0
except:
opterr("invalid parameter for --duration")
if opt in ("-a", "--auto"):
try:
AutoAdvance = int(arg) * 1000
assert (AutoAdvance > 0) and (AutoAdvance <= 86400000)
except:
opterr("invalid parameter for --auto")
if opt in ("-T", "--transtime"):
try:
TransitionDuration = int(arg)
assert (TransitionDuration >= 0) and (TransitionDuration < 32768)
except:
opterr("invalid parameter for --transtime")
if opt in ("-D", "--mousedelay"):
try:
MouseHideDelay = int(arg)
assert (MouseHideDelay >= 0) and (MouseHideDelay < 32768)
except:
opterr("invalid parameter for --mousedelay")
if opt in ("-B", "--boxfade"):
try:
BoxFadeDuration = int(arg)
assert (BoxFadeDuration >= 0) and (BoxFadeDuration < 32768)
except:
opterr("invalid parameter for --boxfade")
if opt in ("-Z", "--zoom"):
try:
ZoomDuration = int(arg)
assert (ZoomDuration >= 0) and (ZoomDuration < 32768)
except:
opterr("invalid parameter for --zoom")
if opt in ("-r", "--rotate"):
try:
Rotation = int(arg)
except:
opterr("invalid parameter for --rotate")
while Rotation < 0: Rotation += 4
Rotation = Rotation & 3
if opt in ("-u", "--poll"):
try:
PollInterval = int(arg)
assert PollInterval >= 0
except:
opterr("invalid parameter for --poll")
if opt in ("-g", "--geometry"):
try:
ScreenWidth, ScreenHeight = map(int, arg.split("x"))
assert (ScreenWidth >= 320) and (ScreenWidth < 4096)
assert (ScreenHeight >= 200) and (ScreenHeight < 4096)
UseAutoScreenSize = False
except:
opterr("invalid parameter for --geometry")
if opt in ("--dual-head"):
try:
DualHead = True
if arg == None:
pass # TODO: run xrandr -q to automatically recognize
else:
projection, prompter = arg.split(",")
ProjectionFrame = FrameCoordinates.parse(projection)
PrompterWholeFrame = FrameCoordinates.parse(prompter)
print "ProjectionFrame: ", ProjectionFrame
print "PrompterWholeFrame: ", PrompterWholeFrame
prompter_width = PrompterWholeFrame.width*9/10/2
PrompterCurrentFrame = FrameCoordinates(
prompter_width, PrompterWholeFrame.height)
PrompterNextFrame = FrameCoordinates(
prompter_width, PrompterCurrentFrame.height,
PrompterWholeFrame.width-PrompterCurrentFrame.width)
PrompterCurrentFrame.adjust_to_aspect_ratio((4,3), (5,3), (0,1))
PrompterNextFrame.adjust_to_aspect_ratio((4,3), (5,3), (1,0))
UseAutoScreenSize = True
except:
opterr("invalid parameter for --dual-head")
if opt in ("-R", "--meshres"):
try:
MeshResX, MeshResY = map(int, arg.split("x"))
assert (MeshResX > 0) and (MeshResX <= ScreenWidth)
assert (MeshResY > 0) and (MeshResY <= ScreenHeight)
except:
opterr("invalid parameter for --meshres")
if opt in ("-p", "--pages"):
try:
PageRangeStart, PageRangeEnd = map(int, arg.split("-"))
assert PageRangeStart > 0
assert PageRangeStart <= PageRangeEnd
except:
opterr("invalid parameter for --pages")
InitialPage=PageRangeStart
if opt in ("-A", "--aspect"):
try:
if ':' in arg:
fx, fy = map(float, arg.split(':'))
DAR = fx / fy
else:
DAR = float(arg)
assert DAR > 0.0
except:
opterr("invalid parameter for --aspect")
if opt in ("-G", "--gamma"):
try:
if ':' in arg:
arg, bl = arg.split(':', 1)
BlackLevel = int(bl)
Gamma = float(arg)
assert Gamma > 0.0
assert (BlackLevel >= 0) and (BlackLevel < 255)
except:
opterr("invalid parameter for --gamma")
if opt in ("-C", "--cursor"):
try:
if ':' in arg:
arg = arg.split(':')
assert len(arg) > 1
CursorImage = ':'.join(arg[:-1])
CursorHotspot = map(int, arg[-1].split(','))
else:
CursorImage = arg
except:
opterr("invalid parameter for --cursor")
for arg in args:
AddFile(arg)
if not FileList:
opterr("no playable files specified")
return
# glob and filter argument list
files = []
for arg in args:
files.extend(glob.glob(arg))
files = list(filter(IsPlayable, files))
# if only one argument is specified, use it as the informal file name
if len(files) == 1:
FileName = files[0]
else:
FileName = ""
# construct final FileList by expanding directories to image file lists
FileList = []
for item in files:
if os.path.isdir(item):
images = [os.path.join(item, f) for f in os.listdir(item) if IsImageFileName(f)]
images.sort(lambda a, b: cmp(a.lower(), b.lower()))
FileList.extend(images)
else:
FileList.append(item)
if not FileList:
opterr("no playable files specified")
# use this function if you intend to use Impressive as a library
def run():
try:
run_main()
except SystemExit, e:
return e.code
if __name__=="__main__":
ParseOptions(sys.argv[1:])
run_main()
|
geekq/impressive
|
impressive.py
|
Python
|
gpl-2.0
| 160,901
|
[
"VisIt"
] |
fffdc6fcb8de76d7e57df3f20f54f13ceb38cc738f306a65ea11b34faacf2cb4
|
import time
from netCDF4 import Dataset
from oceansar.ocs_io.netcdf import NETCDFHandler
class SkimRawFile(NETCDFHandler):
""" Raw data file generated by the OASIS Simulator
:param file_name: File name
:param mode: Access mode (w = write, r = read, r+ = read + append)
:param raw_dim: Raw data dimensions
:param format: netCDF format
.. note::
Refer to netCDF4 Python library for details on access mode and
available formats
"""
def __init__(self, file_name, mode, raw_dim=None, format='NETCDF4'):
        self.__file__ = Dataset(file_name, mode, format=format)
# If writing, define file
if mode == 'w':
# Set file attributes
self.__file__.description = 'OCEANSAR SAR Raw Data File'
self.__file__.history = 'Created ' + time.ctime(time.time())
self.__file__.source = 'OCEANSAR Simulator'
# Dimensions
if not raw_dim:
raise ValueError('Raw data dimensions are needed when creating a new file!')
# For SKIM, not very nice to decide this like this, but ok
self.__file__.createDimension('pol_dim', raw_dim[0])
self.__file__.createDimension('az_dim', raw_dim[1])
self.__file__.createDimension('rg_dim', raw_dim[2])
raw_data_r = self.__file__.createVariable('raw_data_r',
'f8',
('pol_dim',
'az_dim',
'rg_dim'))
raw_data_i = self.__file__.createVariable('raw_data_i',
'f8',
('pol_dim',
'az_dim',
'rg_dim'))
dop_ref = self.__file__.createVariable('dop_ref', 'f8', ('rg_dim',))
dop_ref.units = '[Hz]'
# Variables
inc_angle = self.__file__.createVariable('inc_angle', 'f8')
inc_angle.units = '[deg]'
f0 = self.__file__.createVariable('f0', 'f8')
f0.units = '[Hz]'
ant_L = self.__file__.createVariable('ant_L', 'f8')
ant_L.units = '[m]'
prf = self.__file__.createVariable('prf', 'f8')
prf.units = '[Hz]'
v_ground = self.__file__.createVariable('v_ground', 'f8')
v_ground.units = '[m/s]'
orbit_alt = self.__file__.createVariable('orbit_alt', 'f8')
orbit_alt.units = '[m]'
sr0 = self.__file__.createVariable('sr0', 'f8')
sr0.units = '[m]'
rg_sampling = self.__file__.createVariable('rg_sampling', 'f8')
rg_sampling.units = '[Hz]'
rg_bw = self.__file__.createVariable('rg_bw', 'f8')
rg_bw.units = '[Hz]'
azimuth = self.__file__.createVariable('azimuth', 'f8')
azimuth.units = '[deg]'
raw_data_r.units = '[]'
raw_data_i.units = '[]'
NRCS_avg = self.__file__.createVariable('NRCS_avg',
'f8',
('pol_dim', 'az_dim'))
NRCS_avg.units = '[]'
class RawFile(NETCDFHandler):
""" Raw data file generated by the OASIS Simulator
:param file_name: File name
:param mode: Access mode (w = write, r = read, r+ = read + append)
:param raw_dim: Raw data dimensions
:param format: netCDF format
.. note::
Refer to netCDF4 Python library for details on access mode and
available formats
"""
def __init__(self, file_name, mode, raw_dim=None, format='NETCDF4'):
        self.__file__ = Dataset(file_name, mode, format=format)
# If writing, define file
if mode == 'w':
# Set file attributes
self.__file__.description = 'OCEANSAR SAR Raw Data File'
self.__file__.history = 'Created ' + time.ctime(time.time())
self.__file__.source = 'OCEANSAR Simulator'
# Dimensions
if not raw_dim:
raise ValueError('Raw data dimensions are needed when creating a new file!')
if len(raw_dim) == 4:
self.__file__.createDimension('pol_dim', raw_dim[0])
self.__file__.createDimension('ch_dim', raw_dim[1])
self.__file__.createDimension('az_dim', raw_dim[2])
self.__file__.createDimension('rg_dim', raw_dim[3])
num_ch = self.__file__.createVariable('num_ch', 'i4')
num_ch.units = '[]'
raw_data_r = self.__file__.createVariable('raw_data_r',
'f8',
('pol_dim',
'ch_dim',
'az_dim',
'rg_dim'))
raw_data_i = self.__file__.createVariable('raw_data_i',
'f8',
('pol_dim',
'ch_dim',
'az_dim',
'rg_dim'))
else:
# For SKIM, not very nice to decide this like this, but ok
self.__file__.createDimension('pol_dim', raw_dim[0])
self.__file__.createDimension('az_dim', raw_dim[1])
self.__file__.createDimension('rg_dim', raw_dim[2])
raw_data_r = self.__file__.createVariable('raw_data_r',
'f8',
('pol_dim',
'az_dim',
'rg_dim'))
raw_data_i = self.__file__.createVariable('raw_data_i',
'f8',
('pol_dim',
'az_dim',
'rg_dim'))
# Variables
inc_angle = self.__file__.createVariable('inc_angle', 'f8')
inc_angle.units = '[deg]'
f0 = self.__file__.createVariable('f0', 'f8')
f0.units = '[Hz]'
ant_L = self.__file__.createVariable('ant_L', 'f8')
ant_L.units = '[m]'
prf = self.__file__.createVariable('prf', 'f8')
prf.units = '[Hz]'
v_ground = self.__file__.createVariable('v_ground', 'f8')
v_ground.units = '[m/s]'
orbit_alt = self.__file__.createVariable('orbit_alt', 'f8')
orbit_alt.units = '[m]'
sr0 = self.__file__.createVariable('sr0', 'f8')
sr0.units = '[m]'
rg_sampling = self.__file__.createVariable('rg_sampling', 'f8')
rg_sampling.units = '[Hz]'
rg_bw = self.__file__.createVariable('rg_bw', 'f8')
rg_bw.units = '[Hz]'
b_ati = self.__file__.createVariable('b_ati', 'f8', 'ch_dim')
b_ati.units = '[m]'
b_xti = self.__file__.createVariable('b_xti', 'f8', 'ch_dim')
b_xti.units = '[m]'
raw_data_r.units = '[]'
raw_data_i.units = '[]'
NRCS_avg = self.__file__.createVariable('NRCS_avg',
'f8',
('pol_dim', 'az_dim'))
NRCS_avg.units = '[]'
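# Hedged usage sketch (not part of the original module): one way a minimal
# SKIM raw-data file could be written with the class above. The dimensions
# and values are illustrative assumptions, and the underlying netCDF4 Dataset
# is reached directly through `__file__`; NETCDFHandler may offer nicer
# accessors in the real code base.
if __name__ == '__main__':
    import numpy as np

    raw = SkimRawFile('skim_raw_demo.nc', 'w', raw_dim=(1, 128, 256))
    data = np.random.randn(1, 128, 256) + 1j * np.random.randn(1, 128, 256)
    raw.__file__.variables['raw_data_r'][:] = data.real
    raw.__file__.variables['raw_data_i'][:] = data.imag
    raw.__file__.variables['dop_ref'][:] = np.zeros(256)
    raw.__file__.close()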
|
pakodekker/oceansar
|
oceansar/ocs_io/raw.py
|
Python
|
gpl-3.0
| 8,090
|
[
"NetCDF"
] |
0a0c094ae5e647fa77ab22e6158ab61854ddcfa6a7587247f7ebd64606cf787a
|
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Example from "Writing Property Subclasses".
from ndb import *
# Pay no attention to the testbed behind the curtain.
from google.appengine.ext import testbed
tb = testbed.Testbed()
tb.activate()
tb.init_datastore_v3_stub()
tb.init_memcache_stub()
from datetime import date
class FuzzyDate(object):
def __init__(self, first, last=None):
assert isinstance(first, date)
assert last is None or isinstance(last, date)
self.first = first
self.last = last or first
def __repr__(self):
return 'FuzzyDate(%r, %r)' % (self.first, self.last)
class FuzzyDateModel(Model):
first = DateProperty()
last = DateProperty()
class FuzzyDateProperty(StructuredProperty):
def __init__(self, **kwds):
super(FuzzyDateProperty, self).__init__(FuzzyDateModel, **kwds)
def _validate(self, value):
assert isinstance(value, FuzzyDate)
def _to_base_type(self, value):
return FuzzyDateModel(first=value.first, last=value.last)
def _from_base_type(self, value):
return FuzzyDate(value.first, value.last)
# Class to record historic people and events in their life.
class HistoricPerson(Model):
name = StringProperty()
birth = FuzzyDateProperty()
death = FuzzyDateProperty()
# Parallel lists:
event_dates = FuzzyDateProperty(repeated=True)
event_names = StringProperty(repeated=True)
# Record Christopher Columbus.
columbus = HistoricPerson(
name='Christopher Columbus',
birth=FuzzyDate(date(1451, 8, 22), date(1451, 10, 31)),
death=FuzzyDate(date(1506, 5, 20)),
event_dates=[FuzzyDate(date(1492, 1, 1), date(1492, 12, 31))],
event_names=['Discovery of America'])
columbus.put()
# Query for historic people born no later than 1451.
q = HistoricPerson.query(HistoricPerson.birth.last <= date(1451, 12, 31))
print q.fetch()
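# Hedged follow-up example (not in the original sample): for repeated
# properties, an equality filter matches entities whose list contains the
# given value, so this finds Columbus through his recorded event name.
q2 = HistoricPerson.query(HistoricPerson.event_names == 'Discovery of America')
print q2.fetch()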
|
GoogleCloudPlatform/datastore-ndb-python
|
samples/columbus.py
|
Python
|
apache-2.0
| 2,388
|
[
"COLUMBUS"
] |
2189fc93c1f0273aa804882fcdd2d16f50b99278d5f5abd82eb900f7719d48a1
|
import string
import AST
from Memory import *
from Exceptions import *
from visit import *
class Interpreter(object):
def __init__(self):
self.memory_stack = MemoryStack()
@on('node')
def visit(self, node, create_memory=True):
pass
@when(AST.BinExpr)
def visit(self, node, create_memory=True):
l = node.left.accept(self)
r = node.right.accept(self)
op = node.op
return eval("a" + op + "b", {"a": l, "b": r})
@when(AST.ParenExpr)
def visit(self, node, create_memory=True):
return node.expression.accept(self)
@when(AST.WhileInstr)
def visit(self, node, create_memory=True):
while node.cond.accept(self):
try:
node.instr.accept(self)
except ContinueException:
pass
except BreakException:
break
@when(AST.RepeatInstr)
def visit(self, node, create_memory=True):
while True:
try:
node.instrs.accept(self)
except BreakException:
break
except ContinueException:
pass
if node.cond.accept(self):
break
@when(AST.IfInstr)
def visit(self, node, create_memory=True):
if node.cond.accept(self):
return node.instr.accept(self)
@when(AST.IfElseInstr)
def visit(self, node, create_memory=True):
if node.cond.accept(self):
return node.instr.accept(self)
else:
return node.elseinstr.accept(self)
@when(AST.ExprList)
def visit(self, node, create_memory=True):
for expr in node.expr_list:
expr.accept(self)
@when(AST.Instructions)
def visit(self, node, create_memory=True):
for instr in node.instructions:
instr.accept(self)
@when(AST.CompoundInstr)
def visit(self, node, create_memory=True):
if create_memory:
self.memory_stack.push(Memory("inner"))
node.decls.accept(self)
try:
node.instrs.accept(self)
finally:
if create_memory:
self.memory_stack.pop()
@when(AST.Fundef)
def visit(self, node, create_memory=True):
self.memory_stack.peek().put(node.id, node)
@when(AST.Funcall)
def visit(self, node, create_memory=True):
fun = self.memory_stack.get(node.id)
fun_memory = Memory(node.id)
if node.expr_list is not None:
for arg_expression, actual_arg in zip(node.expr_list.expr_list, fun.args_list.arg_list):
arg = actual_arg.accept(self)
expr = arg_expression.accept(self)
fun_memory.put(arg, expr)
self.memory_stack.push(fun_memory)
try:
fun.comp_instr.accept(self, False)
except ReturnValueException as e:
return e.value
finally:
self.memory_stack.pop()
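    # Calling convention used above (descriptive comment, not in the original
    # source): a fresh Memory frame named after the function is filled with
    # (formal argument, evaluated actual) pairs, pushed for the duration of
    # the call, and the return value travels back as a ReturnValueException
    # raised by a ReturnInstr node; the frame is always popped in finally.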
@when(AST.Arg)
def visit(self, node, create_memory=True):
return node.idd
@when(AST.ArgList)
def visit(self, node, create_memory=True):
for arg in node.arg_list:
arg.accept(self)
@when(AST.Assignment)
def visit(self, node, create_memory=True):
expr = node.expr.accept(self)
self.memory_stack.set(node.var, expr)
return expr
@when(AST.BreakInstr)
def visit(self, node, create_memory=True):
raise BreakException()
@when(AST.ContinueInstr)
def visit(self, node, create_memory=True):
raise ContinueException()
@when(AST.Declaration)
def visit(self, node, create_memory=True):
node.inits.accept(self)
@when(AST.Declarations)
def visit(self, node, create_memory=True):
for declaration in node.declarations:
declaration.accept(self)
@when(AST.Init)
def visit(self, node, create_memory=True):
expr = node.expression.accept(self)
self.memory_stack.peek().put(node.var_name, expr)
return expr
@when(AST.Inits)
def visit(self, node, create_memory=True):
for init in node.inits:
init.accept(self)
@when(AST.LabeledInstruction)
def visit(self, node, create_memory=True):
return node.instr.accept(self)
@when(AST.Integer)
def visit(self, node, create_memory=True):
return int(node.val)
@when(AST.Float)
def visit(self, node, create_memory=True):
return float(node.val)
@when(AST.String)
def visit(self, node, create_memory=True):
return node.val
@when(AST.PrintInstr)
def visit(self, node, create_memory=True):
to_print = str(node.to_print.accept(self))
        # Strip matching surrounding quotes from string literals.
        while to_print.startswith('"') and to_print.endswith('"') and len(to_print) >= 2:
            to_print = to_print[1:-1]
print to_print
@when(AST.ReturnInstr)
def visit(self, node, create_memory=True):
raise ReturnValueException(node.expr.accept(self))
@when(AST.Variable)
def visit(self, node, create_memory=True):
return self.memory_stack.get(node.id)
@when(AST.FundefList)
def visit(self, node, create_memory=True):
for fundef in node.fundef_list:
fundef.accept(self)
@when(AST.Program)
def visit(self, node, create_memory=True):
node.declarations.accept(self)
node.fundefs.accept(self)
node.instructions.accept(self)
|
salceson/kompilatory
|
lab4/Interpreter.py
|
Python
|
mit
| 5,461
|
[
"VisIt"
] |
dbd30f92e8c6a64fbebd1024a76489b64dfe2f4e5d1f88656f96f70d75a65fd3
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numbers
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables as variables_lib
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import device_context
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
# Acceptable channels last formats (robust to H, W, D order).
_CHANNELS_LAST_FORMATS = frozenset({
"NWC", "NHC", "NHWC", "NWHC", "NDHWC", "NDWHC", "NHDWC", "NHWDC", "NWDHC",
"NWHDC"
})
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
# Performance is fast-pathed for common cases:
# `None`, `list`, `tuple` and `int`.
if value is None:
return [1] * (n + 2)
# Always convert `value` to a `list`.
if isinstance(value, list):
pass
elif isinstance(value, tuple):
value = list(value)
elif isinstance(value, int):
value = [value]
elif not isinstance(value, collections_abc.Sized):
value = [value]
else:
value = list(value) # Try casting to a list.
len_value = len(value)
# Fully specified, including batch and channel dims.
if len_value == n + 2:
return value
# Apply value to spatial dims only.
if len_value == 1:
value = value * n # Broadcast to spatial dimensions.
elif len_value != n:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, len_value))
# Add batch and channel dims (always 1).
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
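# Illustrative results, derived from the branches above (not part of the
# original source comments):
#   _get_sequence(2, 2, channel_index=3, name="strides")      -> [1, 2, 2, 1]
#   _get_sequence([2, 3], 2, channel_index=1, name="strides") -> [1, 1, 2, 3]
#   _get_sequence(None, 2, channel_index=3, name="strides")   -> [1, 1, 1, 1]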
def _non_atrous_convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
name=None):
"""Computes sums of N-D convolutions (actually cross correlation).
It is required that 1 <= N <= 3.
This is used to implement the more generic `convolution` function, which
extends the interface of this function with a `dilation_rate` parameter.
Args:
input: Rank N+2 tensor of type T of shape
`[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
does not start with `"NC"`, or
`[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
with `"NC"`.
filter: Rank N+2 tensor of type T of shape
`filter_spatial_shape + [in_channels, out_channels]`. Rank of either
`input` or `filter` must be known.
padding: Padding method to use, must be either "VALID" or "SAME".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
strides: Sequence of N positive integers, defaults to `[1] * N`.
name: Name prefix to use.
Returns:
Rank N+2 tensor of type T of shape
`[batch_size] + output_spatial_shape + [out_channels]`, where
if padding == "SAME":
output_spatial_shape = input_spatial_shape
if padding == "VALID":
output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.
Raises:
ValueError: if ranks are incompatible.
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.shape
op = _NonAtrousConvolution(
input_shape,
filter_shape=filter_shape,
padding=padding,
data_format=data_format,
strides=strides,
name=scope)
return op(input, filter)
class _NonAtrousConvolution(object):
"""Helper class for _non_atrous_convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape` and filter_shape passed to the
constructor.
Args:
input_shape: static input shape, i.e. input.shape.
filter_shape: static filter shape, i.e. filter.shape.
padding: see _non_atrous_convolution.
data_format: see _non_atrous_convolution.
strides: see _non_atrous_convolution.
name: see _non_atrous_convolution.
num_batch_dims: (Optional.) The number of batch dimensions in the input;
if not provided, the default of `1` is used.
"""
def __init__(
self,
input_shape,
filter_shape,
padding,
data_format=None,
strides=None,
name=None,
num_batch_dims=1):
# filter shape is always rank num_spatial_dims + 2
# and num_spatial_dims == input_shape.ndims - num_batch_dims - 1
if input_shape.ndims is not None:
filter_shape = filter_shape.with_rank(
input_shape.ndims - num_batch_dims + 1)
self.padding = padding
self.name = name
# input shape is == num_spatial_dims + num_batch_dims + 1
# and filter_shape is always rank num_spatial_dims + 2
if filter_shape.ndims is not None:
input_shape = input_shape.with_rank(
filter_shape.ndims + num_batch_dims - 1)
if input_shape.ndims is None:
raise ValueError(
"Rank of convolution must be known, but saw input_shape.ndims == {}"
.format(input_shape.ndims))
if input_shape.ndims < 3 or input_shape.ndims - num_batch_dims + 1 > 5:
raise ValueError(
"`input_shape.ndims - num_batch_dims + 1` must be at least 3 and at "
"most 5 but saw `input_shape.ndims == {}` and `num_batch_dims == {}`"
.format(input_shape.ndims, num_batch_dims))
conv_dims = input_shape.ndims - num_batch_dims - 1
if strides is None:
strides = [1] * conv_dims
elif len(strides) != conv_dims:
raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
conv_dims))
if conv_dims == 1:
# conv1d uses the 2-d data format names
if data_format is None:
data_format = "NWC"
elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
self.strides = strides[0]
self.data_format = data_format
self.conv_op = self._conv1d
elif conv_dims == 2:
if data_format is None or data_format == "NHWC":
data_format = "NHWC"
strides = [1] + list(strides) + [1]
elif data_format == "NCHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
self.strides = strides
self.data_format = data_format
self.conv_op = conv2d
elif conv_dims == 3:
if data_format is None or data_format == "NDHWC":
strides = [1] + list(strides) + [1]
elif data_format == "NCDHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
% data_format)
self.strides = strides
self.data_format = data_format
self.conv_op = _conv3d_expanded_batch
# Note that we need this adapter since argument names for conv1d don't match
# those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
# pylint: disable=redefined-builtin
def _conv1d(self, input, filter, strides, padding, data_format, name):
return conv1d(
value=input,
filters=filter,
stride=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(
input=inp,
filter=filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name=self.name)
def squeeze_batch_dims(inp, op, inner_rank, name=None):
"""Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
Where `squeeze_batch` reshapes `inp` to shape
`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
and `unsqueeze_batch` does the reverse reshape but on the output.
Args:
inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
is length `inner_rank`.
    op: A callable that takes a single input tensor and returns a single
      output tensor.
inner_rank: A python integer.
name: A string.
Returns:
`unsqueeze_batch_op(squeeze_batch(inp))`.
"""
with ops.name_scope(name, "squeeze_batch_dims", [inp]):
inp = ops.convert_to_tensor(inp, name="input")
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = array_ops.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tensor_shape.TensorShape):
inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = array_ops.reshape(
inp, array_ops.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]
out = array_ops.reshape(
out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
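# Illustrative shape walk-through (added for clarity, not from the original
# source): for inp.shape == [2, 3, 5, 5, 4] and inner_rank == 3, `op` sees a
# tensor of shape [6, 5, 5, 4]; its output is then reshaped so the leading
# [2, 3] batch dimensions are restored around op's inner output shape.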
@tf_export("nn.dilation2d", v1=[])
@dispatch.add_dispatch_support
def dilation2d_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filters[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling
kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filters` is equal to the
negation of the erosion of `-input` by the reflected `filters`.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
return gen_nn_ops.dilation2d(input=input,
filter=filters,
strides=strides,
rates=dilations,
padding=padding,
name=name)
@tf_export(v1=["nn.dilation2d"])
@dispatch.add_dispatch_support
def dilation2d_v1( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
rates=None,
padding=None,
name=None,
filters=None,
dilations=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
rates = deprecated_argument_lookup("dilations", dilations, "rates", rates)
return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name)
dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__
@tf_export("nn.with_space_to_batch")
@dispatch.add_dispatch_support
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Performs `op` on the space-to-batch representation of `input`.
This has the effect of transforming sliding window operations into the
corresponding "atrous" operation in which the input is sampled at the
specified `dilation_rate`.
In the special case that `dilation_rate` is uniformly 1, this simply returns:
op(input, num_spatial_dims, padding)
Otherwise, it returns:
batch_to_space_nd(
op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
num_spatial_dims,
"VALID")
adjusted_dilation_rate,
adjusted_crops),
where:
adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]
defined as follows:
We first define two int64 tensors `paddings` and `crops` of shape
`[num_spatial_dims, 2]` based on the value of `padding` and the spatial
dimensions of the `input`:
If `padding = "VALID"`, then:
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate)
If `padding = "SAME"`, then:
dilated_filter_shape =
filter_shape + (filter_shape - 1) * (dilation_rate - 1)
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate,
[(dilated_filter_shape - 1) // 2,
dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])
Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
dimensions are contiguous starting at the second dimension, but the specified
`spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
`crops` in order to be usable with these operations. For a given dimension,
if the block size is 1, and both the starting and ending padding and crop
amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
which is what is needed for dimensions not part of `spatial_dims`.
Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
efficiently for any number of leading and trailing dimensions.
For 0 <= i < len(spatial_dims), we assign:
adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]
All unassigned values of `adjusted_dilation_rate` default to 1, while all
unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
`[1]*N`.
Advanced usage. Note the following optimization: A sequence of
`with_space_to_batch` operations with identical (not uniformly 1)
`dilation_rate` parameters and "VALID" padding
net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
...
net = with_space_to_batch(net, dilation_rate, "VALID", op_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "VALID")
...
result = op_k(result, num_spatial_dims, "VALID")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
`batch_to_space_nd`.
Similarly, a sequence of `with_space_to_batch` operations with identical (not
uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
dimensions
net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
...
net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "SAME")
...
result = op_k(result, num_spatial_dims, "SAME")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
Args:
input: Tensor of rank > max(spatial_dims).
dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
padding: str constant equal to "VALID" or "SAME"
op: Function that maps (input, num_spatial_dims, padding) -> output
filter_shape: If padding = "SAME", specifies the shape of the convolution
kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
If padding = "VALID", filter_shape is ignored and need not be specified.
spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
integers (which are >= 1) specifying the spatial dimensions of `input`
and output. Defaults to: `range(1, num_spatial_dims+1)`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
The output Tensor as described above, dimensions will vary based on the op
provided.
Raises:
ValueError: if `padding` is invalid or the arguments are incompatible.
ValueError: if `spatial_dims` are invalid.
"""
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
class _WithSpaceToBatch(object):
"""Helper class for with_space_to_batch.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`spatial_dims` passed to the constructor.
Arguments
input_shape: static shape of input. i.e. input.shape.
dilation_rate: see `with_space_to_batch`.
padding: see `with_space_to_batch`.
build_op: Function that maps (num_spatial_dims, paddings) -> (function that
maps (input, filter) -> output).
filter_shape: see `with_space_to_batch`.
    spatial_dims: see `with_space_to_batch`.
data_format: see `with_space_to_batch`.
num_batch_dims: (Optional). Number of batch dims in `input_shape`.
"""
def __init__(self,
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=None,
spatial_dims=None,
data_format=None,
num_batch_dims=1):
"""Helper class for _with_space_to_batch."""
dilation_rate = ops.convert_to_tensor(
dilation_rate, dtypes.int32, name="dilation_rate")
if dilation_rate.shape.ndims not in (None, 1):
raise ValueError(
"rate must be rank 1 but saw {}".format(dilation_rate.shape.ndims))
if not dilation_rate.shape.is_fully_defined():
raise ValueError("rate must have known shape, but saw {}"
.format(dilation_rate.shape))
num_spatial_dims = dilation_rate.shape.dims[0].value
if data_format is not None and data_format.startswith("NC"):
starting_spatial_dim = num_batch_dims + 1
else:
starting_spatial_dim = num_batch_dims
if spatial_dims is None:
spatial_dims = range(starting_spatial_dim,
num_spatial_dims + starting_spatial_dim)
orig_spatial_dims = list(spatial_dims)
spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
raise ValueError(
"spatial_dims must be a monotonically increasing sequence of "
"positive integers, but saw: {}".format(orig_spatial_dims))
if data_format is not None and data_format.startswith("NC"):
expected_input_rank = spatial_dims[-1]
else:
expected_input_rank = spatial_dims[-1] + 1
try:
input_shape.with_rank_at_least(expected_input_rank)
except ValueError:
raise ValueError(
"input tensor must have rank at least {}, but saw rank {}"
.format(expected_input_rank, input_shape.ndims))
const_rate = tensor_util.constant_value(dilation_rate)
rate_or_const_rate = dilation_rate
if const_rate is not None:
rate_or_const_rate = const_rate
if np.any(const_rate < 1):
raise ValueError("dilation_rate must be positive, but saw: {}"
.format(const_rate))
if np.all(const_rate == 1):
self.call = build_op(num_spatial_dims, padding)
return
padding, explicit_paddings = convert_padding(padding)
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
if filter_shape is None:
raise ValueError("filter_shape must be specified for SAME padding")
filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
const_filter_shape = tensor_util.constant_value(filter_shape)
if const_filter_shape is not None:
filter_shape = const_filter_shape
self.base_paddings = _with_space_to_batch_base_paddings(
const_filter_shape, num_spatial_dims, rate_or_const_rate)
else:
self.num_spatial_dims = num_spatial_dims
self.rate_or_const_rate = rate_or_const_rate
self.base_paddings = None
elif padding == "VALID":
self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
elif padding == "EXPLICIT":
base_paddings = (np.array(explicit_paddings)
.reshape([num_spatial_dims + 2, 2]))
# Remove batch and channel dimensions
if data_format is not None and data_format.startswith("NC"):
self.base_paddings = base_paddings[2:]
else:
self.base_paddings = base_paddings[1:-1]
else:
raise ValueError("Invalid padding method %r" % padding)
self.input_shape = input_shape
self.spatial_dims = spatial_dims
self.dilation_rate = dilation_rate
self.data_format = data_format
self.op = build_op(num_spatial_dims, "VALID")
self.call = self._with_space_to_batch_call
def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin
"""Call functionality for with_space_to_batch."""
# Handle input whose shape is unknown during graph creation.
input_spatial_shape = None
input_shape = self.input_shape
spatial_dims = self.spatial_dims
if input_shape.ndims is not None:
input_shape_list = input_shape.as_list()
input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
if input_spatial_shape is None or None in input_spatial_shape:
input_shape_tensor = array_ops.shape(inp)
input_spatial_shape = array_ops.stack(
[input_shape_tensor[i] for i in spatial_dims])
base_paddings = self.base_paddings
if base_paddings is None:
# base_paddings could not be computed at build time since static filter
# shape was not fully defined.
filter_shape = array_ops.shape(filter)
base_paddings = _with_space_to_batch_base_paddings(
filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
paddings, crops = array_ops.required_space_to_batch_paddings(
input_shape=input_spatial_shape,
base_paddings=base_paddings,
block_shape=self.dilation_rate)
dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
spatial_dims)
paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
input_converted = array_ops.space_to_batch_nd(
input=inp, block_shape=dilation_rate, paddings=paddings)
result = self.op(input_converted, filter)
result_converted = array_ops.batch_to_space_nd(
input=result, block_shape=dilation_rate, crops=crops)
# Recover channel information for output shape if channels are not last.
if self.data_format is not None and self.data_format.startswith("NC"):
if not result_converted.shape.dims[1].value and filter is not None:
output_shape = result_converted.shape.as_list()
output_shape[1] = filter.shape[-1]
result_converted.set_shape(output_shape)
return result_converted
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
rate_or_const_rate):
"""Helper function to compute base_paddings."""
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_spatial_shape = filter_shape[:num_spatial_dims]
pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate
# When full_padding_shape is odd, we pad more at end, following the same
# convention as conv2d.
pad_extra_start = pad_extra_shape // 2
pad_extra_end = pad_extra_shape - pad_extra_start
base_paddings = array_ops.stack(
[[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
return base_paddings
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
"""Returns an `adjusted` version of `orig` based on `spatial_dims`.
Tensor of the same type as `orig` and with shape
`[max(spatial_dims), ...]` where:
adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]
for 0 <= i < len(spatial_dims), and
adjusted[j, ...] = fill_value
for j != spatial_dims[i] - 1 for some i.
If `orig` is a constant value, then the result will be a constant value.
Args:
orig: Tensor of rank > max(spatial_dims).
fill_value: Numpy scalar (of same data type as `orig) specifying the fill
value for non-spatial dimensions.
spatial_dims: See with_space_to_batch.
Returns:
`adjusted` tensor.
"""
fill_dims = orig.get_shape().as_list()[1:]
dtype = orig.dtype.as_numpy_dtype
parts = []
const_orig = tensor_util.constant_value(orig)
const_or_orig = const_orig if const_orig is not None else orig
prev_spatial_dim = 0
i = 0
while i < len(spatial_dims):
start_i = i
start_spatial_dim = spatial_dims[i]
if start_spatial_dim > 1:
# Fill in any gap from the previous spatial dimension (or dimension 1 if
# this is the first spatial dimension) with `fill_value`.
parts.append(
np.full(
[start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
fill_value,
dtype=dtype))
# Find the largest value of i such that:
# [spatial_dims[start_i], ..., spatial_dims[i]]
# == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
# i.e. the end of a contiguous group of spatial dimensions.
while (i + 1 < len(spatial_dims) and
spatial_dims[i + 1] == spatial_dims[i] + 1):
i += 1
parts.append(const_or_orig[start_i:i + 1])
prev_spatial_dim = spatial_dims[i]
i += 1
if const_orig is not None:
return np.concatenate(parts)
else:
return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
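# Illustrative behaviour, derived from the checks above (num_spatial_dims=2):
#   strides=None,   dilation_rate=[2, 2] -> (array([1, 1]), array([2, 2]))
#   strides=[2, 2], dilation_rate=None   -> (array([2, 2]), array([1, 1]))
#   strides=[2, 2], dilation_rate=[2, 2] -> ValueError (mutually exclusive)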
@tf_export(v1=["nn.convolution"])
@dispatch.add_dispatch_support
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
filters=None,
dilations=None): # pylint: disable=g-doc-args
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides`.
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
Args:
input: An (N+2)-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An (N+2)-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
return convolution_internal(
input,
filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilation_rate,
name=name)
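# A minimal usage sketch (illustration only, not part of this module's API;
# the helper name and concrete shapes below are assumptions): it checks the
# VALID output-shape rule from the docstring above,
# output[i] = ceil((in[i] - (filter[i] - 1) * dilation[i]) / strides[i]),
# for a small dilated 2-D case.
def _example_convolution_output_shape():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  x = tf.ones([1, 10, 10, 3])  # [batch, height, width, in_channels]
  k = tf.ones([3, 3, 3, 8])    # [fh, fw, in_channels, out_channels]
  y = tf.compat.v1.nn.convolution(x, k, padding="VALID", dilation_rate=[2, 2])
  # Effective filter size is 3 + (3 - 1) * (2 - 1) = 5, so 10 - (3 - 1) * 2 = 6.
  assert y.shape.as_list() == [1, 6, 6, 8]
  return y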
@tf_export("nn.convolution", v1=[])
@dispatch.add_dispatch_support
def convolution_v2( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
convolution.__doc__, "dilation_rate", "dilations"),
"filter", "filters")
def convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None,
call_from_convolution=True,
num_spatial_dims=None):
"""Internal function which performs rank agnostic convolution.
Args:
input: See `convolution`.
filters: See `convolution`.
strides: See `convolution`.
padding: See `convolution`.
data_format: See `convolution`.
dilations: See `convolution`.
name: See `convolution`.
call_from_convolution: See `convolution`.
    num_spatial_dims: (Optional.) It is an integer describing the
rank of the spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions,
the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
This argument is only required to disambiguate the rank of `batch_shape`
when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
backwards compatibility, if `num_spatial_dims is None` and
`filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
`1` (i.e., the input is expected to be
`[batch_size, num_channels] + input_spatial_shape`
      or `[batch_size] + input_spatial_shape + [num_channels]`).
Returns:
A tensor of shape and dtype matching that of `input`.
Raises:
ValueError: If input and filter both have unknown shapes, or if
`num_spatial_dims` is provided and incompatible with the value
estimated from `filters.shape`.
"""
if (not isinstance(filters, variables_lib.Variable) and
not tensor_util.is_tf_type(filters)):
with ops.name_scope("convolution_internal", None, [filters, input]):
      filters = ops.convert_to_tensor(filters, name="filters")
if (not isinstance(input, ops.Tensor) and not tensor_util.is_tf_type(input)):
with ops.name_scope("convolution_internal", None, [filters, input]):
input = ops.convert_to_tensor(input, name="input")
filters_rank = filters.shape.rank
inputs_rank = input.shape.rank
if num_spatial_dims is None:
if filters_rank:
num_spatial_dims = filters_rank - 2
elif inputs_rank:
num_spatial_dims = inputs_rank - 2
else:
raise ValueError("rank of input or filter must be known")
elif filters_rank and filters_rank - 2 != num_spatial_dims:
raise ValueError(
"inconsistent estimate of spatial dims ({}) vs. actual passed "
"num_spatial_dims ({}). n was estimated as len(filters.shape) - 2, "
"but filters shape is: {}".format(filters_rank, num_spatial_dims,
filters.shape))
if inputs_rank:
num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension.
else:
num_batch_dims = 1 # By default, assume single batch dimension.
if num_spatial_dims not in {1, 2, 3}:
raise ValueError(
"num_spatial_dims (input.shape.ndims - num_batch_dims - 1) must be one "
"of 1, 2 or 3 but saw {}. num_batch_dims: {}.".format(
num_spatial_dims, num_batch_dims))
if data_format is None or data_format in _CHANNELS_LAST_FORMATS:
channel_index = num_batch_dims + num_spatial_dims
else:
channel_index = num_batch_dims
if dilations is None:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = False
else:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = any(i != 1 for i in dilations)
strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
has_tpu_context = device_context.enclosing_tpu_context() is not None
if name:
default_name = None
elif not has_tpu_context or call_from_convolution:
default_name = "convolution"
elif num_spatial_dims == 2: # Most common case.
default_name = "Conv2D"
elif num_spatial_dims == 3:
default_name = "Conv3D"
else:
default_name = "conv1d"
with ops.name_scope(name, default_name, [input, filters]) as name:
# Fast path for TPU or if no dilation, as gradient only supported on TPU
# for dilations.
if not is_dilated_conv or has_tpu_context:
if num_spatial_dims == 2: # Most common case.
op = _conv2d_expanded_batch
elif num_spatial_dims == 3:
op = _conv3d_expanded_batch
else:
op = conv1d
return op(
input,
filters,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
if channel_index == 1:
strides = strides[2:]
dilations = dilations[2:]
else:
strides = strides[1:-1]
dilations = dilations[1:-1]
op = Convolution(
tensor_shape.as_shape(input.shape),
tensor_shape.as_shape(filters.shape),
padding,
strides=strides,
dilation_rate=dilations,
name=name,
data_format=data_format,
num_spatial_dims=num_spatial_dims)
return op(input, filters)
class Convolution(object):
"""Helper class for convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`num_spatial_dims` passed to the constructor.
  Arguments:
    input_shape: static shape of input, i.e. input.shape. It is
`batch_shape + input_spatial_shape + [num_channels]` if `data_format`
does not start with `NC`, or
`batch_shape + [num_channels] + input_spatial_shape` if `data_format`
starts with `NC`.
filter_shape: static shape of the filter. i.e. filter.shape.
padding: The padding algorithm, must be "SAME" or "VALID".
strides: see convolution.
dilation_rate: see convolution.
name: see convolution.
data_format: A string or `None`. Specifies whether the channel dimension of
the `input` and output is the last dimension (if `data_format` is `None`
or does not start with `NC`), or the first post-batch dimension (i.e. if
`data_format` starts with `NC`).
num_spatial_dims: (Usually optional.) Python integer, the rank of the
      spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions,
the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
This argument is only required to disambiguate the rank of `batch_shape`
when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
backwards compatibility, if `num_spatial_dims is None` and
`filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
`1` (i.e., the input is expected to be
`[batch_size, num_channels] + input_spatial_shape`
      or `[batch_size] + input_spatial_shape + [num_channels]`).
"""
def __init__(self,
input_shape,
filter_shape,
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
num_spatial_dims=None):
"""Helper function for convolution."""
num_batch_dims = None
filter_shape = tensor_shape.as_shape(filter_shape)
input_shape = tensor_shape.as_shape(input_shape)
if filter_shape.ndims is not None:
if (num_spatial_dims is not None and
filter_shape.ndims != num_spatial_dims + 2):
raise ValueError(
"Expected filter_shape.ndims == num_spatial_dims + 2, "
"but saw filter_shape.ndims == {} and num_spatial_dims == {}"
.format(filter_shape.ndims, num_spatial_dims))
else:
num_spatial_dims = filter_shape.ndims - 2
if input_shape.ndims is not None and num_spatial_dims is not None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
num_spatial_dims = input_shape.ndims - 2
else:
if input_shape.ndims is not None:
if input_shape.ndims < num_spatial_dims + 2:
raise ValueError(
"Expected input_shape.ndims >= num_spatial_dims + 2, but saw "
"input_shape.ndims == {} and num_spatial_dims == {}"
.format(input_shape.ndims, num_spatial_dims))
else:
if num_batch_dims is None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
raise ValueError(
"Cannot estimate num_spatial_dims since input_shape.ndims is None, "
"filter_shape.ndims is None, and argument num_spatial_dims is also "
"None.")
if num_batch_dims is None:
num_batch_dims = 1
if num_batch_dims < 1:
raise ValueError(
"num_batch_dims should be >= 1, but saw {}. num_batch_dims was "
"estimated as `input_shape.ndims - num_spatial_dims - 1` and "
"num_spatial_dims was either provided or estimated as "
"`filter_shape.ndims - 2`. input_shape.ndims: {}, "
"num_spatial_dims: {}, filter_shape.ndims: {}"
.format(num_batch_dims, input_shape.ndims, num_spatial_dims,
filter_shape.ndims))
if data_format is None or not data_format.startswith("NC"):
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_spatial_dims + num_batch_dims)
spatial_dims = range(num_batch_dims, num_spatial_dims + num_batch_dims)
else:
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_batch_dims)
spatial_dims = range(
num_batch_dims + 1, num_spatial_dims + num_batch_dims + 1)
filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)
if not (input_channels_dim % filter_dim).is_compatible_with(0):
raise ValueError("The number of input channels is not divisible by the "
"corresponding number of output filters. Received: "
"input channels={}, output filters={}".format(
input_channels_dim, filter_dim))
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
self.input_shape = input_shape
self.filter_shape = filter_shape
self.data_format = data_format
self.strides = strides
self.padding = padding
self.name = name
self.dilation_rate = dilation_rate
self.num_batch_dims = num_batch_dims
self.num_spatial_dims = num_spatial_dims
self.conv_op = _WithSpaceToBatch(
input_shape,
dilation_rate=dilation_rate,
padding=padding,
build_op=self._build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format,
num_batch_dims=num_batch_dims)
def _build_op(self, _, padding):
return _NonAtrousConvolution(
self.input_shape,
filter_shape=self.filter_shape,
padding=padding,
data_format=self.data_format,
strides=self.strides,
name=self.name,
num_batch_dims=self.num_batch_dims)
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
# TPU convolution supports dilations greater than 1.
if device_context.enclosing_tpu_context() is not None:
return convolution_internal(
inp,
filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilations=self.dilation_rate,
name=self.name,
call_from_convolution=False,
num_spatial_dims=self.num_spatial_dims)
else:
return self.conv_op(inp, filter)
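# A minimal sketch of how this helper can be used (an assumption made for
# illustration; `Convolution` is module-internal, so real callers normally go
# through `tf.nn.convolution` instead). The shapes below are arbitrary examples.
def _example_convolution_helper():
  import tensorflow as tf  # assumed importable at call time
  conv_op = Convolution(
      input_shape=[1, 8, 8, 3],
      filter_shape=[3, 3, 3, 4],
      padding="SAME",
      strides=[1, 1],
      dilation_rate=[1, 1])
  x = tf.ones([1, 8, 8, 3])
  k = tf.ones([3, 3, 3, 4])
  return conv_op(x, k)  # SAME padding keeps the spatial shape: [1, 8, 8, 4]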
@tf_export(v1=["nn.pool"])
@dispatch.add_dispatch_support
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None,
dilations=None):
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Alias for dilation_rate
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
@dispatch.add_dispatch_support
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of
strides is > 1, then all values of dilation_rate must be 1.
    padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "VALID".
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
[1]*N. If any value of dilation_rate is > 1, then all values of strides
must be 1.
name: Optional. Name of the op.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
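# A minimal usage sketch (shapes and values are assumptions chosen for
# illustration): 2-D max pooling through the rank-agnostic `tf.nn.pool`
# wrapper defined above.
def _example_pool():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
  y = tf.nn.pool(x, window_shape=[2, 2], pooling_type="MAX",
                 strides=[2, 2], padding="VALID")
  # Non-overlapping 2x2 windows over a 4x4 input give a 2x2 output.
  assert y.shape.as_list() == [1, 2, 2, 1]
  return y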
@tf_export("nn.atrous_conv2d")
@dispatch.add_dispatch_support
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
`tf.nn.convolution`, and exists only for backwards compatibility. You can
use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: (Chen et al., 2015). The same operation is
investigated further in (Yu et al., 2016). Previous works that effectively
use atrous convolution in different ways are, among others,
(Sermanet et al., 2014) and (Giusti et al., 2013).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
  with odd heights/widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
can be equivalently performed cheaper in terms of computation and memory as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Output shape with `'VALID'` padding is:
        [batch, height - rate * (filter_height - 1),
         width - rate * (filter_width - 1), out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Multi-Scale Context Aggregation by Dilated Convolutions:
[Yu et al., 2016](https://arxiv.org/abs/1511.07122)
([pdf](https://arxiv.org/pdf/1511.07122.pdf))
Semantic Image Segmentation with Deep Convolutional Nets and Fully
Connected CRFs:
[Chen et al., 2015](http://arxiv.org/abs/1412.7062)
([pdf](https://arxiv.org/pdf/1412.7062))
OverFeat - Integrated Recognition, Localization and Detection using
Convolutional Networks:
[Sermanet et al., 2014](https://arxiv.org/abs/1312.6229)
([pdf](https://arxiv.org/pdf/1312.6229.pdf))
Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks:
[Giusti et al., 2013]
(https://ieeexplore.ieee.org/abstract/document/6738831)
([pdf](https://arxiv.org/pdf/1302.1700.pdf))
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
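# A minimal sketch of the equivalence described above (shapes and tolerance
# are assumptions): `atrous_conv2d` with `rate` matches `tf.nn.conv2d` called
# with the same per-dimension dilation.
def _example_atrous_conv2d_equivalence():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  x = tf.random.normal([1, 9, 9, 2])
  k = tf.random.normal([3, 3, 2, 4])
  a = tf.nn.atrous_conv2d(x, k, rate=2, padding="SAME")
  b = tf.nn.conv2d(x, k, strides=[1, 1, 1, 1], padding="SAME",
                   dilations=[1, 2, 2, 1])
  tf.debugging.assert_near(a, b, atol=1e-4)  # same values up to rounding
  return a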
def convert_padding(padding, expected_length=4):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
expected_length: Expected number of entries in the padding list when
explicit padding is used.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != expected_length:
raise ValueError("When padding is a list, it must be of size %d. Got "
"padding of size: %d" % (expected_length, len(padding)))
padding = "EXPLICIT"
return padding, explicit_paddings
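# A minimal sketch of what `convert_padding` produces (module-internal, so
# this is only illustrative): an explicit NHWC padding list becomes the
# "EXPLICIT" mode plus a flattened attribute list for the C++ kernel, while a
# plain string passes through unchanged.
def _example_convert_padding():
  padding, explicit = convert_padding([[0, 0], [1, 2], [3, 4], [0, 0]])
  assert padding == "EXPLICIT"
  assert explicit == [0, 0, 1, 2, 3, 4, 0, 0]
  assert convert_padding("SAME") == ("SAME", [])
  return padding, explicit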
@tf_export(v1=["nn.conv1d"])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
None,
"`NCHW` for data_format is deprecated, use `NCW` instead",
warn_once=True,
data_format="NCHW")
@deprecation.deprecated_arg_values(
None,
"`NHWC` for data_format is deprecated, use `NWC` instead",
warn_once=True,
data_format="NHWC")
def conv1d(
value=None,
filters=None,
stride=None,
padding=None,
use_cudnn_on_gpu=None,
data_format=None,
name=None,
input=None, # pylint: disable=redefined-builtin
dilations=None):
r"""Computes a 1-D convolution of input with rank `>=3` and a `3-D` filter.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
value: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `value`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of `batch_shape + [in_width,
in_channels]`. The `"NCW"` format stores data as `batch_shape +
[in_channels, in_width]`.
name: A name for the operation (optional).
input: Alias for value.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "conv1d", [value, filters]) as name:
# Reshape the input tensor to batch_shape + [1, in_width, in_channels]
if data_format is None or data_format == "NHWC" or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = -3
channel_index = 2
elif data_format == "NCHW" or data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = -2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
value = array_ops.expand_dims(value, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
if value.shape.ndims in (4, 3, 2, 1, 0, None):
result = gen_nn_ops.conv2d(
value,
filters,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
name=name)
else:
result = squeeze_batch_dims(
value,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
),
inner_rank=3,
name=name)
return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
@dispatch.add_dispatch_support
def conv1d_v2(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
data_format="NWC",
dilations=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with `"NC"`, a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
input: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `input`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of
`batch_shape + [in_width, in_channels]`. The `"NCW"` format stores data
as `batch_shape + [in_channels, in_width]`.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
return conv1d(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
name=name,
dilations=dilations)
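# A minimal usage sketch (shapes are assumptions): a 1-D convolution through
# the wrapper above, which internally reshapes to 2-D and calls `conv2d`.
def _example_conv1d():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  x = tf.ones([2, 10, 3])  # [batch, in_width, in_channels]
  k = tf.ones([4, 3, 5])   # [filter_width, in_channels, out_channels]
  y = tf.nn.conv1d(x, k, stride=1, padding="VALID")
  # VALID padding: out_width = 10 - 4 + 1 = 7.
  assert y.shape.as_list() == [2, 7, 5]
  return y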
@tf_export("nn.conv1d_transpose")
@dispatch.add_dispatch_support
def conv1d_transpose(
input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NWC",
dilations=None,
name=None):
"""The transpose of `conv1d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
rather than an actual deconvolution.
Args:
input: A 3-D `Tensor` of type `float` and shape
`[batch, in_width, in_channels]` for `NWC` data format or
`[batch, in_channels, in_width]` for `NCW` data format.
filters: A 3-D `Tensor` with the same type as `input` and shape
      `[filter_width, output_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `input`.
output_shape: A 1-D `Tensor`, containing three elements, representing the
output shape of the deconvolution op.
strides: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. `'NWC'` and `'NCW'` are supported.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, if
`output_shape` is not at 3-element vector, if `padding` is other than
`'VALID'` or `'SAME'`, or if `data_format` is invalid.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv1d_transpose",
[input, filters, output_shape]) as name:
# The format could be either NWC or NCW, map to NHWC or NCHW
if data_format is None or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = 1
channel_index = 2
elif data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = 2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
# Reshape the input tensor to [batch, 1, in_width, in_channels]
strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
input = array_ops.expand_dims(input, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
output_shape = list(output_shape) if not isinstance(
output_shape, ops.Tensor) else output_shape
output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1],
output_shape[spatial_start_dim:]], 0)
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return array_ops.squeeze(result, spatial_start_dim)
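# A minimal usage sketch (shapes are assumptions): with SAME padding and
# stride 2, `conv1d_transpose` upsamples the width, which is why the caller
# supplies `output_shape` explicitly.
def _example_conv1d_transpose():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  x = tf.ones([1, 5, 4])  # [batch, in_width, in_channels]
  k = tf.ones([3, 2, 4])  # [filter_width, output_channels, in_channels]
  y = tf.nn.conv1d_transpose(x, k, output_shape=[1, 10, 2], strides=2)
  assert y.shape.as_list() == [1, 10, 2]
  return y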
@tf_export("nn.conv2d", v1=[])
@dispatch.add_dispatch_support
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given `input` and 4-D `filters` tensors.
The `input` tensor may have rank `4` or higher, where shape dimensions `[:-3]`
are considered batch dimensions (`batch_shape`).
Given an input tensor of shape
`batch_shape + [in_height, in_width, in_channels]` and a filter / kernel
tensor of shape `[filter_height, filter_width, in_channels, out_channels]`,
this op performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Usage Example:
>>> x_in = np.array([[
... [[2], [1], [2], [0], [1]],
... [[1], [3], [2], [2], [3]],
... [[1], [1], [3], [3], [0]],
... [[2], [2], [0], [1], [1]],
... [[0], [0], [3], [1], [2]], ]])
>>> kernel_in = np.array([
... [ [[2, 0.1]], [[3, 0.2]] ],
... [ [[0, 0.3]],[[1, 0.4]] ], ])
>>> x = tf.constant(x_in, dtype=tf.float32)
>>> kernel = tf.constant(kernel_in, dtype=tf.float32)
>>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
<tf.Tensor: shape=(1, 4, 4, 2), dtype=float32, numpy=..., dtype=float32)>
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A Tensor of rank at least 4. The dimension order is interpreted according
to the value of `data_format`; with the all-but-inner-3 dimensions acting
as batch dimensions. See below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
`batch_shape + [height, width, channels]`.
Alternatively, the format could be "NCHW", the data storage order of:
`batch_shape + [channels, height, width]`.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input` and the same outer batch shape.
"""
# pylint: enable=line-too-long
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
@dispatch.add_dispatch_support
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `input`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
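# A minimal sketch of the expanded-batch path above (shapes are assumptions):
# inputs of rank > 4 are accepted, with all leading dimensions treated as
# batch dimensions and restored on the output.
def _example_conv2d_batch_dims():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  x = tf.ones([2, 3, 8, 8, 1])  # batch_shape = [2, 3]
  k = tf.ones([3, 3, 1, 4])
  y = tf.nn.conv2d(x, k, strides=1, padding="SAME")
  assert y.shape.as_list() == [2, 3, 8, 8, 4]
  return y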
@tf_export(v1=["nn.conv2d_backprop_filter"])
@dispatch.add_dispatch_support
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
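# A minimal usage sketch of the v1 op above (shapes are assumptions): given an
# upstream gradient with the forward output's shape, it returns the gradient
# with respect to the filter.
def _example_conv2d_backprop_filter():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  x = tf.random.normal([1, 6, 6, 2])
  upstream = tf.random.normal([1, 6, 6, 3])  # grad w.r.t. the SAME conv output
  grad_k = tf.compat.v1.nn.conv2d_backprop_filter(
      x, filter_sizes=[3, 3, 2, 3], out_backprop=upstream,
      strides=[1, 1, 1, 1], padding="SAME")
  assert grad_k.shape.as_list() == [3, 3, 2, 3]
  return grad_k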
@tf_export(v1=["nn.conv2d_backprop_input"])
@dispatch.add_dispatch_support
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter=None,
out_backprop=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `filter`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
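# A minimal usage sketch of the v1 op above (shapes are assumptions): given an
# upstream gradient with the forward output's shape, it returns the gradient
# with respect to the input; the same op also powers `conv2d_transpose` below.
def _example_conv2d_backprop_input():
  import tensorflow as tf  # assumed importable; intended to run from a separate script
  k = tf.random.normal([3, 3, 2, 3])         # [fh, fw, in_ch, out_ch]
  upstream = tf.random.normal([1, 6, 6, 3])  # grad w.r.t. the SAME conv output
  grad_x = tf.compat.v1.nn.conv2d_backprop_input(
      input_sizes=[1, 6, 6, 2], filter=k, out_backprop=upstream,
      strides=[1, 1, 1, 1], padding="SAME")
  assert grad_x.shape.as_list() == [1, 6, 6, 2]
  return grad_x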
@tf_export(v1=["nn.conv2d_transpose"])
@dispatch.add_dispatch_support
def conv2d_transpose(
value=None,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d`
rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
input: Alias for value.
filters: Alias for filter.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
value = deprecated_argument_lookup("input", input, "value", value)
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
return conv2d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv2d_transpose_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
dilations=None,
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
  `conv2d` rather than an actual deconvolution.
Args:
input: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
in_channels]` for `NHWC` data format or `[batch, in_channels, height,
width]` for `NCHW` data format.
filters: A 4-D `Tensor` with the same type as `input` and shape `[height,
      width, output_channels, in_channels]`. `filters`' `in_channels` dimension
must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: A string. 'NHWC' and 'NCHW' are supported.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv2d_transpose",
[input, filter, output_shape]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
def _conv2d_expanded_batch(
input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format,
dilations,
name):
"""Helper function for `convolution_internal`; handles expanded batches."""
# Try really hard to avoid modifying the legacy name scopes - return early.
input_rank = input.shape.rank
if input_rank is None or input_rank < 5:
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
@tf_export("nn.atrous_conv2d_transpose")
@dispatch.add_dispatch_support
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
`atrous_conv2d` rather than an actual deconvolution.
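For example, a minimal sketch (shapes and the dilation `rate` are arbitrary;
with `'SAME'` padding the spatial size of `output_shape` matches the input):
>>> x = tf.random.normal([1, 8, 8, 4])   # NHWC input
>>> f = tf.random.normal([3, 3, 2, 4])   # [height, width, out_channels, in_channels]
>>> y = tf.nn.atrous_conv2d_transpose(x, f, output_shape=[1, 8, 8, 2], rate=2,
...                                   padding="SAME")
>>> y.shape
TensorShape([1, 8, 8, 2])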
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
output_shape: A 1-D `Tensor` of shape representing the output shape of the
deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(
tensor_shape.TensorShape([4])):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, tuple):
output_shape = list(output_shape)
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export(v1=["nn.depthwise_conv2d_native"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native")
def depthwise_conv2d_native( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D depthwise convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
a different filter to each input channel (expanding from 1 channel to
`channel_multiplier` channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.
```
for k in 0..in_channels-1
for q in 0..channel_multiplier-1
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
```
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
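For example, a small sketch (sizes are arbitrary; the `tf.compat.v1` endpoint
is spelled out explicitly below). A `channel_multiplier` of 2 doubles the
channel count:
>>> x = tf.random.normal([1, 5, 5, 3])   # [batch, height, width, in_channels]
>>> f = tf.random.normal([3, 3, 3, 2])   # [height, width, in_channels, channel_multiplier]
>>> y = tf.compat.v1.nn.depthwise_conv2d_native(x, f, strides=[1, 1, 1, 1],
...                                             padding="SAME")
>>> y.shape
TensorShape([1, 5, 5, 6])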
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`.
filter: A `Tensor`. Must have the same type as `input`.
strides: A list of `ints`. 1-D of length 4. The stride of the sliding
window for each dimension of `input`.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native(
input,
filter,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_input",
v1=[
"nn.depthwise_conv2d_native_backprop_input",
"nn.depthwise_conv2d_backprop_input"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_input")
def depthwise_conv2d_native_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`. An integer vector representing the
shape of `input`, based on `data_format`. For example, if `data_format`
is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape `[filter_height, filter_width,
in_channels, depthwise_multiplier]`.
out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with
shape based on `data_format`. For example, if `data_format` is 'NHWC'
then out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_input(
input_sizes,
filter,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_filter",
v1=[
"nn.depthwise_conv2d_native_backprop_filter",
"nn.depthwise_conv2d_backprop_filter"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_filter")
def depthwise_conv2d_native_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape based on `data_format`. For example,
if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
in_width, in_channels]` tensor.
filter_sizes: A `Tensor` of type `int32`. An integer vector representing the
tensor shape of `filter`, where `filter` is a 4-D `[filter_height,
filter_width, in_channels, depthwise_multiplier]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape
based on `data_format`. For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_filter(
input,
filter_sizes,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
def _conv3d_expanded_batch(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations=None,
name=None):
"""Helper function for `conv3d`; handles expanded batches."""
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (5, 4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv3d(
input,
filter,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv3d,
filter=filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=4,
name=name)
@tf_export("nn.conv3d", v1=[])
@dispatch.add_dispatch_support
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return _conv3d_expanded_batch(input, filters, strides, padding, data_format,
dilations, name)
@tf_export(v1=["nn.conv3d"])
@dispatch.add_dispatch_support
def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
padding=None,
data_format="NDHWC",
dilations=[1, 1, 1, 1, 1],
name=None,
filters=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
return gen_nn_ops.conv3d(
input, filter, strides, padding, data_format, dilations, name)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__
@tf_export(v1=["nn.conv3d_transpose"])
@dispatch.add_dispatch_support
def conv3d_transpose(
value,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NDHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
input: Alias of value.
filters: Alias of filter.
dilations: An int or list of `ints` that has length `1`, `3` or `5`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `D`, `H` and `W` dimension.
By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. If given as a 5-d tensor, the dilations in the batch and depth
dimensions must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
value = deprecated_argument_lookup("input", input, "value", value)
return conv3d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv3d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
dilations=None,
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
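For example, a minimal sketch (shapes are arbitrary):
>>> x = tf.random.normal([1, 2, 2, 2, 5])   # [batch, depth, height, width, in_channels]
>>> f = tf.random.normal([3, 3, 3, 8, 5])   # [depth, height, width, out_channels, in_channels]
>>> y = tf.nn.conv3d_transpose(x, f, output_shape=[1, 4, 4, 4, 8], strides=2)
>>> y.shape
TensorShape([1, 4, 4, 4, 8])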
Args:
input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height,
width, in_channels]` for `NDHWC` data format or `[batch, in_channels,
depth, height, width]` for `NCDHW` data format.
filters: A 5-D `Tensor` with the same type as `input` and shape `[depth,
height, width, output_channels, in_channels]`. `filter`'s `in_channels`
dimension must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `D`, `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. The dimension order is
determined by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
dilations: An int or list of `ints` that has length `1`, `3` or `5`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the `D`, `H` and `W` dimension.
By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. If given as a 5-d tensor, the dilations in the batch and depth
dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv3d_transpose",
[input, filters, output_shape]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
strides = _get_sequence(strides, 3, channel_index, "strides")
dilations = _get_sequence(dilations, 3, channel_index, "dilations")
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
CONV_TRANSPOSE_OPS = (
conv1d_transpose,
conv2d_transpose_v2,
conv3d_transpose_v2,
)
@tf_export("nn.conv_transpose")
@dispatch.add_dispatch_support
def conv_transpose(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format=None,
dilations=None,
name=None):
"""The transpose of `convolution`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `convolution`
rather than an actual deconvolution.
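For example, an illustrative sketch (shapes are arbitrary; the length of
`output_shape` selects the 2-D case, so this dispatches to
`tf.nn.conv2d_transpose`):
>>> x = tf.random.normal([1, 2, 2, 5])
>>> f = tf.random.normal([3, 3, 8, 5])
>>> y = tf.nn.conv_transpose(x, f, output_shape=[1, 4, 4, 8], strides=2)
>>> y.shape
TensorShape([1, 4, 4, 8])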
Args:
input: An N+2 dimensional `Tensor` of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC". It must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
filters: An N+2 dimensional `Tensor` with the same type as `input` and
shape `spatial_filter_shape + [in_channels, out_channels]`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the spatial dimensions. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: An int or list of `ints` that has length `1`, `N` or `N+2`,
defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the spatial dimensions. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details.
name: A name for the operation (optional). If not specified "conv_transpose"
is used.
Returns:
A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv_transpose",
[input, filters, output_shape]) as name:
if tensor_util.is_tf_type(output_shape):
n = output_shape.shape[0] - 2
elif isinstance(output_shape, collections_abc.Sized):
n = len(output_shape) - 2
else:
raise ValueError("output_shape must be a tensor or sized collection.")
if not 1 <= n <= 3:
raise ValueError(
"output_shape must be of length 3, 4 or 5 but was {}.".format(n + 2))
op = CONV_TRANSPOSE_OPS[n-1]
return op(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
def _tf_deterministic_ops():
if _tf_deterministic_ops.value is None:
tf_deterministic_ops = os.environ.get("TF_DETERMINISTIC_OPS")
if tf_deterministic_ops is not None:
tf_deterministic_ops = tf_deterministic_ops.lower()
_tf_deterministic_ops.value = (
tf_deterministic_ops == "true" or tf_deterministic_ops == "1")
return _tf_deterministic_ops.value
_tf_deterministic_ops.value = None
@tf_export("nn.bias_add")
@dispatch.add_dispatch_support
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
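For example, a small sketch (values are arbitrary):
>>> x = tf.constant([[1.0, 2.0, 3.0]])
>>> b = tf.constant([0.5, 0.5, 0.5])
>>> tf.nn.bias_add(x, b).numpy()
array([[1.5, 2.5, 3.5]], dtype=float32)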
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the channel dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the
default) is specified then 'N...C' is assumed.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If the data format is unrecognized, if `value` has less than two
dimensions when `data_format` is 'N...C'/`None`, if `value` has fewer than
three dimensions when `data_format` is 'NC...', if `bias` does not have
exactly one dimension (i.e. is not a vector), or if the size of `bias`
does not match the size of the channel dimension of `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if data_format is not None:
if data_format.startswith("NC"):
data_format = "NCHW"
elif data_format.startswith("N") and data_format.endswith("C"):
data_format = "NHWC"
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
# TODO(duncanriach): Implement deterministic functionality at CUDA kernel
# level.
if _tf_deterministic_ops():
# Note that this code does not implement the same error checks as the
# pre-existing C++ ops.
if data_format == "NCHW":
broadcast_shape_head = [1, array_ops.size(bias)]
broadcast_shape_tail = array_ops.ones(
array_ops.rank(value) - 2, dtype=dtypes.int32)
broadcast_shape = array_ops.concat(
[broadcast_shape_head, broadcast_shape_tail], 0)
return math_ops.add(
value, array_ops.reshape(bias, broadcast_shape), name=name)
else: # data_format == 'NHWC' or data_format == None
return math_ops.add(value, bias, name=name)
else:
return gen_nn_ops.bias_add(
value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
"""Adds `bias` to `value`.
This is a deprecated version of bias_add and will soon be removed.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
@dispatch.add_dispatch_support
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
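For example, a small sketch (note how the channel dimension doubles):
>>> x = tf.constant([[-1.0, 2.0]])
>>> tf.nn.crelu(x).numpy()
array([[0., 2., 1., 0.]], dtype=float32)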
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
References:
Understanding and Improving Convolutional Neural Networks via Concatenated
Rectified Linear Units:
[Shang et al., 2016](http://proceedings.mlr.press/v48/shang16)
([pdf](http://proceedings.mlr.press/v48/shang16.pdf))
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name)
return gen_nn_ops.relu(c)
@tf_export("nn.crelu", v1=[])
@dispatch.add_dispatch_support
def crelu_v2(features, axis=-1, name=None):
return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
@dispatch.add_dispatch_support
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
Convolutional Deep Belief Networks on CIFAR-10:
Krizhevsky et al., 2010
([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf))
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
@tf_export("nn.leaky_relu")
@dispatch.add_dispatch_support
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013]
(https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf).
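For example, a small sketch using the default `alpha=0.2`:
>>> tf.nn.leaky_relu(tf.constant([-5.0, 0.0, 5.0])).numpy()
array([-1.,  0.,  5.], dtype=float32)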
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
References:
Rectifier Nonlinearities Improve Neural Network Acoustic Models:
[Maas et al., 2013]
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.cast(features, dtypes.float32)
if isinstance(alpha, np.ndarray):
alpha = alpha.item()
return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
@tf_export("nn.gelu", v1=[])
@dispatch.add_dispatch_support
def gelu(features, approximate=False, name=None):
"""Compute the Gaussian Error Linear Unit (GELU) activation function.
Gaussian error linear unit (GELU) computes
`x * P(X <= x)`, where `P(X) ~ N(0, 1)`.
The (GELU) nonlinearity weights inputs by their value, rather than gates
inputs by their sign as in ReLU.
For example:
>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.nn.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],
dtype=float32)
>>> y = tf.nn.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],
dtype=float32)
Args:
features: A `Tensor` representing preactivation values.
approximate: An optional `bool`. Defaults to `False`. Whether to enable
approximation.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
[Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415).
"""
with ops.name_scope(name, "Gelu", [features]):
features = ops.convert_to_tensor(features, name="features")
if approximate:
coeff = math_ops.cast(0.044715, features.dtype)
return 0.5 * features * (
1.0 + math_ops.tanh(0.7978845608028654 *
(features + coeff * math_ops.pow(features, 3))))
else:
return 0.5 * features * (1.0 + math_ops.erf(
features / math_ops.cast(1.4142135623730951, features.dtype)))
def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keep its last dimension."""
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
# Set output shape if known.
if not context.executing_eagerly():
shape = logits.get_shape()
if shape is not None and shape.dims is not None:
shape = shape.as_list()
product = 1
product_valid = True
for d in shape[:-1]:
if d is None:
product_valid = False
break
else:
product *= d
if product_valid:
output_shape = [product, shape[-1]]
output.set_shape(output_shape)
return output
def _wrap_2d_function(inputs, compute_op, dim=-1, name=None):
"""Helper function for ops that accept and return 2d inputs of same shape.
It reshapes and transposes the inputs into a 2-D Tensor and then invokes
the given function. The output would be transposed and reshaped back.
If the given function returns a tuple of tensors, each of them will be
transposed and reshaped.
Args:
inputs: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
compute_op: The function to wrap. Must accept the input tensor as its first
argument, and a second keyword argument `name`.
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same shape as inputs. If compute_op returns multiple
tensors, each of them have the same shape as the input.
Raises:
InvalidArgumentError: if `inputs` is empty or `dim` is beyond the last
dimension of `inputs`.
"""
def _swap_axis(input_tensor, dim_index, last_index, name=None):
"""Swaps logits's dim_index and last_index."""
return array_ops.transpose(
input_tensor,
array_ops.concat([
math_ops.range(dim_index), [last_index],
math_ops.range(dim_index + 1, last_index), [dim_index]
], 0),
name=name)
inputs = ops.convert_to_tensor(inputs)
# We need its original shape for shape inference.
shape = inputs.get_shape()
is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
if is_last_dim:
return compute_op(inputs, name=name)
dim_val = dim
if isinstance(dim, ops.Tensor):
dim_val = tensor_util.constant_value(dim)
if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
raise errors_impl.InvalidArgumentError(
None, None,
"Dimension (%d) must be in the range [%d, %d) where %d is the number of"
" dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
shape.ndims))
# If dim is not the last dimension, we have to do a transpose so that we can
# still perform the op on its last dimension.
# In case dim is negative (and is not last dimension -1), add shape.ndims
ndims = array_ops.rank(inputs)
if not isinstance(dim, ops.Tensor):
if dim < 0:
dim += ndims
else:
dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
# Swap logits' dimension of dim and its last dimension.
input_rank = array_ops.rank(inputs)
dim_axis = dim % shape.ndims
inputs = _swap_axis(inputs, dim_axis, math_ops.subtract(input_rank, 1))
# Do the actual call on its last dimension.
def fix_output(output):
output = _swap_axis(
output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
# Make shape inference work since transpose may erase its static shape.
output.set_shape(shape)
return output
outputs = compute_op(inputs)
if isinstance(outputs, tuple):
return tuple(fix_output(output) for output in outputs)
else:
return fix_output(outputs)
@tf_export(v1=["nn.softmax", "math.softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
See: https://en.wikipedia.org/wiki/Softmax_function
Example usage:
>>> tf.nn.softmax([-1, 0., 1.])
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>
Args:
logits: A non-empty `Tensor`, or an object whose type has a registered
`Tensor` conversion function. Must be one of the following types:
`half`, `float32`, `float64`. See also `convert_to_tensor`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
TypeError: If no conversion function is registered for `logits` to
Tensor.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
@dispatch.add_dispatch_support
def softmax_v2(logits, axis=None, name=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
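Example usage (the same inputs as the `tf.compat.v1.nn.softmax` example in
this module):
>>> tf.nn.softmax([-1, 0., 1.])
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>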
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
@dispatch.add_dispatch_support
def log_softmax_v2(logits, axis=None, name=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
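For example, a small sketch (a uniform row of logits maps to `log(1/3)` in
every position):
>>> tf.math.log_softmax(tf.constant([[0.0, 0.0, 0.0]])).numpy()
array([[-1.0986123, -1.0986123, -1.0986123]], dtype=float32)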
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
Usage:
>>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]
>>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]]
>>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([0.16984604, 0.82474494], dtype=float32)>
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
return softmax_cross_entropy_with_logits_v2_helper(
labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
labels, logits, axis=None, name=None, dim=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
del dim
if axis is None:
axis = -1
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
if axis != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
labels = _move_dim_to_end(labels, axis, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
# The second output tensor contains the gradients. We use it in
# CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus axis.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[axis]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
dim=-1,
name=None,
axis=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `dim` argument specifying the class dimension.
Backpropagation will happen only into `logits`. To calculate a cross entropy
loss that allows backpropagation into both `logits` and `labels`, see
`tf.nn.softmax_cross_entropy_with_logits_v2`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
axis: Alias for dim.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
dim = deprecated_argument_lookup("axis", axis, "dim", dim)
_ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
logits)
with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
[logits, labels]) as name:
labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
return softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits, axis=dim, name=name)
@tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
case the `dim`-th dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Per-label activations (typically a linear output) of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
`float64`. These activation energies are interpreted as unnormalized log
probabilities.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
_ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
labels, logits)
# TODO(pcmurray) Raise an error when the label is not an index in
# [0, num_classes). Note: This could break users who call this with bad
# labels, but disregard the bad results.
# Reshape logits and labels to rank 2.
with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = ops.convert_to_tensor(labels)
logits = ops.convert_to_tensor(logits)
precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
logits.dtype) == dtypes.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = array_ops.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
shape_checks.append(
check_ops.assert_equal(
array_ops.shape(labels),
array_ops.shape(logits)[:-1]))
with ops.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
labels = array_ops.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = array_ops.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
@tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
  case the last dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1},
num_classes]` and dtype `float16`, `float32`, or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
return sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
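# Illustrative usage sketch (exposition only, not part of the original module):
# the hypothetical helper below shows the named-argument calling convention,
# assuming eager TensorFlow with the public `tf` namespace importable.
def _example_sparse_softmax_xent():  # hypothetical name, added for exposition
  import tensorflow as tf  # assumed available
  logits = tf.constant([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])  # [batch=2, classes=3]
  labels = tf.constant([0, 2])  # one class index per row, each in [0, 3)
  # Returns one scalar loss per example, shape [2].
  return tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)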
@tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"])
@dispatch.add_dispatch_support
def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None): # pylint: disable=redefined-builtin
"""Performs the avg pooling on the input.
  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The average pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
avg_pooling_ops = {
1: avg_pool1d,
2: gen_nn_ops.avg_pool,
3: gen_nn_ops.avg_pool3d
}
op = avg_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
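# Illustrative usage sketch (exposition only): rank inference dispatches a 4-D
# NHWC input to the 2-D pooling op. The helper name and values are
# hypothetical; assumes eager TensorFlow with the public `tf` namespace.
def _example_avg_pool():  # hypothetical, added for exposition
  import tensorflow as tf  # assumed available
  x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])  # one 4x4 image, one channel
  y = tf.nn.avg_pool(x, ksize=2, strides=2, padding="VALID")
  # Each output entry is the mean of a non-overlapping 2x2 window, e.g.
  # y[0, 0, 0, 0] == (0 + 1 + 4 + 5) / 4 == 2.5; output shape is [1, 2, 2, 1].
  return y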
@tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"])
@dispatch.add_dispatch_support
def avg_pool(value, ksize, strides, padding, data_format="NHWC",
name=None, input=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool", [value]) as name:
value = deprecation.deprecated_argument_lookup(
"input", input, "value", value)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.avg_pool2d", v1=[])
@dispatch.add_dispatch_support
def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
Args:
input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
    A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool2D", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
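# Illustrative usage sketch (exposition only; the helper name is hypothetical
# and eager TensorFlow with the public `tf` namespace is assumed):
def _example_avg_pool2d():
  import tensorflow as tf  # assumed available
  x = tf.ones([1, 4, 4, 3])  # NHWC
  y = tf.nn.avg_pool2d(x, ksize=[2, 2], strides=[2, 2], padding="SAME")
  # Averaging a constant input leaves the values unchanged; shape [1, 2, 2, 3].
  return y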
@tf_export("nn.avg_pool1d")
@dispatch.add_dispatch_support
def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
  Note: internally this op reshapes the input and uses the underlying 2-D
  operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
    The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool1D", [input]) as name:
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
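# Illustrative usage sketch (exposition only; hypothetical helper, assumes
# eager TensorFlow): the op takes NWC input and pools over the width axis.
def _example_avg_pool1d():
  import tensorflow as tf  # assumed available
  x = tf.constant([[[1.0], [2.0], [3.0], [4.0]]])  # [batch=1, width=4, channels=1]
  y = tf.nn.avg_pool1d(x, ksize=2, strides=2, padding="VALID")
  return y  # [[[1.5], [3.5]]]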
@tf_export("nn.avg_pool3d")
@dispatch.add_dispatch_support
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
Args:
input: A 5-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
name: Optional name for the operation.
Returns:
    A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.avg_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool", v1=["nn.max_pool_v2"])
@dispatch.add_dispatch_support
def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None):
"""Performs the max pooling on the input.
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
if (isinstance(padding, (list, tuple)) and n == 3):
raise ValueError("Explicit padding is not yet supported with an input "
"tensor of rank 5")
max_pooling_ops = {
1: max_pool1d,
2: max_pool2d,
3: gen_nn_ops.max_pool3d
}
op = max_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
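# Illustrative usage sketch (exposition only; hypothetical helper, assumes
# eager TensorFlow in a version where tf.nn.max_pool accepts explicit
# paddings, as the code above does): one pixel of padding per spatial side.
def _example_max_pool_explicit_padding():
  import tensorflow as tf  # assumed available
  x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
  y = tf.nn.max_pool(x, ksize=2, strides=2,
                     padding=[[0, 0], [1, 1], [1, 1], [0, 0]])
  # The 4x4 input is padded to 6x6 before pooling, so the output is [1, 3, 3, 1].
  return y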
@tf_export(v1=["nn.max_pool"])
@dispatch.add_dispatch_support
def max_pool(value,
ksize,
strides,
padding,
data_format="NHWC",
name=None,
input=None): # pylint: disable=redefined-builtin
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "MaxPool", [value]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
padding, explicit_paddings = convert_padding(padding)
if ((np.isscalar(ksize) and ksize == 0) or
(isinstance(ksize,
(list, tuple, np.ndarray)) and any(v == 0 for v in ksize))):
raise ValueError("ksize cannot be zero.")
return gen_nn_ops.max_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool1d")
@dispatch.add_dispatch_support
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
"""Performs the max pooling on the input.
  Note: internally this op reshapes the input and uses the underlying 2-D
  operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NWC"`, this should be in the form `[[0, 0], [pad_left,
pad_right], [0, 0]]`. When explicit padding used and data_format is
`"NCW"`, this should be in the form `[[0, 0], [0, 0], [pad_left,
pad_right]]`. When using explicit padding, the size of the paddings cannot
be greater than the sliding window size.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool1d", [input]) as name:
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
padding, explicit_paddings = convert_padding(padding, 3)
if padding == "EXPLICIT":
explicit_paddings = [0, 0] + explicit_paddings
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
# pylint: enable=redefined-builtin
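# Illustrative usage sketch (exposition only; hypothetical helper, assumes
# eager TensorFlow with the public `tf` namespace):
def _example_max_pool1d():
  import tensorflow as tf  # assumed available
  x = tf.constant([[[1.0], [3.0], [2.0], [5.0]]])  # [batch=1, width=4, channels=1]
  y = tf.nn.max_pool1d(x, ksize=2, strides=2, padding="VALID")
  return y  # [[[3.0], [5.0]]]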
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
@dispatch.add_dispatch_support
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool2d", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
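# Illustrative usage sketch (exposition only; hypothetical helper, assumes
# eager TensorFlow with the public `tf` namespace):
def _example_max_pool2d():
  import tensorflow as tf  # assumed available
  x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])  # one 4x4 image, one channel
  y = tf.nn.max_pool2d(x, ksize=2, strides=2, padding="VALID")
  # Each output entry is the max of a non-overlapping 2x2 window:
  # [[5., 7.], [13., 15.]] with shape [1, 2, 2, 1].
  return y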
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool3d")
@dispatch.add_dispatch_support
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 5-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is: [batch, in_channels, in_depth, in_height,
in_width].
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.max_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v2(
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
output_dtype=dtypes.int64,
include_batch_in_index=False,
name=None):
"""Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if
`include_batch_in_index` is False;
`((b * height + y) * width + x) * channels + c`
if `include_batch_in_index` is True.
The indices returned are always in `[0, height) x [0, width)` before
flattening, even if padding is involved and the mathematically correct answer
is outside (either negative or too large). This is a bug, but fixing it is
difficult to do in a safe backwards compatible way, especially due to
flattening.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, height, width, channels]`. Input to pool over.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
`"NHWC"`.
Specify the data format of the input and output data.
output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
The dtype of the returned argmax tensor.
include_batch_in_index: An optional `boolean`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, argmax).
output: A `Tensor`. Has the same type as `input`.
argmax: A `Tensor` of type `output_dtype`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
ksize = _get_sequence(ksize, 2, 3, "ksize")
strides = _get_sequence(strides, 2, 3, "strides")
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=output_dtype,
include_batch_in_index=include_batch_in_index,
name=name)
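# Illustrative usage sketch (exposition only; hypothetical helper, assumes
# eager TensorFlow): shows how the flattened `argmax` indices relate to the
# (y * width + x) * channels + c formula described above.
def _example_max_pool_with_argmax():
  import tensorflow as tf  # assumed available
  x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
  values, argmax = tf.nn.max_pool_with_argmax(
      x, ksize=2, strides=2, padding="VALID")
  # The first window's max (5.0) sits at row 1, col 1 of the 4x4 input,
  # so its flattened index is (1 * 4 + 1) * 1 + 0 == 5.
  return values, argmax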
@tf_export(v1=["nn.max_pool_with_argmax"])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
Targmax=None,
name=None,
output_dtype=None,
include_batch_in_index=False):
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
Targmax = deprecated_argument_lookup(
"output_dtype", output_dtype, "Targmax", Targmax)
if Targmax is None:
Targmax = dtypes.int64
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=Targmax,
include_batch_in_index=include_batch_in_index,
name=name)
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
@ops.RegisterStatistics("Conv3D", "flops")
def _calc_conv3d_flops(graph, node):
"""Calculates the compute resources needed for Conv3D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_time = int(filter_shape[0])
filter_height = int(filter_shape[1])
filter_width = int(filter_shape[2])
filter_in_depth = int(filter_shape[3])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_in_depth * filter_time *
filter_height * filter_width * 2))
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
@dispatch.add_dispatch_support
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
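# Illustrative usage sketch (exposition only; hypothetical helper, assumes the
# TF1 compatibility namespace `tf.compat.v1` is available):
def _example_xw_plus_b():
  import tensorflow as tf  # assumed available
  x = tf.constant([[1.0, 2.0]])               # [batch=1, in_units=2]
  w = tf.constant([[1.0, 0.0], [0.0, 1.0]])   # [in_units=2, out_units=2]
  b = tf.constant([0.5, -0.5])                # [out_units=2]
  return tf.compat.v1.nn.xw_plus_b(x, w, b)   # [[1.5, 1.5]]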
def xw_plus_b_v1(x, weights, biases, name=None):
"""Computes matmul(x, weights) + biases.
  This is a deprecated version of `xw_plus_b` that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
def _get_noise_shape(x, noise_shape):
# If noise_shape is none return immediately.
if noise_shape is None:
return array_ops.shape(x)
try:
# Best effort to figure out the intended shape.
    # If not possible, let the op handle it.
    # In eager mode an exception will show up.
noise_shape_ = tensor_shape.as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tensor_shape.TensorShape(new_dims)
return noise_shape
@tf_export(v1=["nn.dropout"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
"Rate should be set to `rate = 1 - keep_prob`.",
"keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
rate=None):
"""Computes dropout.
For each element of `x`, with probability `rate`, outputs `0`, and otherwise
scales up the input by `1 / (1-rate)`. The scaling is such that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
rate: A scalar `Tensor` with the same type as `x`. The probability that each
element of `x` is discarded.
Returns:
    A Tensor of the same shape as `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
point tensor.
"""
try:
keep = 1. - keep_prob if keep_prob is not None else None
except TypeError:
raise ValueError("keep_prob must be a floating point number or Tensor "
"(got %r)" % keep_prob)
rate = deprecation.deprecated_argument_lookup(
"rate", rate,
"keep_prob", keep)
if rate is None:
raise ValueError("You must provide a rate to dropout.")
return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
@dispatch.add_dispatch_support
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
"""Computes dropout: randomly sets elements to zero to prevent overfitting.
Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
When converting 1.x code, please use named arguments to ensure behavior stays
consistent.
See also: `tf.keras.layers.Dropout` for a dropout layer.
[Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
  models. Input elements are randomly set to zero (and the other elements are
rescaled). This encourages each node to be independently useful, as it cannot
rely on the output of other nodes.
More precisely: With probability `rate` elements of `x` are set to `0`.
The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
expected value is preserved.
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
array([[2., 0., 0., 2., 2.],
[2., 2., 2., 2., 2.],
[2., 0., 2., 0., 2.]], dtype=float32)
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy()
array([[0., 0., 0., 5., 5.],
[0., 5., 0., 5., 0.],
[5., 0., 5., 0., 5.]], dtype=float32)
>>> tf.nn.dropout(x, rate = 0.0) == x
<tf.Tensor: shape=(3, 5), dtype=bool, numpy=
array([[ True, True, True, True, True],
[ True, True, True, True, True],
[ True, True, True, True, True]])>
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. This is useful for dropping whole
channels from an image or sequence. For example:
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,10])
>>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy()
array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32)
Args:
x: A floating point tensor.
rate: A scalar `Tensor` with the same type as x. The probability
that each element is dropped. For example, setting rate=0.1 would drop
10% of input elements.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
Returns:
    A Tensor of the same shape as `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
tensor. `rate=1` is disallowed, because the output would be all zeros,
which is likely not what was intended.
"""
with ops.name_scope(name, "dropout", [x]) as name:
is_rate_number = isinstance(rate, numbers.Real)
if is_rate_number and (rate < 0 or rate >= 1):
raise ValueError("rate must be a scalar tensor or a float in the "
"range [0, 1), got %g" % rate)
x = ops.convert_to_tensor(x, name="x")
x_dtype = x.dtype
if not x_dtype.is_floating:
raise ValueError("x has to be a floating point tensor since it's going "
"to be scaled. Got a %s tensor instead." % x_dtype)
if is_rate_number and rate == 0:
# Fast-path: Return the input immediately if rate is non-tensor & is `0`.
# We trigger this after all error checking
# and after `x` has been converted to a tensor, to prevent inconsistent
# tensor conversions/error raising if rate is changed to/from 0.
#
# We also explicitly call `random_seed.get_seed` to make sure
# we don't change the random number generation behavior of
# stateful random ops by entering a fastpath,
# despite not generating a random tensor in the fastpath
random_seed.get_seed(seed)
return x
is_executing_eagerly = context.executing_eagerly()
if not tensor_util.is_tf_type(rate):
if is_rate_number:
keep_prob = 1 - rate
scale = 1 / keep_prob
scale = ops.convert_to_tensor(scale, dtype=x_dtype)
ret = gen_math_ops.mul(x, scale)
else:
raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
else:
rate.get_shape().assert_has_rank(0)
rate_dtype = rate.dtype
if rate_dtype != x_dtype:
if not rate_dtype.is_compatible_with(x_dtype):
raise ValueError(
"Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
(x_dtype.name, rate_dtype.name, rate))
rate = gen_math_ops.cast(rate, x_dtype, name="rate")
one_tensor = constant_op.constant(1, dtype=x_dtype)
ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
noise_shape = _get_noise_shape(x, noise_shape)
# Sample a uniform distribution on [0.0, 1.0) and select values larger
# than rate.
#
# NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
# and subtract 1.0.
random_tensor = random_ops.random_uniform(
noise_shape, seed=seed, dtype=x_dtype)
# NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
# hence a >= comparison is used.
keep_mask = random_tensor >= rate
ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
if not is_executing_eagerly:
ret.set_shape(x.get_shape())
return ret
@tf_export("math.top_k", "nn.top_k")
@dispatch.add_dispatch_support
def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank=1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
>>> result = tf.math.top_k([1, 2, 98, 1, 1, 99, 3, 1, 3, 96, 4, 1],
... k=3)
>>> result.values.numpy()
array([99, 98, 96], dtype=int32)
>>> result.indices.numpy()
array([5, 2, 9], dtype=int32)
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
>>> input = tf.random.normal(shape=(3,4,5,6))
>>> k = 2
>>> values, indices = tf.math.top_k(input, k=k)
>>> values.shape.as_list()
[3, 4, 5, 2]
>>>
>>> values.shape == indices.shape == input.shape[:-1] + [k]
True
  The indices can be used to `gather` from a tensor whose shape matches `input`.
>>> gathered_values = tf.gather(input, indices, batch_dims=-1)
>>> assert tf.reduce_all(gathered_values == values)
If two elements are equal, the lower-index element appears first.
>>> result = tf.math.top_k([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
... k=3)
>>> result.indices.numpy()
array([0, 1, 3], dtype=int32)
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
A tuple with two named fields:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
r"""Finds values of the `n`-th smallest value for the last dimension.
Note that n is zero-indexed.
If the input is a vector (rank-1), finds the entries which is the nth-smallest
value in the vector and outputs their values as scalar tensor.
  For matrices (resp. higher rank input), computes the entry which is the
  nth-smallest value in each row (resp. vector along the last dimension). Thus,
values.shape = input.shape[:-1]
Args:
input: 1-D or higher `Tensor` with last dimension at least `n+1`.
n: A `Tensor` of type `int32`.
0-D. Position of sorted vector to select along the last dimension (along
each row for matrices). Valid range of n is `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector instead of
      the nth-smallest.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The `n`-th order statistic along each last dimensional slice.
"""
return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
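# Illustrative usage sketch (exposition only; hypothetical helper): calls the
# module-level `nth_element` defined above, assuming eager TensorFlow.
def _example_nth_element():
  import tensorflow as tf  # assumed available
  x = tf.constant([4.0, 1.0, 3.0, 2.0])
  # n is zero-indexed, so n=1 selects the second-smallest value: 2.0.
  return nth_element(x, n=1)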
@tf_export(v1=["nn.fractional_max_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
This is a deprecated version of `fractional_max_pool`.
  Fractional max pooling is slightly different from regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name)
@tf_export("nn.fractional_max_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_max_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
  Fractional max pooling is slightly different from regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
Pooling ratio for each dimension of `value`, currently only supports row
and col dimension and should be >= 1.0. For example, a valid pooling ratio
looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0
because we don't allow pooling on batch and channels dimensions. 1.44 and
1.73 are pooling ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
if (isinstance(pooling_ratio, (list, tuple))):
if (pooling_ratio[0] != 1.0 or pooling_ratio[-1] != 1.0):
raise ValueError(
"The first and last elements of pooling ratio must be 1.0.")
for element in pooling_ratio:
if element < 1.0:
raise ValueError("pooling_ratio should be >= 1.0.")
elif (isinstance(pooling_ratio, (int, float))):
if pooling_ratio < 1.0:
raise ValueError("pooling_ratio should be >= 1.0.")
else:
raise ValueError("pooling_ratio should be an int or a list of ints.")
pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")
if seed == 0:
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
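# Illustrative usage sketch (exposition only; hypothetical helper, assumes
# eager TensorFlow): a non-integer reduction ratio of 1.5 on height and width.
def _example_fractional_max_pool():
  import tensorflow as tf  # assumed available
  x = tf.random.uniform([1, 9, 9, 1])
  out, rows, cols = tf.nn.fractional_max_pool(
      x, pooling_ratio=[1.0, 1.5, 1.5, 1.0], pseudo_random=True, seed=13)
  # Spatial dims shrink by roughly 1.5x (9 -> 6); the chosen pool boundaries
  # are returned in `rows` and `cols`.
  return out, rows, cols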
@tf_export(v1=["nn.fractional_avg_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
This is a deprecated version of `fractional_avg_pool`.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_avg_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
if seed == 0:
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
"""Calculates the compute resources needed for Dilation2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@tf_export(v1=["nn.erosion2d"])
@dispatch.add_dispatch_support
def erosion2d(value, kernel, strides, rates, padding, name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
kernel[dy, dx, c]
Duality: The erosion of `value` by the `kernel` is equal to the negation of
the dilation of `-value` by the reflected `kernel`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
    ValueError: If the `value` depth does not match `kernel`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))
@tf_export("nn.erosion2d", v1=[])
@dispatch.add_dispatch_support
def erosion2d_v2(value,
filters,
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filters_height, filters_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - dilations[1] * dy,
strides[2] * x - dilations[2] * dx,
c] -
filters[dy, dx, c]
Duality: The erosion of `value` by the `filters` is equal to the negation of
the dilation of `-value` by the reflected `filters`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `value`.
3-D with shape `[filters_height, filters_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
with ops.name_scope(name, "erosion2d", [value, filters]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(filters, [0, 1]),
strides=strides,
rates=dilations,
padding=padding,
name=name))
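# Illustrative sketch (not part of the original module): grayscale erosion of a
# tiny image with a flat (all-zero) structuring element, assuming TF 2.x. With
# a flat filter the erosion reduces to a sliding-window minimum, which makes
# the duality with dilation easy to verify by hand.
def _erosion2d_demo():
  import tensorflow as tf  # deferred import for a standalone sketch
  value = tf.reshape(tf.constant([[1., 2., 3.],
                                  [4., 5., 6.],
                                  [7., 8., 9.]]), [1, 3, 3, 1])
  filters = tf.zeros([2, 2, 1])  # flat 2x2 structuring function
  # Expected output (VALID padding): the 2x2 window minima [[1., 2.], [4., 5.]].
  return tf.nn.erosion2d(value, filters, strides=[1, 1, 1, 1],
                         padding="VALID", data_format="NHWC",
                         dilations=[1, 1, 1, 1])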
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
@dispatch.add_dispatch_support
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
prediction for the target class is finite (not inf, -inf, or nan) and among
the top `k` predictions among all predictions for example `i`. Note that the
behavior of `InTopK` differs from the `TopK` op in its handling of ties; if
multiple classes have the same prediction value and straddle the top-`k`
boundary, all of those classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
"""
with ops.name_scope(name, "in_top_k"):
return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
@dispatch.add_dispatch_support
def in_top_k_v2(targets, predictions, k, name=None):
return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
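# Illustrative sketch (not part of the original module): the tie handling
# described above, assuming TF 2.x eager execution. The second row has two
# classes tied at the top-1 boundary, so both count as being in the top 1.
def _in_top_k_demo():
  import tensorflow as tf  # deferred import for a standalone sketch
  predictions = tf.constant([[0.1, 0.9, 0.0],
                             [0.5, 0.5, 0.0]])
  targets = tf.constant([0, 1])
  # Expected result: [False, True] -- class 0 is not in the top 1 of row 0,
  # while the tied class 1 is counted as in the top 1 of row 1.
  return tf.math.in_top_k(targets, predictions, k=1)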
tf_export(v1=["nn.quantized_avg_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_avg_pool))
tf_export(v1=["nn.quantized_conv2d"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_conv2d))
tf_export(v1=["nn.quantized_relu_x"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_relu_x))
tf_export(v1=["nn.quantized_max_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_max_pool))
@tf_export("nn.isotonic_regression", v1=[])
@dispatch.add_dispatch_support
def isotonic_regression(inputs, decreasing=True, axis=-1):
r"""Solves isotonic regression problems along the given axis.
For each vector x, the problem solved is
$$\argmin_{y_1 >= y_2 >= ... >= y_n} \sum_i (x_i - y_i)^2.$$
As the solution is component-wise constant, a second tensor is returned that
encodes the segments. The problems are solved over the given axis.
Consider the following example, where we solve a batch of two problems. The
  first input is [3, 1, 2], while the second is [1, 3, 4] (as the axis is 1).
>>> x = tf.constant([[3, 1, 2], [1, 3, 4]], dtype=tf.float32)
>>> y, segments = tf.nn.isotonic_regression(x, axis=1)
>>> y # The solution.
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[3. , 1.5 , 1.5 ],
[2.6666667, 2.6666667, 2.6666667]], dtype=float32)>
  Note that the first solution has two blocks [3] and [1.5, 1.5]. The second
solution is constant, and thus has a single segment. These segments are
exactly what the second returned tensor encodes:
>>> segments
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[0, 1, 1],
[0, 0, 0]], dtype=int32)>
Args:
inputs: A tensor holding the inputs.
    decreasing: If set to False, the inequalities in the optimization
      constraint are flipped.
axis: The axis along which the problems should be solved.
Returns:
    output: The solutions, same shape and type as the input.
segments: An int32 tensor, same shape as the input indicating the segments
that have the same value. Specifically, those positions that have the same
value correspond to the same segment. These values start at zero, and are
      monotonically increasing for each solution.
"""
type_promotions = {
# Float types get mapped to themselves, int8/16 to float32, rest to double
dtypes.float32:
dtypes.float32,
dtypes.half:
dtypes.half,
dtypes.bfloat16:
dtypes.bfloat16,
dtypes.int8:
dtypes.float32,
dtypes.int16:
dtypes.float32,
}
inputs = ops.convert_to_tensor(inputs)
try:
output_dtype = type_promotions[inputs.dtype]
except KeyError:
output_dtype = dtypes.float64
def compute_on_matrix(matrix, name=None):
iso_fn = functools.partial(
gen_nn_ops.isotonic_regression, output_dtype=output_dtype, name=name)
if decreasing:
return iso_fn(matrix)
else:
output, segments = iso_fn(-matrix)
return -output, segments
return _wrap_2d_function(inputs, compute_on_matrix, axis)
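# Illustrative sketch (not part of the original module): the effect of the
# `decreasing` flag documented above, assuming TF 2.x eager execution.
def _isotonic_regression_demo():
  import tensorflow as tf  # deferred import for a standalone sketch
  x = tf.constant([[3., 1., 2.]])
  # Non-increasing fit (the default): [3., 1.5, 1.5], as in the doctest above.
  y_dec, _ = tf.nn.isotonic_regression(x)
  # Non-decreasing fit (constraints flipped): the pool-adjacent-violators
  # solution collapses to a single block, [2., 2., 2.].
  y_inc, _ = tf.nn.isotonic_regression(x, decreasing=False)
  return y_dec, y_inc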
|
annarev/tensorflow
|
tensorflow/python/ops/nn_ops.py
|
Python
|
apache-2.0
| 238,507
|
[
"Gaussian"
] |
402706303b388a11d6667adeff92ec69d19c9fa1ad7bca22c48b9b646012da13
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------------
# Name: adawaty
# Purpose: web interface tools for manipulating arabic texts.
#
# Author: Taha Zerrouki (taha.zerrouki[at]gmail.com)
#
# Created: 31-10-2011
# Copyright: (c) Taha Zerrouki 2011
# Licence: GPL
#-------------------------------------------------------------------------------
"""
The original web interface is from webQamoos,
Copyright © 2009, Muayyad Alsadi <[email protected]>
Released under terms of Waqf Public License.
This program is free software; you can redistribute it and/or modify
it under the terms of the latest version Waqf Public License as
published by Ojuba.org.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
The Latest version of the license can be found on
"http://waqf.ojuba.org/license"
"""
from __future__ import (absolute_import, division,
print_function,
unicode_literals)
import sys
import os
import os.path
import re
from glob import glob
import six
unicode = six.text_type
basestring = six.string_types
#~ sys.path.append(os.path.join(__file__, '../../support/'))
#~ sys.path.append(os.path.join(__file__, '../../mishkal'))
sys.path.append(os.path.join(__file__, 'lib/'))
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'lib'))
#~ sys.path.append(os.path.join(__file__, '../../'))
from okasha2.baseWebApp import *
from okasha2.utils import fromFs, toFs
import core.adaat
import datetime
header=u"""
"""
footer=u"""
<footer>
<a href="/mishkal/contact">للاتصال<span class="glyphicon glyphicon-envelope"></span></a>
</span><a href="http://blog.tahadz.com">مدونتي<span class="glyphicon glyphicon-globe"></a>
</footer>
"""
MyJsonHeaders={
"Access-Control-Allow-Methods": "GET, POST, OPTIONS",
"Access-Control-Allow-Credentials": "true",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type, *",
}
#used in local
#comment it on server
# header=re.sub("/mishkal/",'', header);
# footer=re.sub("/mishkal/",'', footer);
class webApp(baseWebApp):
def __init__(self, *args, **kw):
baseWebApp.__init__(self,*args, **kw)
# self.myRepr = core.myrepr.MyRepr();
def _root(self, rq, *args):
raise redirectException(rq.script+'/main')
#~raise redirectException(rq.script+'/temp')
@expose(percentTemplate,["main.html"])
def main(self, rq, *args):
return {
'title':u'أدوات',
'script':rq.script,
'DefaultText':core.adaat.random_text(),
'ResultText':u"السلام عليكم",
'header':header,
"footer":footer,
# 'mode':self.mode, 'version':'0.1.0'
}
@expose(percentTemplate,["main.html"])
def index(self, rq, *args):
return {
'title':u'مشكال لتشكيل النصوص',
'script':rq.script,
'DefaultText':core.adaat.random_text(),
'ResultText':u"السلام عليكم",
'header':header,
"footer":footer,
# 'mode':self.mode, 'version':'0.1.0'
}
@expose(percentTemplate,["body_help.html"])
def help(self, rq, *args):
return {
}
@expose(jsonDumps, headers=MyJsonHeaders)
def ajaxGet(self, rq, *args):
"""
        This is an example of using ajax/json.
        To test it, visit http://localhost:8080/ajaxGet
"""
text = rq.q.getfirst('text','Zerrouki Taha')
action = rq.q.getfirst('action','DoNothing')
order = rq.q.getfirst('order','0')
options = {};
options['lastmark']=rq.q.getfirst('lastmark','0')
if sys.version_info[0] < 3:
text = text.decode('utf-8')
options['lastmark'] = options['lastmark'].decode('utf8')
self.writelog(text,action);
#Handle contribute cases
if action=="Contribute":
return {'result':u"شكرا جزيلا على مساهمتك."}
resulttext = core.adaat.DoAction(text ,action, options)
# self.writelog(repr(resulttext),"ResultText");
# we choose to avoid logging results
# self.writelog(resulttext,"ResultText");
return {'result':resulttext, 'order':order}
@expose(percentTemplate,["doc.html"])
def doc(self, rq, *args):
return {
'script':rq.script,
'header':header,
"footer":footer,
}
@expose(percentTemplate,["download.html"])
def download(self, rq, *args):
return {
'script':rq.script,
'header':header,
"footer":footer,
}
@expose(percentTemplate,["contact.html"])
def contact(self, rq, *args):
return {
'script':rq.script,
'header':header,
"footer":footer,
}
@expose(percentTemplate,["link.html"])
def link(self, rq, *args):
return {
'script':rq.script,
'header':header,
"footer":footer,
}
@expose(percentTemplate,["projects.html"])
def projects(self, rq, *args):
return {
'script':rq.script,
'header':header,
"footer":footer,
}
@expose(percentTemplate,["log.html"])
def log(self, rq, *args):
return {
'script':rq.script,
'header':header,
"footer":footer,
}
@expose(percentTemplate,["whoisqutrub.html"])
def whoisqutrub(self, rq, *args):
return {
'script':rq.script,
'header':header,
"footer":footer,
}
#~@expose(percentTemplate,["temp.html"])
#~def temp(self, rq, *args):
#~return {
#~'script':rq.script,
#~'header':header,
#~"footer":footer,
#~}
def writelog(self,text,action):
"""
@param text: an object to be logged
@type text: object
"""
timelog=datetime.datetime.now().strftime("%Y-%m-%d %I:%M");
textlog=u"\t".join([timelog, action, text]);
self._logger.info(textlog);
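# Illustrative sketch (not part of the original file): how an HTTP client could
# exercise the ajaxGet endpoint defined above. The host/port are assumptions
# (use whatever the okasha app is served on); 'DoNothing' is the default action
# taken from the handler, and `requests` is assumed to be installed client-side.
def _ajax_get_demo():
    import requests
    resp = requests.get("http://localhost:8080/ajaxGet",
                        params={"text": u"السلام عليكم",
                                "action": "DoNothing",
                                "order": "1"})
    return resp.json()  # e.g. {"result": ..., "order": "1"}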
|
linuxscout/mishkal
|
interfaces/web/adawaty.py
|
Python
|
gpl-3.0
| 6,234
|
[
"VisIt"
] |
1af9f500381e4ae25f49c19031c09c50ac0f7ae10306c50a204f5fa9802996dc
|
import sys
import time
import numpy as np
from ase.parallel import world
from asap3.Tools.ParameterOptimization.Optimization import ParameterOptimization
def now():
"Time as a string."
return time.strftime("%H:%M:%S")
class ParameterSearch(ParameterOptimization):
"""Parallel parameter optimization."""
def __init__(self, elements, calc_class, initparam, varparam, quantities,
latticeconstants, calc_args=None, debug=False):
ParameterOptimization.__init__(self, elements, calc_class, initparam,
varparam, quantities, latticeconstants,
calc_args, debug)
def random_parameters(self, parameters, std):
change = np.random.normal(1.0, std, len(parameters))
return (np.array(parameters) * change).tolist()
def run(self, std, temp, maxstep=100, xtol=1e-2, ftol=5e-2, delta=5e-3,
ptol=1e-2, dat=None, log=None, err=None):
"""Run the optimization.
First parameter: Gauss width for MC part
Second parameter: MC Temperature
        Third parameter: run time, in h or m, or the number of steps if an integer.
        xtol and ftol are the simplex convergence criteria.
        delta is the initial simplex size (relative).
"""
self.ptol = ptol
self.nsteps = 0
self.numberlist = []
self.errorlist = []
self.countlist = []
self.fitparlist = []
self.optparlist = []
self.optvallist = []
self.maxstep = None
self.endtime = None
self.done(maxstep)
if log != None:
s = log.rfind('.')
log = open(log[:s] + '-' + str(world.rank) + log[s:], 'a')
if err != None:
s = err.rfind('.')
err = open(err[:s] + '-' + str(world.rank) + err[s:], 'w')
if world.rank == 0:
done = np.zeros(world.size)
request_s = [0] * world.size
request_r = [0] * world.size
result = [0] * world.size
else:
done = False
request_s = None
request_r = None
result = np.empty(1)
accepts = 0
old_error = 1e6
old_optpar = None
while not self.done():
            # Make a random set of parameters close to the previous optimum,
            # or spread around the initial parameters on the first pass
if old_optpar == None:
initparam = self.parameters_dict2list()
if world.rank == 0:
fitpar = initparam
else:
fitpar = self.random_parameters(initparam, std * 2.0)
else:
fitpar = self.random_parameters(old_optpar, std)
# Run optimization
(error, optpar, optval) = self.fit(fitpar, xtol, ftol, delta,
log, err, True)
self.nsteps += 1
if error < old_error or temp < 1e-6:
prop = 1.0
else:
derr = (error - old_error) / old_error
prop = np.exp(-derr/temp)
if prop > np.random.uniform():
accepts += 1
old_error = error
old_optpar = optpar[:]
self.save_opt(error, fitpar, optpar, optval)
# Gather results and Write output
sys.stderr.write("Task %i [%s]: Finished optimizing, writing results.\n"
% (world.rank, now()))
self.write_local(dat, std, temp, accepts)
if dat != None:
sys.stderr.write("Task %i [%s]: Communicating.\n" % (world.rank, now()))
a = np.zeros(21)
alen = min(len(self.errorlist), 21)
a[:alen] = np.array(self.errorlist)[:alen]
if world.rank == 0:
errors = np.zeros(world.size * len(a))
tasks = np.arange(world.size * len(a)) / 21
if world.size > 1:
world.gather(a, 0, errors)
else:
errors = a
tasks = tasks[errors > 1e-20]
errors = errors[errors > 1e-20]
tasks = tasks[errors.argsort()]
errors = errors[errors.argsort()]
f = open(dat, 'a')
f.write('\nParameter search at ' + time.asctime() + '\n')
f.write('--------------------------------------------\n')
f.write('Rank Task Error function\n')
for i, (n, e) in enumerate(zip(tasks, errors)):
f.write('%-4i %-4i %.5e\n' % (i, n, e))
f.close()
else:
world.gather(a, 0)
sys.stderr.write("Task %i [%s]: Done.\n" % (world.rank, now()))
def done(self, maxstep=None, hard=False):
if maxstep != None:
if isinstance(maxstep, int):
self.maxstep = maxstep
elif isinstance(maxstep, str):
multiplier = {'h': 3600, 'm': 60}
elapse = float(maxstep[:-1]) * multiplier[maxstep[-1]]
self.endtime = time.time() + elapse
else:
raise ValueError('Cannot understand the given maxstep.')
else:
if self.maxstep == None:
return time.time() >= self.endtime
else:
if hard:
return self.nsteps > self.maxstep * 1.2
else:
return self.nsteps >= self.maxstep
def write_local(self, dat, std, temp, accepts):
s = dat.rfind('.')
dat = open(dat[:s] + '-' + str(world.rank) + dat[s:], 'a')
T = time.localtime()
        dat.write("Parameter search %02i-%02i-%i " % (T[2], T[1], T[0]) +
                  "at %02i:%02i:%02i\n" % (T[3], T[4], T[5]) +
                  "-" * 90 + "\n" +
                  "%i optimizations have been made and " % (self.nsteps,) +
"%i have been accepted.\n" % (accepts,) +
"Below the initial and the 20 best are listed.\n\n" +
"Gauss width: %.3f\nTemperature: %.3f\n\n" % (std, temp))
for num, err, count, fitpar, optpar, optval in zip(self.numberlist,
self.errorlist,
self.countlist,
self.fitparlist,
self.optparlist,
self.optvallist):
dat.write("Optimization no. %i with " % (num,) +
"error function %.5e (%i):\n" % (err, count))
self.write_result(fitpar, optpar, optval, dat)
dat.close()
def save_opt(self, error, fitpar, optpar, optval):
# Sort and save the 20 best results
length = len(self.errorlist)
if length == 0:
self.numberlist.append(self.nsteps)
self.errorlist.append(error)
self.countlist.append(1)
self.fitparlist.append(fitpar)
self.optparlist.append(optpar)
self.optvallist.append(optval)
else:
new_min = -1
new_rank = -1
for i, err in enumerate(self.errorlist):
op1 = np.array(optpar)
op2 = np.array(self.optparlist[i])
#ov1 = np.array(optval)
#ov2 = np.array(self.optvallist[i])
#ov_dev_max = np.max(2 * np.abs(ov1 - ov2) / np.abs(ov1 + ov2))
op_dev_max = np.max(2 * np.abs(op1 - op2) / np.abs(op1 + op2))
if op_dev_max < self.ptol and new_min < 0 and i > 0: #ov_dev_max < 2e-2 and
new_min = i
if error < err and new_rank < 0 and i > 0:
new_rank = i
#print new_min, new_rank, length
if new_min < 0:
# New minimum found
if new_rank > 0:
# Better error function
self.numberlist.insert(new_rank, self.nsteps)
self.errorlist.insert(new_rank, error)
self.countlist.insert(new_rank, 1)
self.fitparlist.insert(new_rank, fitpar)
self.optparlist.insert(new_rank, optpar)
self.optvallist.insert(new_rank, optval)
if length >= 21:
del self.numberlist[-1]
del self.errorlist[-1]
del self.countlist[-1]
del self.fitparlist[-1]
del self.optparlist[-1]
del self.optvallist[-1]
else:
# Worst error function
if length < 21:
self.numberlist.append(self.nsteps)
self.errorlist.append(error)
self.countlist.append(1)
self.fitparlist.append(fitpar)
self.optparlist.append(optpar)
self.optvallist.append(optval)
else:
# Old minimum found
if new_rank > 0 and new_rank <= new_min:
# Better error function
del self.numberlist[new_min]
del self.errorlist[new_min]
count = self.countlist.pop(new_min)
del self.fitparlist[new_min]
del self.optparlist[new_min]
del self.optvallist[new_min]
self.numberlist.insert(new_rank, self.nsteps)
self.errorlist.insert(new_rank, error)
self.countlist.insert(new_rank, count + 1)
self.fitparlist.insert(new_rank, fitpar)
self.optparlist.insert(new_rank, optpar)
self.optvallist.insert(new_rank, optval)
else:
self.countlist[new_min] += 1
if __name__ == '__main__':
from asap3.Tools.ParameterOptimization.EMT import EMT2011Fit
# Parameter names: ['eta2', 'lambda', 'kappa', 'E0', 'V0', 'S0', 'n0']
initparam = {('Pt','Pt'): [3.4242, 4.1423, 5.9432, -5.85, 4.067, 1.5346, 0.05412]}
varparam = {('Pt','Pt'): [True, True, True, True, True, True, False]}
quantities = [('lattice_constant_a', 'fcc', 'Pt', 3.92, 0.001),
#('lattice_constant_a', 'hcp', 'Pt', 2.77, 0.05),
#('lattice_ratio_ca', 'hcp', 'Pt', 4.78 / 2.77, 0.05),
('bulk_modulus', 'fcc', 'Pt', 278.3, 0.01),
#('elastic_constant_C11', 'fcc', 'Pt', 346.7, 0.1),
#('elastic_constant_C12', 'fcc', 'Pt', 250.7, 0.1),
#('elastic_constant_C44', 'fcc', 'Pt', 76.5, 0.02),
('cohesive_energy', 'fcc', 'Pt', 5.84, 0.01),
#('phase_energy', 'fcc-hcp', 'Pt', -0.05, 0.02),
#('surface_energy', 'fcc111', 'Pt', 0.631, 0.02),
#('surface_ratio', 'fcc111-fcc100', 'Pt', 0.631 / 0.892, 0.01),
]
latticeconstants = [['fcc', 'Pt', 3.9],
['hcp', 'Pt', (2.8, 4.8)]]
opt = ParameterSearch(['Pt'], EMT2011Fit, initparam, varparam, quantities,
latticeconstants, ('kappa',), True)
opt.run(0.01, 0.0, '0.1m', dat='SearchParallel.dat', log='SearchParallel.log',
err='SearchParallel.err')
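# Illustrative sketch (not part of the original file): the Metropolis-style
# acceptance rule used inside ParameterSearch.run(). A candidate that lowers
# the error is always kept; a worse one is kept with probability
# exp(-relative_increase / temp), so higher temperatures accept more uphill moves.
def _metropolis_accept(old_error, new_error, temp):
    if new_error < old_error or temp < 1e-6:
        return True
    derr = (new_error - old_error) / old_error
    return np.exp(-derr / temp) > np.random.uniform()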
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/Tools/ParameterOptimization/SearchParallel.py
|
Python
|
mit
| 11,614
|
[
"ASE"
] |
7b953885eb85cef02fdd1c220091bc57a23c0af2b3f5f50b4d915bb79a78978c
|
from typing import Any
import importlib
from ..core.legacy_plugin_wrapper import LegacyPlugin
class PluginConfig:
"""Plugin Configuration Metadata
This class holds the information needed to lazy-import plugins.
Parameters
----------
name : str
The name of the plugin.
class_name : str
The name of the plugin class inside the plugin module.
module_name : str
The name of the module/package from which to import the plugin.
is_legacy : bool
If True, this plugin is a v2 plugin and will be wrapped in a
LegacyPlugin. Default: False.
package_name : str
If the given module name points to a relative module, then the package
name determines the package it is relative to.
install_name : str
The name of the optional dependency that can be used to install this
plugin if it is missing.
legacy_args : Dict
A dictionary of kwargs to pass to the v2 plugin (Format) upon construction.
Examples
--------
>>> PluginConfig(
name="TIFF",
class_name="TiffFormat",
module_name="imageio.plugins.tifffile",
is_legacy=True,
install_name="tifffile",
legacy_args={
"description": "TIFF format",
"extensions": ".tif .tiff .stk .lsm",
"modes": "iIvV",
},
)
>>> PluginConfig(
name="pillow",
class_name="PillowPlugin",
module_name="imageio.plugins.pillow"
)
"""
def __init__(
self,
name: str,
class_name: str,
module_name: str,
*,
is_legacy: bool = False,
package_name: str = None,
install_name: str = None,
legacy_args: dict = None,
) -> None:
legacy_args = legacy_args or dict()
self.name = name
self.class_name = class_name
self.module_name = module_name
self.package_name = package_name
self.is_legacy = is_legacy
self.install_name = install_name or self.name
self.legacy_args = {"name": name, "description": "A legacy plugin"}
self.legacy_args.update(legacy_args)
@property
def format(self) -> Any:
"""For backwards compatibility with FormatManager
Delete when migrating to v3
"""
if not self.is_legacy:
raise RuntimeError("Can only get format for legacy plugins.")
module = importlib.import_module(self.module_name, self.package_name)
clazz = getattr(module, self.class_name)
return clazz(**self.legacy_args)
@property
def plugin_class(self) -> Any:
"""Get the plugin class (import if needed)
Returns
-------
plugin_class : Any
The class that can be used to instantiate plugins.
"""
module = importlib.import_module(self.module_name, self.package_name)
clazz = getattr(module, self.class_name)
if self.is_legacy:
legacy_plugin = clazz(**self.legacy_args)
def partial_legacy_plugin(request):
return LegacyPlugin(request, legacy_plugin)
clazz = partial_legacy_plugin
return clazz
known_plugins = dict()
known_plugins["pillow"] = PluginConfig(
name="pillow", class_name="PillowPlugin", module_name="imageio.plugins.pillow"
)
# Legacy plugins
# ==============
#
# Which are partly registered by format, partly by plugin, and partly by a mix
# of both. We keep the naming here for backwards compatibility.
# In v3 this should become a single entry per plugin named after the plugin
# We can choose extension-specific priority in ``config.extensions``.
#
# Note: Since python 3.7 order of insertion determines the order of dict().keys()
# This means that the order here determines the order by which plugins are
# checked during the full fallback search. We don't advertise this downstream,
# but it could be a useful thing to keep in mind to choose a sensible default
# search order.
known_plugins["TIFF"] = PluginConfig(
name="TIFF",
class_name="TiffFormat",
module_name="imageio.plugins.tifffile",
is_legacy=True,
install_name="tifffile",
legacy_args={
"description": "TIFF format",
"extensions": ".tif .tiff .stk .lsm",
"modes": "iIvV",
},
)
# PILLOW plugin formats (legacy)
PILLOW_FORMATS = [
("BMP", "Windows Bitmap", ".bmp", "PillowFormat"),
("BUFR", "BUFR", ".bufr", "PillowFormat"),
("CUR", "Windows Cursor", ".cur", "PillowFormat"),
("DCX", "Intel DCX", ".dcx", "PillowFormat"),
("DDS", "DirectDraw Surface", ".dds", "PillowFormat"),
("DIB", "Windows Bitmap", "", "PillowFormat"),
("EPS", "Encapsulated Postscript", ".ps .eps", "PillowFormat"),
("FITS", "FITS", ".fit .fits", "PillowFormat"),
("FLI", "Autodesk FLI/FLC Animation", ".fli .flc", "PillowFormat"),
("FPX", "FlashPix", ".fpx", "PillowFormat"),
("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu", "PillowFormat"),
("GBR", "GIMP brush file", ".gbr", "PillowFormat"),
("GIF", "Compuserve GIF", ".gif", "GIFFormat"),
("GRIB", "GRIB", ".grib", "PillowFormat"),
("HDF5", "HDF5", ".h5 .hdf", "PillowFormat"),
("ICNS", "Mac OS icns resource", ".icns", "PillowFormat"),
("ICO", "Windows Icon", ".ico", "PillowFormat"),
("IM", "IFUNC Image Memory", ".im", "PillowFormat"),
("IMT", "IM Tools", "", "PillowFormat"),
("IPTC", "IPTC/NAA", ".iim", "PillowFormat"),
("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg", "JPEGFormat"),
(
"JPEG2000",
"JPEG 2000 (ISO 15444)",
".jp2 .j2k .jpc .jpf .jpx .j2c",
"JPEG2000Format",
),
("MCIDAS", "McIdas area file", "", "PillowFormat"),
("MIC", "Microsoft Image Composer", ".mic", "PillowFormat"),
# skipped in legacy pillow
# ("MPEG", "MPEG", ".mpg .mpeg", "PillowFormat"),
("MPO", "MPO (CIPA DC-007)", ".mpo", "PillowFormat"),
("MSP", "Windows Paint", ".msp", "PillowFormat"),
("PCD", "Kodak PhotoCD", ".pcd", "PillowFormat"),
("PCX", "Paintbrush", ".pcx", "PillowFormat"),
("PIXAR", "PIXAR raster image", ".pxr", "PillowFormat"),
("PNG", "Portable network graphics", ".png", "PNGFormat"),
("PPM", "Pbmplus image", ".pbm .pgm .ppm", "PillowFormat"),
("PSD", "Adobe Photoshop", ".psd", "PillowFormat"),
("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi", "PillowFormat"),
("SPIDER", "Spider 2D image", "", "PillowFormat"),
("SUN", "Sun Raster File", ".ras", "PillowFormat"),
("TGA", "Targa", ".tga", "PillowFormat"),
("TIFF", "Adobe TIFF", ".tif .tiff", "TIFFFormat"),
("WMF", "Windows Metafile", ".wmf .emf", "PillowFormat"),
("XBM", "X11 Bitmap", ".xbm", "PillowFormat"),
("XPM", "X11 Pixel Map", ".xpm", "PillowFormat"),
("XVTHUMB", "XV thumbnail image", "", "PillowFormat"),
]
for id, summary, ext, class_name in PILLOW_FORMATS:
config = PluginConfig(
name=id.upper() + "-PIL",
class_name=class_name,
module_name="imageio.plugins.pillow_legacy",
is_legacy=True,
install_name="pillow",
legacy_args={
"description": summary + " via Pillow",
"extensions": ext,
"modes": "iI" if class_name == "GIFFormat" else "i",
"plugin_id": id,
},
)
known_plugins[config.name] = config
known_plugins["FFMPEG"] = PluginConfig(
name="FFMPEG",
class_name="FfmpegFormat",
module_name="imageio.plugins.ffmpeg",
is_legacy=True,
install_name="ffmpeg",
legacy_args={
"description": "Many video formats and cameras (via ffmpeg)",
"extensions": ".mov .avi .mpg .mpeg .mp4 .mkv .webm .wmv",
"modes": "I",
},
)
known_plugins["BSDF"] = PluginConfig(
name="BSDF",
class_name="BsdfFormat",
module_name="imageio.plugins.bsdf",
is_legacy=True,
install_name="bsdf",
legacy_args={
"description": "Format based on the Binary Structured Data Format",
"extensions": ".bsdf",
"modes": "iIvV",
},
)
known_plugins["DICOM"] = PluginConfig(
name="DICOM",
class_name="DicomFormat",
module_name="imageio.plugins.dicom",
is_legacy=True,
install_name="dicom",
legacy_args={
"description": "Digital Imaging and Communications in Medicine",
"extensions": ".dcm .ct .mri",
"modes": "iIvV",
},
)
known_plugins["FEI"] = PluginConfig(
name="FEI",
class_name="FEISEMFormat",
module_name="imageio.plugins.feisem",
is_legacy=True,
install_name="feisem",
legacy_args={
"description": "FEI-SEM TIFF format",
"extensions": [".tif", ".tiff"],
"modes": "iv",
},
)
known_plugins["FITS"] = PluginConfig(
name="FITS",
class_name="FitsFormat",
module_name="imageio.plugins.fits",
is_legacy=True,
install_name="fits",
legacy_args={
"description": "Flexible Image Transport System (FITS) format",
"extensions": ".fits .fit .fts .fz",
"modes": "iIvV",
},
)
known_plugins["GDAL"] = PluginConfig(
name="GDAL",
class_name="GdalFormat",
module_name="imageio.plugins.gdal",
is_legacy=True,
install_name="gdal",
legacy_args={
"description": "Geospatial Data Abstraction Library",
"extensions": ".tiff .tif .img .ecw .jpg .jpeg",
"modes": "iIvV",
},
)
known_plugins["ITK"] = PluginConfig(
name="ITK",
class_name="ItkFormat",
module_name="imageio.plugins.simpleitk",
is_legacy=True,
install_name="simpleitk",
legacy_args={
"description": "Insight Segmentation and Registration Toolkit (ITK) format",
"extensions": " ".join(
(
".gipl",
".ipl",
".mha",
".mhd",
".nhdr",
".nia",
".hdr",
".nrrd",
".nii",
".nii.gz",
".img",
".img.gz",
".vtk",
".hdf5",
".lsm",
".mnc",
".mnc2",
".mgh",
".mnc",
".pic",
".bmp",
".jpeg",
".jpg",
".png",
".tiff",
".tif",
".dicom",
".dcm",
".gdcm",
)
),
"modes": "iIvV",
},
)
known_plugins["NPZ"] = PluginConfig(
name="NPZ",
class_name="NpzFormat",
module_name="imageio.plugins.npz",
is_legacy=True,
install_name="numpy",
legacy_args={
"description": "Numpy's compressed array format",
"extensions": ".npz",
"modes": "iIvV",
},
)
known_plugins["SPE"] = PluginConfig(
name="SPE",
class_name="SpeFormat",
module_name="imageio.plugins.spe",
is_legacy=True,
install_name="spe",
legacy_args={
"description": "SPE file format",
"extensions": ".spe",
"modes": "iIvV",
},
)
known_plugins["SWF"] = PluginConfig(
name="SWF",
class_name="SWFFormat",
module_name="imageio.plugins.swf",
is_legacy=True,
install_name="swf",
legacy_args={
"description": "Shockwave flash",
"extensions": ".swf",
"modes": "I",
},
)
known_plugins["SCREENGRAB"] = PluginConfig(
name="SCREENGRAB",
class_name="ScreenGrabFormat",
module_name="imageio.plugins.grab",
is_legacy=True,
install_name="pillow",
legacy_args={
"description": "Grab screenshots (Windows and OS X only)",
"extensions": [],
"modes": "i",
},
)
known_plugins["CLIPBOARDGRAB"] = PluginConfig(
name="CLIPBOARDGRAB",
class_name="ClipboardGrabFormat",
module_name="imageio.plugins.grab",
is_legacy=True,
install_name="pillow",
legacy_args={
"description": "Grab from clipboard (Windows only)",
"extensions": [],
"modes": "i",
},
)
# LYTRO plugin (legacy)
lytro_formats = [
("lytro-lfr", "Lytro Illum lfr image file", ".lfr", "i", "LytroLfrFormat"),
(
"lytro-illum-raw",
"Lytro Illum raw image file",
".raw",
"i",
"LytroIllumRawFormat",
),
("lytro-lfp", "Lytro F01 lfp image file", ".lfp", "i", "LytroLfpFormat"),
("lytro-f01-raw", "Lytro F01 raw image file", ".raw", "i", "LytroF01RawFormat"),
]
for name, des, ext, mode, class_name in lytro_formats:
config = PluginConfig(
name=name.upper(),
class_name=class_name,
module_name="imageio.plugins.lytro",
is_legacy=True,
install_name="lytro",
legacy_args={
"description": des,
"extensions": ext,
"modes": mode,
},
)
known_plugins[config.name] = config
# FreeImage plugin (legacy)
FREEIMAGE_FORMATS = [
(
"BMP",
0,
"Windows or OS/2 Bitmap",
".bmp",
"i",
"FreeimageBmpFormat",
"imageio.plugins.freeimage",
),
(
"CUT",
21,
"Dr. Halo",
".cut",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"DDS",
24,
"DirectX Surface",
".dds",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"EXR",
29,
"ILM OpenEXR",
".exr",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"G3",
27,
"Raw fax format CCITT G.3",
".g3",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"GIF",
25,
"Static and animated gif (FreeImage)",
".gif",
"iI",
"GifFormat",
"imageio.plugins.freeimagemulti",
),
(
"HDR",
26,
"High Dynamic Range Image",
".hdr",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"ICO",
1,
"Windows Icon",
".ico",
"iI",
"IcoFormat",
"imageio.plugins.freeimagemulti",
),
(
"IFF",
5,
"IFF Interleaved Bitmap",
".iff .lbm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"J2K",
30,
"JPEG-2000 codestream",
".j2k .j2c",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"JNG",
3,
"JPEG Network Graphics",
".jng",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"JP2",
31,
"JPEG-2000 File Format",
".jp2",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"JPEG",
2,
"JPEG - JFIF Compliant",
".jpg .jif .jpeg .jpe",
"i",
"FreeimageJpegFormat",
"imageio.plugins.freeimage",
),
(
"JPEG-XR",
36,
"JPEG XR image format",
".jxr .wdp .hdp",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"KOALA",
4,
"C64 Koala Graphics",
".koa",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
# not registered in legacy pillow
# ("MNG", 6, "Multiple-image Network Graphics", ".mng", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
(
"PBM",
7,
"Portable Bitmap (ASCII)",
".pbm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PBMRAW",
8,
"Portable Bitmap (RAW)",
".pbm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PCD",
9,
"Kodak PhotoCD",
".pcd",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PCX",
10,
"Zsoft Paintbrush",
".pcx",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PFM",
32,
"Portable floatmap",
".pfm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PGM",
11,
"Portable Greymap (ASCII)",
".pgm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PGMRAW",
12,
"Portable Greymap (RAW)",
".pgm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PICT",
33,
"Macintosh PICT",
".pct .pict .pic",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"PNG",
13,
"Portable Network Graphics",
".png",
"i",
"FreeimagePngFormat",
"imageio.plugins.freeimage",
),
(
"PPM",
14,
"Portable Pixelmap (ASCII)",
".ppm",
"i",
"FreeimagePnmFormat",
"imageio.plugins.freeimage",
),
(
"PPMRAW",
15,
"Portable Pixelmap (RAW)",
".ppm",
"i",
"FreeimagePnmFormat",
"imageio.plugins.freeimage",
),
(
"PSD",
20,
"Adobe Photoshop",
".psd",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"RAS",
16,
"Sun Raster Image",
".ras",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"RAW",
34,
"RAW camera image",
".3fr .arw .bay .bmq .cap .cine .cr2 .crw .cs1 .dc2 "
".dcr .drf .dsc .dng .erf .fff .ia .iiq .k25 .kc2 .kdc .mdc .mef .mos .mrw .nef .nrw .orf "
".pef .ptx .pxn .qtk .raf .raw .rdc .rw2 .rwl .rwz .sr2 .srf .srw .sti",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"SGI",
28,
"SGI Image Format",
".sgi .rgb .rgba .bw",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"TARGA",
17,
"Truevision Targa",
".tga .targa",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"TIFF",
18,
"Tagged Image File Format",
".tif .tiff",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"WBMP",
19,
"Wireless Bitmap",
".wap .wbmp .wbm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"WebP",
35,
"Google WebP image format",
".webp",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"XBM",
22,
"X11 Bitmap Format",
".xbm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
(
"XPM",
23,
"X11 Pixmap Format",
".xpm",
"i",
"FreeimageFormat",
"imageio.plugins.freeimage",
),
]
for name, i, des, ext, mode, class_name, module_name in FREEIMAGE_FORMATS:
config = PluginConfig(
name=name.upper() + "-FI",
class_name=class_name,
module_name=module_name,
is_legacy=True,
install_name="freeimage",
legacy_args={
"description": des,
"extensions": ext,
"modes": mode,
"fif": i,
},
)
known_plugins[config.name] = config
# exists for backwards compatibility with FormatManager
# delete in V3
_original_order = [x for x, config in known_plugins.items() if config.is_legacy]
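# Illustrative sketch (not part of the original file): resolving a plugin from
# the registry above. Nothing is imported until `plugin_class` (or, for legacy
# entries, `format`) is accessed, which is the point of the lazy PluginConfig.
def _resolve_plugin_demo(name="pillow"):
    config = known_plugins[name]
    if config.is_legacy:
        # Legacy (v2) entries hand back a Format instance built on demand.
        return config.format
    # v3 entries hand back the plugin class itself, e.g. PillowPlugin.
    return config.plugin_class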
|
imageio/imageio
|
imageio/config/plugins.py
|
Python
|
bsd-2-clause
| 20,029
|
[
"VTK"
] |
c213322054999619168d430fabbca7df18a4795bed031698e2cbd0c2a822d439
|
from PyQt4 import QtCore, QtGui, uic
import vtk
import os
import sys
from MovePlanet import updateSunsystem
class PlanetSimulation(QtGui.QDialog):
__timer = QtCore.QTimer()
__renderer = vtk.vtkRenderer()
__list_of_planets = []
__list_of_actors = []
__scale_sun_radius = 0.00000006
__scale_planet_radius = 0.000001
__scale_planet_orbit_inner = 0.000000001
__correction_factor = 1e7
__delta_t = 1000
# constructor
def __init__(self):
self.__time = 0.0
self.__timer.setInterval(1000 / 25)
self.connect(self.__timer, QtCore.SIGNAL('timeout()'),
self.__doAnimationStep)
QtGui.QDialog.__init__(self)
# Set up the user interface from Designer.
        # CAUTION: custom path to the Designer .ui file
self.ui = uic.loadUi(os.path.join(os.path.dirname(__file__),"planetSim.ui"))
self.ui.show()
# set up VTK pipeline
self.ui.vtkWidget.Initialize()
self.ui.vtkWidget.Start()
# camera
cam = self.__renderer.GetActiveCamera()
cam.SetPosition(1, 0, -400)
cam.SetFocalPoint(0, 0, 0)
cam.SetClippingRange(.1, 40)
cam.SetViewUp(0, 1, 0)
cam.SetViewAngle(50)
self.__renderer.SetActiveCamera(cam)
# add to vtkWidget
self.ui.vtkWidget.GetRenderWindow().AddRenderer(self.__renderer)
# set interaction style (optional)
style = vtk.vtkInteractorStyleTrackballCamera()
self.ui.vtkWidget.SetInteractorStyle(style)
# Connect up the buttons
self.connect(self.ui.startButton, QtCore.SIGNAL('clicked()'),
self.__startAnimation)
self.connect(self.ui.stopButton, QtCore.SIGNAL('clicked()'),
self.__stopAnimation)
#
# Connect Buttons
#
self.connect(self.ui.btn_sunsystem, QtCore.SIGNAL('clicked()'),
self.__load_sunsystem)
self.connect(self.ui.btn_custom_system, QtCore.SIGNAL('clicked()'),
self.__load_custom_sunsystem)
#
# Connect Dials and Sliders with there assigned TextFields
#
self.connect(self.ui.slider_amount_bodies, QtCore.SIGNAL('valueChanged(int)'),
self.__update_amount_of_bodies)
self.connect(self.ui.slider_sun_mass_factor, QtCore.SIGNAL('valueChanged(int)'),
self.__update_sun_mass_factor)
self.connect(self.ui.dail_planet_mass_min, QtCore.SIGNAL('valueChanged(int)'),
self.__update_planet_mass_min)
self.connect(self.ui.dail_planet_mass_max, QtCore.SIGNAL('valueChanged(int)'),
self.__update_planet_mass_max)
self.connect(self.ui.dail_planet_radius_min, QtCore.SIGNAL('valueChanged(int)'),
self.__update_planet_radius_min)
self.connect(self.ui.dail_planet_radius_max, QtCore.SIGNAL('valueChanged(int)'),
self.__update_planet_radius_max)
self.connect(self.ui.dail_planet_orbit_min, QtCore.SIGNAL('valueChanged(int)'),
self.__update_planet_orbit_min)
self.connect(self.ui.dail_planet_orbit_max, QtCore.SIGNAL('valueChanged(int)'),
self.__update_planet_orbit_max)
# Slider
self.connect(self.ui.slider_animation_speed, QtCore.SIGNAL('valueChanged(int)'),
self.__update_speed)
self.connect(self.ui.slider_delta_t, QtCore.SIGNAL('valueChanged(int)'),
self.___update_delta_t)
self.connect(self.ui.slider_correction_factor, QtCore.SIGNAL('valueChanged(int)'),
self.__update_correction_factor)
# Generate Renderer from PlanetList
def __init_renderer(self, list_of_planets):
for planet in list_of_planets:
actor = vtk.vtkActor()
#sphere = vtk.vtkSphereSource()
sphere = vtk.vtkTexturedSphereSource()
mapper = vtk.vtkPolyDataMapper()
sphere.SetPhiResolution(20)
sphere.SetThetaResolution(20)
scaled_radius = planet.get_radius() * self.__scale_planet_radius
if(planet.id == 0):
scaled_radius = planet.get_radius() * self.__scale_sun_radius
sphere.SetRadius(scaled_radius)
mapper.SetInput(sphere.GetOutput())
graphic_name = "../textures/"+planet.get_name()+".jpg"
graphic_reader = vtk.vtkJPEGReader()
graphic_reader.SetFileName(graphic_name)
graphic_texture = vtk.vtkTexture()
graphic_texture.SetInputConnection(graphic_reader.GetOutputPort())
graphic_texture.InterpolateOn()
actor.SetTexture(graphic_texture)
actor.SetMapper(mapper)
actor.SetScale(1,1,1)
actor.SetPosition(int(self.__scale_planet_orbit_inner*planet.get_posVector_x()),
int(self.__scale_planet_orbit_inner*planet.get_posVector_y()),
int(self.__scale_planet_orbit_inner*planet.get_posVector_z()))
self.__renderer.AddActor(actor)
self.__list_of_actors.append(actor)
def __update_renderer(self, list_of_planets):
i = 0
#new_renderer = vtk.vtkRenderer()
while i < len(list_of_planets):
planet = list_of_planets[i]
self.__list_of_actors[i].SetPosition(self.__scale_planet_orbit_inner*planet.get_posVector_x(),
self.__scale_planet_orbit_inner*planet.get_posVector_y(),
self.__scale_planet_orbit_inner*planet.get_posVector_z())
#new_renderer.AddActor(self.__list_of_actors[i])
i = i + 1
self.__list_of_planets = list_of_planets
#return new_renderer
# Update UI functions
def __update_amount_of_bodies(self):
self.ui.tf_amout_bodies.setText(str(self.ui.slider_amount_bodies.value()))
def __update_sun_mass_factor(self):
self.ui.tf_sun_mass_factor.setText(str(self.ui.slider_sun_mass_factor.value()))
def __update_planet_mass_min(self):
self.ui.tf_planet_mass_min.setText(str(self.ui.dail_planet_mass_min.value()))
def __update_planet_mass_max(self):
self.ui.tf_planet_mass_max.setText(str(self.ui.dail_planet_mass_max.value()))
def __update_planet_radius_min(self):
self.ui.tf_planet_radius_min.setText(str(self.ui.dail_planet_radius_min.value()))
def __update_planet_radius_max(self):
self.ui.tf_planet_radius_max.setText(str(self.ui.dail_planet_radius_max.value()))
def __update_planet_orbit_min(self):
self.ui.tf_planet_orbit_min.setText(str(self.ui.dail_planet_orbit_min.value()))
def __update_planet_orbit_max(self):
self.ui.tf_planet_orbit_max.setText(str(self.ui.dail_planet_orbit_max.value()))
def ___update_delta_t(self):
self.__delta_t = self.ui.slider_delta_t.value() * 1e3
def __update_speed(self):
self.__timer.setInterval(1000 / self.ui.slider_animation_speed.value())
def __update_correction_factor(self):
self.__correction_factor = self.ui.slider_correction_factor.value() * 1e7
# animation step
def __doAnimationStep(self):
self.__time = self.__time + .01
#print "time: ", self.__time
self.__renderer = self.__update_renderer(updateSunsystem(self.__list_of_planets, self.__delta_t, self.__correction_factor))
self.ui.vtkWidget.update()
# start animation
def __startAnimation(self):
self.__timer.start()
# stop animation
def __stopAnimation(self):
self.__timer.stop()
# load our sunsystem
def __load_sunsystem(self):
from EarthSunsystemCalc import generate_our_sunsystem
self.__list_of_planets = generate_our_sunsystem()
self.__init_renderer(self.__list_of_planets)
def __load_custom_sunsystem(self):
from RandomSunsystemCalc import generate_random_system
self.__list_of_planets = generate_random_system(int(self.ui.tf_amout_bodies.toPlainText()) ,
int(self.ui.tf_sun_mass_factor.toPlainText()),
int(self.ui.tf_planet_mass_min.toPlainText()),
int(self.ui.tf_planet_mass_max.toPlainText()),
int(self.ui.tf_planet_radius_min.toPlainText()),
int(self.ui.tf_planet_radius_max.toPlainText()),
int(self.ui.tf_planet_orbit_min.toPlainText()),
int(self.ui.tf_planet_orbit_max.toPlainText()))
self.__init_renderer(self.__list_of_planets)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
uiDemo = PlanetSimulation()
sys.exit(app.exec_())
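# Illustrative sketch (not part of the original file): the minimal VTK pipeline
# built per planet in __init_renderer above -- source -> mapper -> actor. The
# legacy SetInput() call assumes VTK 5.x, matching the rest of this file.
def _make_sphere_actor(radius=1.0):
    sphere = vtk.vtkSphereSource()
    sphere.SetRadius(radius)
    sphere.SetPhiResolution(20)
    sphere.SetThetaResolution(20)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(sphere.GetOutput())  # VTK 5 API, as used in __init_renderer
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    return actor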
|
tscholze/ts-py-planet-sim
|
stable/planetSim.py
|
Python
|
mit
| 9,343
|
[
"VTK"
] |
1163dde28c3fc70921318b48e53260f2fda544bf9b0249c92da4fa2cd78d5d01
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Subliminal post-processing script for NZBGet and SABnzbd
#
# Copyright (C) 2015-2019 Chris Caron <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
### NZBGET POST-PROCESSING/SCHEDULER SCRIPT ###
# Download Subtitles.
#
# The script searches subtitles on various web-sites and saves them into
# destination directory near video files.
#
# This post-processing script is a wrapper for "Subliminal",
# a python library to search and download subtitles, written
# by Antoine Bertin (Diaoul Ael).
#
# Info about this Subliminal NZB Script:
# Author: Chris Caron ([email protected]).
# Date: Sun, Feb 21st, 2019.
# License: GPLv3 (http://www.gnu.org/licenses/gpl.html).
# Script Version: 1.0.1
#
# NOTE: This script requires Python to be installed on your system.
#
##############################################################################
### OPTIONS ###
# List of language codes.
#
# Language code according to ISO 639-1.
# Few examples: English - en, German - de, Dutch - nl, French - fr.
# For the full list of language codes see
# http://www.loc.gov/standards/iso639-2/php/English_list.php.
# Language Setting
#
# Subtitles for multiple languages can be downloaded. Separate multiple
# language codes with some type of delimiter (space, comma, etc).
# Example: en, fr
#Languages=en
# Subliminal Single Mode Setting (yes, no).
#
# Download content without the language code in the subtitles filename.
# NOTE: If multiple languages are specified while this flag is set, then
# the search is ceased in the event that subtitles were found using at
# least one of the specified languages.
#Single=yes
# Subtitle Fetch Mode (ImpairedOnly, StandardOnly, BestScore, ImpairedFirst, StandardFirst).
#
# Define the types of subtitles you would like to scan for, the options
# break down as follows:
# ImpairedOnly - Only download hearing-impaired subtitles.
# StandardOnly - Only download non hearing-impaired subtitles.
# BestScore - Download the best matching subtitles regardless of whether they
# are flagged for the hearing-impaired or not.
# ImpairedFirst - Attempt to download the hearing-impaired subtitles
# first. In the event that they are not available,
# then attempt to acquire the non hearing-impaired versions
# instead.
# StandardFirst - Attempt to download the standard (non hearing-impaired)
# subtitles first. In the event that they are not available,
# then attempt to acquire the hearing-impaired versions
# instead.
#FetchMode=BestScore
# Search Mode (basic, advanced).
#
# basic - presumed subtitles are guessed based on the (deobfuscated)
# filename alone.
# advanced - presumed subtitles are guessed based on the (deobfuscated)
# filename (same as basic). But further processing occurs to
# help obtain more accurate results. Metadata is extracted from
# the actual video in question, such as its length, FPS, and
# encoding (including if subs are already included or not).
# This mode yields the best results but at the cost of additional
# time and CPU.
#SearchMode=advanced
# Ignore Embedded Subtitle Matching (yes, no).
#
# Identify how you want to handle embedded subtitles if they are detected
# in the video file being scanned. If you set this value to 'no', matching
# embedded subtitles will be used instead and no further script processing
# will take place.
# If you set this to 'yes', The script will then attempt to detect any embedded
# subtitles already present with the video (in addition to their languages). If
# the language is already present then no further processing is done.
# NOTE: Embedded subtitles can only be detected if you are using the advanced
# search mode identified above. Therefore this switch has no bearing
# on a Basic check.
# NOTE: This feature can not detect hard-coded subtitles; these are ones that are
# permanently embedded in the video itself.
#IgnoreEmbedded=no
# Minimum File Size (in MB)
#
# Any video that is equal to this size or larger will not be filtered out from
# having it checked for subtitles. This option prevents unnecessary queries
# to subtitle providers when the video in question is just a sample or preview
# file anyway. The sample/preview videos will get filtered out by this option
# while still allowing subtitle checks against the real thing.
# Setting this value to 0 (zero) will disable this filter feature and attempt
# to fetch subtitles on all matched video formats (not recommended).
#MinSize=150
# Minimum File Score
#
# When more than one subtitle is matched against a video, they are individually
# scored based on their likelihood of being an exact match to the video they
# are being searched on. The highest scored match is the chosen one at the
# end of the day. A high score (almost perfect) is 50ish, but most videos
# score in the high 30's and low 40's. This setting identifies the elimination
# threshold: subtitles that score at or below this value are not even
# considered. If you set this too high, you'll never match any subtitles. If
# you set this too low, you'll almost always acquire a subtitle for the video
# in question, but it may not be the correct one.
# If 0 is specified, the default value assigned by the subliminal core
# application will be used.
#MinScore=20
# Default Core Subtitle Providers
#
# Supply a core (master) list of subtitle providers you want to reference
# against each video you scan. The specified subtitle providers should be
# separated by a comma and or a space. If none are specified, the following
# defaults are used: opensubtitles, tvsubtitles, podnapisi, addic7ed, thesubdb
#Providers=opensubtitles, tvsubtitles, podnapisi, addic7ed, thesubdb
# Movie (Exclusive) Subtitle Providers
#
# Optionally specify Movie Providers you wish to exclusively use when
# a movie is detected. If nothing is specified, then the Default
# Core Subtitle Providers (identified above) are used instead.
#
# Providers specified should be separated by a comma and or a space. An example
# of what one might specify here is: opensubtitles, podnapisi, thesubdb
#MovieProviders=
# TV Show (Exclusive) Subtitle Providers
#
# Optionally specify TV Show Providers you wish to exclusively use when
# a TV Show is detected. If nothing is specified, then the Default
# Core Subtitle Providers (identified above) are used instead.
#
# Providers specified should be separated by a comma and or a space.
# An example of what one might specify here is: tvsubtitles, addic7ed
#TVShowProviders=
# File extensions for video files.
#
# Only files with these extensions are processed. Extensions must
# be separated with commas.
# Example=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso
#VideoExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso
# Force Subtitle Encoding (None, UTF-8, UTF-16, ISO-8859-1, ISO-8859-2).
#
# Force the encoding of a subtitle file to be of a certain type. If set to
# None, then the subtitle will be left as it was retrieved.
# - UTF-8: This is the encoding used by most Linux/Unix filesystems. just
# check the global variable $LANG to see if that's what you are.
# - UTF-16: This is the encoding usually used by OS/X systems and NTFS.
# - ISO-8859-1: Also referred to as Latin-1; Microsoft Windows used this
# encoding for years (in the past), and still do in some
# cases. It supports the English, Spanish, and French language
# character sets.
# - ISO-8859-2: Also referred to as Latin-2; It supports Czech, German,
# Hungarian, Polish, Romanian, Croatian, Slovak, and
# Slovene character sets.
#
# If you wish to add another encoding; just email me and i'll add it.
#ForceEncoding=None
# My Systems File Encoding (UTF-8, UTF-16, ISO-8859-1, ISO-8859-2).
#
# All systems have their own encoding; here is a loose guide you can use
# to determine what encoding your system uses (if you're not sure):
# - UTF-8: This is the encoding used by most Linux/Unix filesystems. just
# check the global variable $LANG to see if that's what you are.
# - UTF-16: This is the encoding usually used by OS/X systems and NTFS.
# - ISO-8859-1: Also referred to as Latin-1; Microsoft Windows used this
# encoding for years (in the past), and still do in some
# cases. It supports the English, Spanish, and French language
# character sets.
# - ISO-8859-2: Also referred to as Latin-2; It supports Czech, German,
# Hungarian, Polish, Romanian, Croatian, Slovak, and
# Slovene character sets.
#
# If you wish to add another encoding; just email me and i'll add it.
# All files that are downloaded will be written to your filesystem using
# the same encoding your operating system uses. Since there is no way
# to detect this (yet), by specifying it here, you can make it possible
# to handle files with the extended character sets.
#
#SystemEncoding=UTF-8
# Cross Reference File Paths.
#
# Specify directories local to NZBGet that contain subtitles previously
# downloaded. Once found, they'll be automatically moved over and will
# take priority over actually checking the internet. You can specify
# more then one local directory using the space (and or comma) to
# delimit each entry.
#XRefPaths=
# Cache Directory
#
# This directory is used for storing temporary cache files created when
# fetching subtitles.
#CacheDir=${TempDir}/subliminal
# List of TV categories.
#
# Comma separated list of categories for TV. VideoSort automatically
# distinguishes movies from series and dated TV shows. But it needs help
# to distinguish movies from other TV shows because they are named
# using the same conventions. If a download has an associated category listed
# in the <TvCategories> option, Subliminal uses this information to help
# classify the video being scanned.
# NOTE: This option is only applied to Post Processing.
#
# Category names must match categories defined in NZBGet.
#TvCategories=tv, tv2, Series
# Overwrite Mode (yes, no).
#
# Overwrite subtitles even if they previously exist.
# NOTE: This option is only applied to Post Processing.
#Overwrite=no
# Correct Videos Timestamp (yes, no).
#
# Set this to yes if you want freshly downloaded videos to have their file
# timestamp updated to `now`.
# NOTE: This option is only applied to Post Processing.
#UpdateTimestamp=yes
# Correct Video Permissions (yes, no).
#
# Set this to yes if you want to adjust the permissions associated with
# all downloaded videos (Unix/Linux only).
# NOTE: This option is only applied to Post Processing.
#UpdatePermissions=no
# Video Permission Value
#
# Specify the video permissions to set. This is only used if UpdatePermissions
# (identified above) is set to yes.
# NOTE: This option is only applied to Post Processing.
#VideoPermissions=644
# Directories to Scan
#
# Specify any number of directories this script can (recursively) check
# delimited by a comma and or space. ie: /home/nuxref/mystuff, /path/no3, etc
# For windows users, you can specify: C:\My Downloads, \\My\Network\Path, etc.
# NOTE: This option is only applied to Scheduling.
#ScanDirectories=
# Maximum File Age
#
# The maximum amount of time that can elapse before we can assume that if
# there are still no subtitles after this duration, then there never will
# be. This option prevents thrashing and requesting subtitles for something
# over and over again for no reason. This value is identified in hours
# relative to each file checked.
#
# NOTE: This option is only applied to Scheduling.
#MaxAge=24
# Addic7ed Username
#
# If you wish to utilize the addic7ed provider, you are additionally required
# to provide a username and password. Specify the `username` here.
#Addic7edUser=
# Addic7ed Password
#
# If you wish to utilize the addic7ed provider, you are additionally required
# to provide a username and password. Specify the `password` here.
#Addic7edPass=
# Open Subtitles Username
#
# If you wish to utilize the Open Subtitles provider, you are additionally
# required to provide a username and password. Specify the `username` here.
#OpenSubtitlesUser=
# Open Subtitles Password
#
# If you wish to utilize the Open Subtitles provider, you are additionally
# required to provide a username and password. Specify the `password` here.
#OpenSubtitlesPass=
# Notify URLs
#
# Define as many Notification URLs as you want (separated by a space and/or
# comma) to have services notified after a subtitle has been retrieved. For
# Information on how to construct these URLs, visit:
# https://github.com/caronc/apprise .
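#
# For illustration only: "NotifyURLs=growl://mypass@localhost" would send a
# Growl notification to the local host each time a subtitle is retrieved; see
# the Apprise project page above for the full list of supported URL formats.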
#NotifyURLs=
# Throttle Threshold.
#
# The threshold defines the number of consecutive requests made to the remote
# subtitle websites before a temporary wait/pause occurs (defined by
# Throttle). The goal of the threshold is to prevent one from being
# banned for abusing the server (which can happen if you make too many
# requests). This setting is ideal for those users who are scanning and
# getting subtitles for a very large media library. Set this value to 0 (zero)
# if you want to disable this feature.
#ThrottleThreshold=5
# Throttle.
#
# Defines the number of seconds a throttle/block will occur for when/if
# a Throttle Threshold is reached.
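#
# As a worked example with the defaults (ThrottleThreshold=5, Throttle=3):
# every 5th lookup pauses the script for 3 seconds before the counter is
# reset and scanning resumes.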
#Throttle=3
# Enable debug logging (yes, no).
#
# If subtitles are not downloaded as expected, activate debug logging
# to get a more verbose output from subliminal. This will greatly help in
# diagnosing the problem.
#Debug=no
# Tidy Subtitles (on, off).
#
# Open the downloaded subtitle file and perform some additional optimizations
# to it. This is a work in progress; currently it does the following:
# - Correct all EOL (End Of Line) characters in the event they're inconsistent
#TidySub=off
# Issue a scan of any directories you defined above here:
#SubliminalScan@Scan Defined Paths
### NZBGET POST-PROCESSING/SCHEDULER SCRIPT ###
##############################################################################
import re
from os import sep as os_sep
from os.path import join
import errno
from shutil import move as _move
from os import getcwd
from os.path import split
from os.path import basename
from os.path import abspath
from os.path import dirname
from os.path import splitext
from os.path import isfile
from os.path import exists
from os.path import isdir
from os import unlink
from os import chdir
from os import makedirs
from time import sleep
import logging
from ConfigParser import ConfigParser
from ConfigParser import Error as ConfigException
from ConfigParser import NoOptionError as ConfigNoOption
# This is required if the below environment variables
# are not included in your environment already
import sys
sys.path.insert(0, join(abspath(dirname(__file__)), 'Subliminal'))
# For copying our configuration file
from shutil import copy
from shutil import copyfile
# Script dependencies identified below
from guessit import matcher
from guessit import Guess
from datetime import timedelta
from datetime import datetime
from subliminal import Video
from subliminal import Episode
from subliminal import MutexLock
from subliminal import cache_region
from subliminal import scan_video
from subliminal import download_best_subtitles
from subliminal.subtitle import detect
import babelfish
# pynzbget Script Wrappers
from nzbget import SABPostProcessScript
from nzbget import PostProcessScript
from nzbget import SchedulerScript
from nzbget import EXIT_CODE
from nzbget import SCRIPT_MODE
# Inherit Push Notification Scripts
from apprise import Apprise
from apprise import NotifyType
from apprise import NotifyFormat
from apprise import AppriseAsset
def move(src, dst):
"""
This move() function was written for people using this script to write
their content in Linux to a FUSE-based filesystem that does not
support the move()/rename() operation.
"""
try:
# first try the standard approach
_move(src, dst)
except OSError, e:
if e[0] == errno.ENOSYS:
# Function not implemented error; try a copy/remove instead
# without tracking metadata (this is useful when we're
# moving files across different filesystems such as
# xfs -> fat32 or ext4 -> ntfs, which can't preserve the
# Linux modes and settings).
try:
copyfile(src, dst)
try:
unlink(src)
except:
raise OSError(errno.EPERM, "copyfile() failed.")
except OSError:
# most likely error 38 again (ENOSYS)
pass
else:
# the move failed...
raise
return
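# A minimal usage sketch for the helper above (hypothetical paths, shown as a
# comment so it is not executed at import time):
#
#   move('/downloads/Example.en.srt', '/media/Videos/Example.en.srt')
#
# On filesystems whose rename() is not implemented (errno.ENOSYS), such as
# some FUSE mounts, the call transparently falls back to copyfile() followed
# by unlink().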
class FETCH_MODE(object):
IMPAIRED_ONLY = "ImpairedOnly"
STANDARD_ONLY = "StandardOnly"
BESTSCORE = "BestScore"
IMPAIRED_FIRST = "ImpairedFirst"
STANDARD_FIRST = "StandardFirst"
FETCH_MODES = (
FETCH_MODE.IMPAIRED_ONLY,
FETCH_MODE.STANDARD_ONLY,
FETCH_MODE.BESTSCORE,
FETCH_MODE.STANDARD_FIRST,
FETCH_MODE.IMPAIRED_FIRST,
)
FETCH_MODE_DEFAULT = FETCH_MODE.BESTSCORE
class SEARCH_MODE(object):
BASIC = "basic"
ADVANCED = "advanced"
# A file that provides system defaults when populated that override
# the defaults defined below in this file
# the syntax looks like this
# [main]
# IgnoreEmbedded: Yes
DEFAULTS_CONFIG_FILE = join(abspath(dirname(__file__)), 'Subliminal.ini')
# If our default configuration file isn't present, then we attempt to
# gracefully copy a default configuration file in place.
SAMPLE_CONFIG_FILE = join(abspath(dirname(__file__)), 'Subliminal.ini.sample')
# Ensure everything is defined under this [main] heading
DEFAULTS_CONFIG_FILE_SECTION = 'main'
# Some Default Environment Variables (used with CLI)
DEFAULT_EXTENSIONS = \
'.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso'
DEFAULT_MAXAGE = 24
DEFAULT_LANGUAGE = 'en'
DEFAULT_PROVIDERS = [
'opensubtitles',
'tvsubtitles',
'podnapisi',
'addic7ed',
'thesubdb',
]
# System Encodings
DEFAULT_ENCODINGS = (
# Most Linux Systems
'UTF-8',
# NTFS/OS-X
'UTF-16',
# Most French/English/Spanish Windows Systems
'ISO-8859-1',
# Czech, German, Hungarian, Polish, Romanian,
# Croatian, Slovak, Slovene.
'ISO-8859-2',
)
DEFAULT_UPDATE_TIMESTAMP = False
DEFAULT_UPDATE_PERMISSIONS = False
DEFAULT_VIDEO_PERMISSIONS = 0o644
DEFAULT_SINGLE = False
DEFAULT_FORCE = 'no'
DEFAULT_TIDYSUB = 'no'
DEFAULT_SEARCH_MODE = SEARCH_MODE.ADVANCED
DEFAULT_IGNORE_EMBEDDED = 'no'
DEFAULT_FORCE_ENCODING = 'None'
DEFAULT_SYSTEM_ENCODING = 'UTF-8'
DEFAULT_THROTTLE_THRESHOLD = 5
DEFAULT_THROTTLE_WAITTIME = 3
# A list of compiled regular expressions identifying files to not parse ever
IGNORE_FILELIST_RE = (
# Samples
re.compile('^.*[-.]sample(\.[^.]*)?$', re.IGNORECASE),
re.compile('^sample-.*$', re.IGNORECASE),
)
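# For illustration, the patterns above skip names such as 'movie-sample.mkv',
# 'movie.sample.mkv' and 'sample-movie.mkv', while a regular release name
# like 'movie.mkv' is still processed.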
# The number of MegaBytes the detected video must be (with respect
# to it's filesize). If it is less than this value, then it is presumed
# no subtitles exists for it.
DEFAULT_MIN_VIDEO_SIZE_MB = 150
# The minimum score to accept a potentially matched subtitle that
# was paired against a video.
DEFAULT_MIN_VIDEO_SCORE = 20
# A simple regular expression that scans the video downloaded and
# detects the season/episode information from it.
DETECT_TVSHOW_RE = re.compile(
r'^.*[^A-Za-z0-9]?S([0-9]{1,4})E([0-9]{1,4}(E[0-9]{1,4})*)[^A-Za-z0-9]',
re.IGNORECASE,
)
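# For example, 'Show.Name.S01E02.720p.mkv' matches with season '01' and
# episode '02', while a multi-episode name such as 'Show.Name.S01E02E03.mkv'
# captures '02E03' in the second group (later split on 'E' to recover each
# episode number).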
# stat is used to test if the .srt file was fetched okay or not
from os import stat
# used for updating timestamp of the video
from os import utime
# used for updating video permissions
from os import chmod
def _to_alpha2(lang):
"""
A wrapper to babelfish to look up the language associated with
a name, an ISO 639-1 alpha2 code, or an ISO 639-2 Terminology (T) /
Bibliographic (B) alpha3 code.
None is returned if the code could not be resolved; otherwise the
matching babelfish Language object is returned.
"""
_lang = None
if len(lang) > 3:
try:
# Try by name (such as English, French, Dutch, etc)
_lang = babelfish.Language.fromcode(lang, 'name')
return _lang
except babelfish.exceptions.LanguageReverseError:
pass
elif len(lang) == 3:
try:
# Terminology
_lang = babelfish.Language.fromcode(lang, 'alpha3t')
return _lang
except babelfish.exceptions.LanguageReverseError:
try:
# Bibliographic
_lang = babelfish.Language.fromcode(lang, 'alpha3b')
return _lang
except babelfish.exceptions.LanguageReverseError:
pass
elif len(lang) == 2:
try:
_lang = babelfish.Language.fromcode(lang.lower(), 'alpha2')
return _lang
except babelfish.exceptions.LanguageReverseError:
pass
return _lang
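# A rough usage sketch for the lookup above (results depend on the installed
# babelfish data, so treat these as illustrative):
#
#   _to_alpha2('English')  ->  babelfish Language for English (via its name)
#   _to_alpha2('eng')      ->  the same Language, resolved via its alpha3 code
#   _to_alpha2('en')       ->  the same Language, resolved via its alpha2 code
#   _to_alpha2('xx')       ->  None (unresolvable code)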
def decode(str_data, encoding=None, lang=None):
"""
Returns the unicode string of the data passed in;
otherwise it throws a ValueError() exception. This function makes
use of the chardet library.
If encoding == None then it is attempted to be detected by chardet
If encoding is a string, then only that encoding is used
If encoding is a list or tuple, then each item is tried before
giving up.
"""
if isinstance(str_data, unicode):
return str_data
if encoding is None:
decoded = detect(str_data, lang)
encoding = decoded['encoding']
if isinstance(encoding, str):
encoding = ( encoding, )
if not isinstance(encoding, (tuple, list)):
return str_data
# Convert to unicode
for enc in encoding:
try:
str_data = str_data.decode(
enc,
errors='ignore',
)
return str_data
except UnicodeError:
raise ValueError(
'%s contains invalid characters' % (
str_data,
))
except KeyError:
raise ValueError(
'%s encoding could not be detected ' % (
str_data,
))
except TypeError:
try:
str_data = str_data.decode(
enc,
'ignore',
)
return str_data
except UnicodeError:
raise ValueError(
'%s contains invalid characters' % (
str_data,
))
except KeyError:
raise ValueError(
'%s encoding could not be detected ' % (
str_data,
))
return None
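# A rough usage sketch (the byte strings are illustrative placeholders):
#
#   decode('plain ascii')                 ->  u'plain ascii'
#   decode(raw_bytes, encoding='UTF-8')   ->  unicode text decoded as UTF-8
#   decode(raw_bytes)                     ->  encoding guessed via detect()
#
# where 'raw_bytes' stands for raw subtitle or filename data read from disk.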
class SubliminalScript(SABPostProcessScript, PostProcessScript,
SchedulerScript):
"""A wrapper to Subliminal written for NZBGet
"""
# Default theme to use
default_theme = 'general'
# A list of possible subtitles to use found locally
# that take priority over a check on the internet
# if matched.
xref_paths = []
def apply_nzbheaders(self, guess):
""" Applies NZB headers (if exist) """
nzb_used = False
nzb_proper_name = self.nzb_get('propername', '')
nzb_episode_name = self.nzb_get('episodename', '')
nzb_movie_year = self.nzb_get('movieyear', '')
nzb_more_info = self.nzb_get('moreinfo', '')
if nzb_proper_name != '':
nzb_used = True
self.logger.debug('Using DNZB-ProperName')
if guess['vtype'] == 'series':
proper_name = nzb_proper_name
guess['series'] = proper_name
else:
guess['title'] = nzb_proper_name
if nzb_episode_name != '' and guess['vtype'] == 'series':
nzb_used = True
self.logger.debug('Using DNZB-EpisodeName')
guess['title'] = nzb_episode_name
if nzb_movie_year != '':
nzb_used = True
self.logger.debug('Using DNZB-MovieYear')
guess['year'] = nzb_movie_year
if nzb_more_info != '':
nzb_used = True
self.logger.debug('Using DNZB-MoreInfo')
if guess['type'] == 'movie':
regex = re.compile(
r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE)
matches = regex.match(nzb_more_info)
if matches:
guess['imdb'] = matches.group(1)
guess['cpimdb'] = 'cp(' + guess['imdb'] + ')'
if nzb_used:
if isinstance(guess, Guess):
self.logger.debug(guess.nice_string())
else:
self.logger.debug(str(guess))
def guess_info(self, filename, shared,
deobfuscate=True, use_nzbheaders=True):
""" Parses the filename using guessit-library """
# Year regular expression checker
year_re = re.compile('^[^(]+\((?P<year>[123][0-9]{3})\).+$')
tv_categories = [
cat.lower() for cat in \
self.parse_list(self.get('TvCategories', [])) ]
if deobfuscate:
filename = self.deobfuscate(filename)
if isinstance(filename, str):
system_encoding = self.get('SystemEncoding', DEFAULT_SYSTEM_ENCODING)
_filename = decode(filename, system_encoding)
if not _filename:
# could not detect unicode type
self.logger.debug('Could not detect unicode type.')
else:
filename = _filename
if isinstance(filename, unicode):
self.logger.debug('Guessing using: %s' % filename.encode('utf-8'))
else:
self.logger.debug('Guessing using: %s' % filename)
# Acquire a default year if we can
result = year_re.match(filename)
detected_year = None
if result:
detected_year = result.group('year')
# Pull Guess from NZBGet
if shared:
guess = self.pull_guess()
else:
guess = None
if not guess:
_matcher = matcher.IterativeMatcher(
decode(filename),
filetype='autodetect',
opts={'nolanguage': True, 'nocountry': True},
)
mtree = _matcher.match_tree
guess = _matcher.matched()
if self.vdebug:
# Verbose Mode Only
self.logger.vdebug(mtree)
for node in mtree.nodes():
if node.guess:
self.logger.vdebug(node.guess)
# Guess output prior to mangling it
self.logger.vdebug(guess.nice_string())
# fix some strange guessit guessing:
# if guessit doesn't find a year in the file name it
# thinks it is an episode, but we prefer it to be handled
# as a movie instead
if guess.get('type') == 'episode' and \
guess.get('episodeNumber', '') == '':
guess['type'] = 'movie'
guess['title'] = guess.get('series')
guess['year'] = '1900'
self.logger.debug(
'An episode without episode # becomes a movie',
)
# detect if year is part of series name
if guess['type'] == 'episode':
last_node = None
for node in mtree.nodes():
if node.guess:
if last_node != None and \
node.guess.get('year') != None and \
last_node.guess.get('series') != None:
if 'year' in guess:
if detected_year != str(guess['year']):
self.logger.debug(
'Detected year (%s) updated to %s!' % (
guess['year'], detected_year,
))
# Apply override
guess['year'] = detected_year
guess['series'] += ' ' + str(guess['year'])
self.logger.debug('Detected year as part of title.')
break
last_node = node
if 'year' not in guess and detected_year:
self.logger.debug(
'Setting detected year %s!' % (
detected_year,
))
# Apply override
guess['year'] = detected_year
if 'series' in guess:
guess['series'] += ' ' + str(guess['year'])
if guess['type'] == 'movie':
category = self.get('CATEGORY', '').lower()
force_tv = category in tv_categories
matches = DETECT_TVSHOW_RE.match(filename)
if matches:
# Enforce TV Show
force_tv = True
# Help out with guessed info
_season = int(matches.group(1))
_episodeList = sorted(re.split('[eE]', matches.group(2)), key=int)
_episode = int(_episodeList[0])
if u'episode' not in guess:
guess[u'episode'] = _episode
if u'season' not in guess:
guess[u'season'] = _season
if len(_episodeList) > 1 and u'episodeList' not in guess:
guess[u'episodeList'] = _episodeList
date = guess.get('date')
if date:
guess['vtype'] = 'dated'
elif force_tv:
guess['vtype'] = 'othertv'
else:
guess['vtype'] = 'movie'
if detected_year:
if 'year' not in guess:
self.logger.debug(
'Setting detected year %s!' % (
detected_year,
))
# Apply override
guess['year'] = detected_year
elif detected_year != str(guess['year']):
self.logger.debug(
'Detected year (%s) updated to %s!' % (
guess['year'], detected_year,
))
# Apply override
guess['year'] = detected_year
elif guess['type'] == 'episode':
guess['vtype'] = 'series'
self.logger.debug(guess.nice_string())
else:
self.logger.debug('Guessed content already provided by NZBGet!')
if 'vtype' not in guess:
raise ValueError("Non-guessable filename.")
self.logger.debug('Type: %s' % guess['vtype'])
if use_nzbheaders:
# Apply nzb meta information to guess if present
self.apply_nzbheaders(guess)
if shared:
# Push Guess to NZBGet
self.push_guess(guess)
return guess
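# An illustrative outcome (exact keys depend on the guessit version in use):
# a name like 'Show.Name.S01E02.720p.mkv' typically yields a guess carrying
# 'series', 'season', 'episodeNumber' and a 'vtype' of 'series', while a
# movie-style name yields 'title', 'year' and a 'vtype' of 'movie'.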
def tidy_subtitle(self, fname):
"""post process applied to filename
"""
self.logger.debug(
'Post processing subtitle %s' % \
basename(fname),
)
tmp_fname = '%s.tmp' % fname
old_fname = '%s.old' % fname
try:
unlink(tmp_fname)
#self.logger.debug(
# 'Removed temporary srt re-encode file : %s' % \
# basename(tmp_fname),
#)
except:
# no problem
pass
try:
unlink(old_fname)
#self.logger.debug(
# 'Removed old srt re-encode file : %s' % \
# basename(old_fname),
#)
except:
# no problem
pass
try:
f = open(fname, 'rb')
except IOError:
self.logger.error(
'Could not open %s for post processing.' % \
basename(fname),
)
return False
try:
fw = open(tmp_fname, 'wb')
except:
self.logger.error(
'Could not create new file %s.' % \
basename(tmp_fname),
)
try:
f.close()
except:
pass
return False
# Broken Lines
# These have been appearing in Python 2.7.11 results
re_broken_lines = re.compile('\r\r\n', re.MULTILINE)
def readchunk():
"""Lazsy function (generator) to read a file piece by piece.
Default chunk size: 204800 bytes (200K)."""
return f.read(204800)
for chunk in iter(readchunk, ''):
processed = re_broken_lines.sub('\r\n', chunk)
try:
fw.write(processed)
except:
self.logger.error(
'Could not write to new file %s.' % \
basename(tmp_fname),
)
try:
f.close()
except:
pass
try:
fw.close()
except:
pass
return False
try:
f.close()
except:
pass
try:
fw.close()
except:
pass
try:
move(fname, old_fname)
except OSError:
self.logger.error(
'Could not move %s to %s' % (
basename(fname),
basename(old_fname),
)
)
try:
unlink(tmp_fname)
except:
pass
return False
try:
move(tmp_fname, fname)
except OSError:
self.logger.error(
'Could not move %s to %s' % (
basename(tmp_fname),
basename(fname),
)
)
try:
unlink(fname)
except:
pass
try:
move(old_fname, fname)
except:
pass
try:
unlink(tmp_fname)
except:
pass
return False
try:
unlink(old_fname)
except:
pass
self.logger.info(
"Post processed subtitles %s encoding." % (
basename(fname),
)
)
return True
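# For example (hypothetical path), tidy_subtitle('/media/Example.en.srt')
# rewrites the file so that broken '\r\r\n' line endings become '\r\n'.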
def convert_encoding(self, fname, encoding, lang):
"""Takes a filename and encoding and converts it's contents
"""
self.logger.debug(
'Detecting subtitle encoding for %s' % \
basename(fname),
)
tmp_fname = '%s.tmp' % fname
old_fname = '%s.old' % fname
try:
unlink(tmp_fname)
#self.logger.debug(
# 'Removed temporary srt re-encode file : %s' % \
# basename(tmp_fname),
#)
except:
# no problem
pass
try:
unlink(old_fname)
#self.logger.debug(
# 'Removed old srt re-encode file : %s' % \
# basename(old_fname),
#)
except:
# no problem
pass
try:
f = open(fname, 'rb')
except IOError:
self.logger.error(
'Could not open %s for encoding testing' % \
basename(fname),
)
return False
try:
fw = open(tmp_fname, 'wb')
except:
self.logger.error(
'Could not create new file %s.' % \
basename(tmp_fname),
)
try:
f.close()
except:
pass
return False
def readchunk():
"""Lazy function (generator) to read a file piece by piece.
Default chunk size: 204800 bytes (200K)."""
return f.read(204800)
for chunk in iter(readchunk, ''):
detected = detect(chunk, lang)
if detected['encoding'] is not None:
self.logger.debug(
"Detecting '%s' (%f confidence) subtitle encoding for %s" % (
detected['encoding'],
detected['confidence'],
basename(fname),
)
)
if detected['encoding'].lower() not in [ encoding.lower(), 'ascii' ]:
try:
chunk = chunk.decode(
detected['encoding'], errors='replace')\
.encode(encoding, errors='replace')
except UnicodeError:
raise ValueError(
'%s contains invalid characters' % (
basename(fname),
))
except KeyError:
raise ValueError(
'%s encoding could not be detected ' % (
basename(fname),
))
except TypeError:
try:
chunk = chunk.decode(
detected['encoding'], 'replace')\
.encode(encoding, 'replace')
except UnicodeError:
raise ValueError(
'%s contains invalid characters' % (
basename(fname),
))
except KeyError:
raise ValueError(
'%s encoding could not be detected ' % (
basename(fname),
))
try:
fw.write(chunk)
except:
self.logger.error(
'Could not write to new file %s.' % \
basename(tmp_fname),
)
try:
f.close()
except:
pass
try:
fw.close()
except:
pass
return False
try:
f.close()
except:
pass
try:
fw.close()
except:
pass
try:
move(fname, old_fname)
except OSError:
self.logger.error(
'Could not move %s to %s' % (
basename(fname),
basename(old_fname),
)
)
try:
unlink(tmp_fname)
except:
pass
return False
try:
move(tmp_fname, fname)
except OSError:
self.logger.error(
'Could not move %s to %s' % (
basename(tmp_fname),
basename(fname),
)
)
try:
unlink(fname)
except:
pass
try:
move(old_fname, fname)
except:
pass
try:
unlink(tmp_fname)
except:
pass
return False
try:
unlink(old_fname)
except:
pass
self.logger.info(
"Converted %s to '%s' encoding." % (
basename(fname),
encoding,
)
)
return True
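# For example (hypothetical path), convert_encoding('/media/Example.en.srt',
# 'utf-8', 'en') re-writes the subtitle in UTF-8 whenever a different
# encoding is detected in its content.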
def subliminal_fetch(self, files, single_mode=True, shared=True,
deobfuscate=True, use_nzbheaders=True,
overwrite=False):
"""This function fetches the subtitles
"""
# Apprise Asset Object
asset = AppriseAsset(theme=self.default_theme)
asset.app_id = 'NZB-Subliminal'
asset.app_desc = 'Subtitle Retrieval Notification'
asset.app_url = 'https://github.com/caronc/nzb-subliminal'
# Source Theme from GitHub Page
asset.image_url_mask = 'https://raw.githubusercontent.com' \
'/caronc/nzb-subliminal/master/Subliminal' \
'/apprise-theme/{THEME}/apprise-{TYPE}-{XY}.png'
asset.image_path_mask = join(
dirname(__file__),
'Subliminal', 'apprise-theme', '{THEME}',
'apprise-{TYPE}-{XY}.png')
# Create our apprise object
a = Apprise(asset=asset)
for url in self.parse_list(self.get('NotifyURLs', '')):
# Add our URL
if not a.add(url):
# Validation Failure
self.logger.error(
'Could not initialize %s notification instance.' % url,
)
continue
# Get configuration
cache_dir = self.get('CACHEDIR', self.get('TEMPDIR'))
cache_file = join(cache_dir, 'subliminal.cache.dbm')
cache_sub_dir = join(cache_dir, 'srt')
# Encoding
force_encoding = self.get('ForceEncoding', DEFAULT_FORCE_ENCODING)
if force_encoding.lower() == 'none':
force_encoding = None
# Tidy Subtitle
tidy_subtitle = self.parse_bool(
self.get('TidySub', DEFAULT_TIDYSUB))
# Minimum Score
minscore = int(self.get('MinScore', DEFAULT_MIN_VIDEO_SCORE))
if minscore < 0:
# Use Default
minscore = 0
# Use Embedded Subtitles
ignore_embedded = self.parse_bool(
self.get('IgnoreEmbedded', DEFAULT_IGNORE_EMBEDDED),
)
# Search Mode
search_mode = self.get('SearchMode', DEFAULT_SEARCH_MODE)
self.logger.info('Using %s search mode' % search_mode)
if not isdir(cache_dir):
try:
makedirs(cache_dir)
except:
self.logger.error('Could not create directory %s' % (
cache_dir,
))
return False
if not isdir(cache_sub_dir):
try:
makedirs(cache_sub_dir)
except:
self.logger.error('Could not create sub directory %s' % (
cache_sub_dir,
))
return False
# Change to our cache directory; we do this because subliminal (the one
# we wrap) downloads content to the directory we're standing in at
# first. This causes a problem if we're in a system directory which
# some admins like to remove write permission from (for good reason
# too). Our cache directory acts as a good temporary location to work
# out of.
try:
chdir(cache_sub_dir)
except OSError:
self.logger.error('Could not access directory %s' % (
cache_sub_dir,
))
return False
# Attempt to detect a category and manage exclusive provider lists (if
# specified)
movie_providers = self.parse_list(self.get('MovieProviders', ''))
if not movie_providers:
# Handle providers, if list is empty, then use default
movie_providers = self.parse_list(
self.get('Providers', DEFAULT_PROVIDERS))
tvshow_providers = self.parse_list(self.get('TVShowProviders', ''))
if not tvshow_providers:
# Handle providers, if list is empty, then use default
tvshow_providers = self.parse_list(
self.get('Providers', DEFAULT_PROVIDERS))
# parse provider list and remove entries that are not valid
movie_providers = [ p.lower() for p in movie_providers \
if p.lower() in DEFAULT_PROVIDERS ]
# parse provider list and remove entries that are not valid
tvshow_providers = [ p.lower() for p in tvshow_providers \
if p.lower() in DEFAULT_PROVIDERS ]
if not movie_providers:
movie_providers = DEFAULT_PROVIDERS
self.logger.debug('Using default provider list for movies.')
else:
self.logger.debug('Using the following movie providers: %s' %(
', '.join(movie_providers)
))
if not tvshow_providers:
tvshow_providers = DEFAULT_PROVIDERS
self.logger.debug('Using default provider list for tv shows.')
else:
self.logger.debug('Using the following tv show providers: %s' %(
', '.join(tvshow_providers)
))
provider_configs = {}
_addic7ed_user = self.get('Addic7edUser')
_addic7ed_pass = self.get('Addic7edPass')
if _addic7ed_user and _addic7ed_pass:
# Only if the credentials are set should we initialize
# them with the provider
provider_configs['addic7ed'] = {
'username': _addic7ed_user,
'password': _addic7ed_pass,
}
_opensubs_user = self.get('OpenSubtitlesUser')
_opensubs_pass = self.get('OpenSubtitlesPass')
if _opensubs_user and _opensubs_pass:
# Only if the credentials are set should we initialize
# them with the provider
provider_configs['opensubtitles'] = {
'username': _opensubs_user,
'password': _opensubs_pass,
}
lang = self.parse_list(self.get('Languages', 'en'))
if not lang:
self.logger.error('No valid language was set')
return False
# Set up some arguments based on the fetch mode specified
fetch_mode = self.get('FetchMode', FETCH_MODE_DEFAULT)
try:
# Correct ID if required
fetch_mode = [ m for m in FETCH_MODES \
if fetch_mode.upper() == m.upper()][0]
self.logger.debug('Fetch Mode: %s' % fetch_mode)
except IndexError:
self.logger.warning(
'Invalid FetchMode specified, using default: %s' %\
FETCH_MODE_DEFAULT,
)
fetch_mode = FETCH_MODE_DEFAULT
hearing_impaired = None
hi_score_adjust = 0
if fetch_mode is FETCH_MODE.IMPAIRED_ONLY:
# Force Hearing-Impaired Only
hearing_impaired = True
elif fetch_mode is FETCH_MODE.STANDARD_ONLY:
# Force Non Hearing-Impaired Only
hearing_impaired = False
elif fetch_mode is FETCH_MODE.STANDARD_FIRST:
# Fetch Non Hearing-Impaired First by lowering the score of
# matched hearing-impaired subs.
hi_score_adjust = -3
elif fetch_mode is FETCH_MODE.IMPAIRED_FIRST:
# Fetch Hearing-Impaired First by lowering the score of
# matched non hearing-impaired subs.
hi_score_adjust = +3
else: # FETCH_MODE.BESTSCORE
pass
lang = set(_to_alpha2(l) for l in lang)
if None in lang:
# Eliminate this entry
lang.remove(None)
if not len(lang):
# No Languages to process
self.logger.error('An error occurred processing the language list')
return None
# Now we build a list of local subtitles found (if any exist or were
# defined)
xref_paths = {}
if len(self.xref_paths) > 0:
# Fetch Scan Paths
xref_paths = self.get_files(
self.xref_paths,
suffix_filter='.srt',
max_depth=1,
)
# xref_paths = dict([
# (basename(k), v) for (k, v) in self.get_files(
# self.xref_paths,
# suffix_filter='.srt',
# max_depth=1,
# ).iteritems()
# ])
srt_extract_re = re.compile(
'^(?P<name>.*?)(?P<alpha>\.[a-z]{2}[a-z]?)?(?P<extension>'\
'\.(sub|idx|srt))$',
re.IGNORECASE,
)
for key in xref_paths.keys():
match = srt_extract_re.match(key)
if not match:
continue
entry = match.group('name')
alpha2 = _to_alpha2(match.group('alpha')[1:])
if alpha2 is None:
# Treat the Alpha as part of the filename since it's not a
# valid language code
entry += match.group('alpha')
alpha2 = ''
else:
# Get expected language code
alpha2 = alpha2.alpha2
try:
# Add Guessed Information; but we simulate a video file
# to help our guessed path
xref_paths[key]['video'] = Video.fromguess(
'%s.mkv' % basename(entry),
self.guess_info(
'%s.mkv' % entry,
shared=False,
deobfuscate=False,
use_nzbheaders=False,
)
)
# Store some meta information we can use later to help
# assemble our filename
if alpha2:
xref_paths[key]['_file_prefix'] = match.group('name')
xref_paths[key]['_file_suffix'] = '%s%s' % (
match.group('alpha'),
match.group('extension'),
)
else:
xref_paths[key]['_file_prefix'] = entry
xref_paths[key]['_file_suffix'] = \
match.group('extension')
except ValueError as e:
# fromguess() throws a ValueError if show matches couldn't
# be detected using the content guessit matched.
if isinstance(e, basestring):
self.logger.debug('Error message: %s' % e)
self.logger.warning(
'Ignoring un-detectable srt file: %s' % basename(key),
)
# Remove entry
del xref_paths[key]
continue
# Configure cache
cache_region.configure(
'dogpile.cache.dbm',
expiration_time=timedelta(days=30),
arguments={'filename': cache_file, 'lock_factory': MutexLock},
)
# initialize fetch counter
f_count = 0
# Default system encoding
system_encoding = self.get('SystemEncoding', DEFAULT_SYSTEM_ENCODING)
# Throttle
throttle_threshold = int(self.get(
'ThrottleThreshold', DEFAULT_THROTTLE_THRESHOLD))
if throttle_threshold <= 0:
# if set to zero; disable
throttle_threshold = None
throttle = int(self.get(
'Throttle', DEFAULT_THROTTLE_WAITTIME))
for entry in files:
if True in [ v.match(entry) is not None \
for v in IGNORE_FILELIST_RE ]:
self.logger.debug('Skipping - Ignored file: %s' % basename(entry))
continue
full_path = entry
if search_mode == SEARCH_MODE.BASIC:
full_path = join(cache_sub_dir, basename(entry))
# Figure out the encoding of the file
detected_encoding = system_encoding
if isinstance(entry, str):
try:
_entry = entry.decode(detected_encoding)
except UnicodeError:
decoded = detect(entry)
detected_encoding = decoded['encoding']
self.logger.debug(
'Detected %s file encoding' % detected_encoding,
)
try:
_entry = entry.decode(detected_encoding)
except UnicodeError:
# We failed to decode our file; skip it since we need a
# decoded (unicode) filename to continue
self.logger.debug(
'Skipping - Unknown character encoding: %s' % \
basename(entry))
continue
# Create a copy of the lang object
_lang = set(lang)
for l in lang:
# Check that file doesn't already exist
srt_path = dirname(_entry)
srt_file = basename(splitext(_entry)[0])
srt_file_re = re.escape(srt_file)
if l.alpha3t == l.alpha3b:
srt_regex = '^(%s(\.(%s|%s))?.(idx|sub|srt))$' % (
srt_file_re, l.alpha3t, l.alpha2,
)
else:
srt_regex = '^(%s(\.(%s|%s|%s))?.(idx|sub|srt))$' % (
srt_file_re, l.alpha3t, l.alpha3b, l.alpha2,
)
# look in the directory and extract all matches
_matches = self.get_files(
search_dir=srt_path,
regex_filter=srt_regex,
max_depth=1,
)
if not overwrite and len(_matches):
self.logger.debug(
'%s subtitle match: %s' % (
str(l),
', '.join([ basename(_srt) \
for _srt in _matches.keys() ]),
))
_lang.remove(l)
continue
if len(_lang) == 0:
self.logger.debug(
'Skipping - Subtitle(s) already exist for: %s' % (
basename(_entry),
))
continue
self.logger.debug('Scanning [%s] using %s lang=%s' % (
search_mode,
full_path,
', '.join([ str(l) for l in _lang ]),
))
# Before we start our scan, we want to strip out any information
# in the directory that may obstruct our results since the directory
# information is sometimes used to help figure out things.
filename = split(_entry)[1]
matches = DETECT_TVSHOW_RE.match(filename)
if matches:
# Enforce TV Show (use last 2 directories)
_prevew = os_sep.join(_entry.split(os_sep)[-3:])
else:
# Enforce Movie (use last directory only)
_prevew = os_sep.join(_entry.split(os_sep)[-2:])
try:
# Add Guessed Information
video = Video.fromguess(
filename,
self.guess_info(
_prevew,
shared=shared,
deobfuscate=deobfuscate,
use_nzbheaders=use_nzbheaders,
),
)
except ValueError as e:
# fromguess() throws a ValueError if show matches couldn't
# be detected using the content guessit matched.
if isinstance(e, basestring):
self.logger.debug('Error message: %s' % e)
self.logger.warning(
'Skipping - Invalid file: %s' % basename(_entry),
)
continue
if search_mode == SEARCH_MODE.ADVANCED:
# Deep Enzyme Scan
video = scan_video(
full_path,
subtitles=not overwrite,
embedded_subtitles=not ignore_embedded,
video=video,
)
if throttle_threshold is not None and throttle > 0:
# Calculate our threshold
if throttle_threshold > 0:
# Adjust
throttle_threshold -= 1
if throttle_threshold <= 0:
# Throttle
self.logger.info(
'Throttling connection for %ds' % throttle)
sleep(throttle)
# Reset our threshold value
throttle_threshold = int(self.get(
'ThrottleThreshold', DEFAULT_THROTTLE_THRESHOLD))
if babelfish.Language('und') in video.subtitle_languages:
# This means we found embedded subtitles of an unknown language,
# which causes download_best_subtitles() to skip over this video.
# To alter the default action of abandoning the search
# altogether, we remove this entry here so we can keep
# going.
video.subtitle_languages.remove(babelfish.Language('und'))
if not ignore_embedded:
self.logger.debug(
'Skipping - unknown embedded subtitle ' + \
'language(s) already exist for: %s' % basename(_entry),
)
continue
# Based on our results, we may need to skip searching
# further for subtitles
if not ignore_embedded:
# clean out languages we have already
for l in video.subtitle_languages:
if l in _lang:
self.logger.debug(
'Skipping - Embedded %s subtitle ' % str(l) + \
'already exist for: %s' % basename(_entry),
)
_lang.remove(l)
# One last language check
if len(_lang) == 0:
continue
# Depending if we are dealing with a TV Show or A Movie, we swap
# our list of providers
if isinstance(video, Episode):
# use TV Series providers
providers = tvshow_providers
else:
# use Movie providers
providers = movie_providers
if not len(providers):
self.logger.warning(
'There were no valid providers for this video type.',
)
continue
# early match
local_match = False
dst_file = ''
if len(xref_paths) > 0:
# Check cross reference paths first
for key in xref_paths.keys():
if video == xref_paths[key]['video']:
# Move our local (xref) subtitle to its final destination
self.logger.info('Found local (xref) match %s' % \
basename(key))
# Toggle flag
local_match = True
# re fetch our file
match = srt_extract_re.match(key)
srt_path = abspath(dirname(_entry))
srt_file = basename(splitext(_entry)[0])
dst_file = '%s%s' % (
join(srt_path, srt_file),
xref_paths[key]['_file_suffix'],
)
if exists(dst_file):
self.logger.warning(
'The subtitle %s exists already (Skipping).' % (
basename(dst_file),
))
elif key == dst_file:
self.logger.warning(
'The xref dir and video dir are the same; ' +\
'Ignoring %s.' % (
basename(dst_file),
))
else:
try:
move(key, dst_file)
self.logger.info('Placed %s' % (
basename(dst_file),
))
except OSError:
self.logger.error(
'Could not move %s to %s' % (
basename(key),
basename(dst_file),
)
)
# Remove entry (since we matched it already now)
del xref_paths[key]
if local_match:
# increment counter
f_count += 1
title = "Local Subtitle Set: %s" % basename(dst_file)
body = "## Subtitle Location\n%s" % abspath(dst_file)
# Notify our servers
a.notify(
body=body, title=title, notify_type=NotifyType.INFO,
body_format=NotifyFormat.MARKDOWN,
)
# Go back to top; we're done
continue
# download best subtitles
subtitles = download_best_subtitles(
[video, ],
_lang,
providers=providers,
provider_configs=provider_configs,
single=single_mode,
min_score=minscore,
hearing_impaired=hearing_impaired,
hi_score_adjust=hi_score_adjust,
)
if not subtitles:
self.logger.warning('No subtitles were found for %s' % basename(_entry))
continue
for l in _lang:
srt_path = abspath(dirname(_entry))
srt_file = basename(splitext(_entry)[0])
srt_lang = l.alpha2
if single_mode:
expected_file = join(srt_path, '%s.srt' % srt_file)
else:
expected_file = join(srt_path, '%s.%s.srt' % (
srt_file, srt_lang,
))
self.logger.debug('Expecting .srt: %s' % expected_file)
# Provide other possible locations (unique list)
potential_files = list(set([ \
p for p in [
join(abspath(getcwd()), basename(expected_file)),
join(cache_sub_dir, basename(expected_file)),
] if isfile(p) and p != expected_file
]))
if self.debug:
# Helpful information
for potential in potential_files:
self.logger.debug(
'Potential .srt: %s' % potential
)
if isfile(expected_file):
# File was found in the same folder as the movie;
# no change is necessary
pass
elif len(potential_files):
# Pop the first item from the potential list
while len(potential_files):
move_from = potential_files.pop()
self.logger.debug(
'Expected not found, retrieving: %s' % move_from,
)
try:
# Move our file
move(move_from, expected_file)
# Move our fetched file to its final destination
self.logger.info('Successfully placed %s' % \
basename(expected_file))
# leave loop
break
except OSError as e:
self.logger.error(
'Could not move %s to %s' % (
basename(move_from),
expected_file,
)
)
self.logger.debug(
'move() exception: %s' % str(e),
)
# Remove any lingering potential files
try:
expected_stat = stat(expected_file)
except OSError:
# weird, expected file was not found..
expected_stat = ()
while len(potential_files):
p = potential_files.pop()
try:
if stat(p) != expected_stat:
# non-linked files... proceed
unlink(p)
self.logger.debug(
'Removed lingering extra: %s' % \
p,
)
except:
pass
if not isfile(expected_file):
# We can't find anything
self.logger.error(
'Could not locate a fetched (%s) subtitle.' % l
)
continue
# File Conversion Option
if force_encoding:
self.convert_encoding(
expected_file,
force_encoding,
srt_lang,
)
# Post Processing Tidying
if tidy_subtitle:
self.tidy_subtitle(
expected_file,
)
# increment counter
f_count += 1
title = "Subtitle Retrieved: %s" % basename(expected_file)
body = "## Subtitle Location\n%s" % abspath(expected_file)
# Perform any notifications (if set to do so)
a.notify(
body=body, title=title, notify_type=NotifyType.INFO,
body_format=NotifyFormat.MARKDOWN,
)
# When you're all done handling the file, just return
# the error code that best represents how everything worked
if f_count > 0:
return True
# Nothing fetched, nothing gained or lost
return None
def sabnzbd_postprocess_main(self, *args, **kwargs):
"""
SABNZBd PostProcessing Support
"""
search_mode = (self.get('SearchMode', DEFAULT_SEARCH_MODE) == SEARCH_MODE.ADVANCED)
return self.postprocess_main(use_nzbheaders=search_mode, shared=False, *args, **kwargs)
def postprocess_main(self, use_nzbheaders=True, shared=True, *args, **kwargs):
if not self.health_check():
# No sense scanning something that did not download successfully
return None
if not self.validate(keys=('Languages',)):
return False
# Environment
video_extension = self.get('VideoExtensions', DEFAULT_EXTENSIONS)
minsize = int(self.get('MinSize', DEFAULT_MIN_VIDEO_SIZE_MB)) * 1048576
self.xref_paths = self.parse_path_list(self.get('XRefPaths'))
# Overwrite Mode
overwrite = self.parse_bool(self.get('Overwrite', 'no'))
# Single Mode (don't download language extension)
single_mode = self.parse_bool(
self.get('Single', DEFAULT_SINGLE))
# Update Timestamp
update_timestamp = self.parse_bool(
self.get('UpdateTimestamp', DEFAULT_UPDATE_TIMESTAMP))
# Update Permissions
update_permissions = self.parse_bool(
self.get('UpdatePermissions', DEFAULT_UPDATE_PERMISSIONS))
try:
# The permission value is configured as an octal string (e.g. '644')
video_permissions = int(str(self.get(
'VideoPermissions',
'%o' % DEFAULT_VIDEO_PERMISSIONS,
)), 8)
except (ValueError, TypeError):
video_permissions = DEFAULT_VIDEO_PERMISSIONS
# Build file list
files = self.get_files(suffix_filter=video_extension, fullstats=True)
# Apply Filters
_files = dict([ (k, v) for (k, v) in files.items() if \
v['filesize'] >= minsize ]).keys()
if self.debug and len(_files) != len(files):
# Debug Mode - Print filtered content for peace of mind and
# debugging other peoples logs
for file in list(set(files.keys()) - set(_files)):
size = 0.0
if files[file]['filesize'] > 0:
size = (float(files[file]['filesize']) / 1048576.0)
self.logger.debug('Filtered "%s" (%.2f MB)' % (file, size))
if not _files:
self.logger.info('There were no files found.')
return None
self.logger.info('Found %d matched file(s).' % len(_files))
for file in _files:
if self.debug:
size = 0.0
if files[file]['filesize'] > 0:
size = (float(files[file]['filesize']) / 1048576.0)
self.logger.debug('Scanning "%s" (%.2f MB)' % (file, size))
# Update Permissions (if specified to do so)
if update_permissions:
try:
chmod(file, video_permissions)
self.logger.debug(
'Video permissions set to 0%o.', video_permissions,
)
except:
self.logger.error(
'Failed to update video permissions for "%s"' % file,
)
# Update Timestamps (if specified to do so)
if update_timestamp:
try:
utime(file, None)
self.logger.debug('Video timestamp updated.')
except:
self.logger.error(
'Failed to update timestamp for "%s"' % file,
)
if _files:
return self.subliminal_fetch(
_files,
single_mode=single_mode,
deobfuscate=True,
use_nzbheaders=use_nzbheaders,
shared=shared,
overwrite=overwrite,
)
def scheduler_main(self, *args, **kwargs):
if not self.validate(keys=(
'MaxAge',
'MinSize',
'MinScore',
'Single',
'IgnoreEmbedded',
'Providers',
'MovieProviders',
'TVShowProviders',
'SearchMode',
'FetchMode',
'ScanDirectories',
'VideoExtensions',
'XRefPaths',
'ForceEncoding',
'SystemEncoding',
'Languages')):
return False
# Environment
video_extension = self.get('VideoExtensions', DEFAULT_EXTENSIONS)
maxage = int(self.get('MaxAge', DEFAULT_MAXAGE))
minsize = int(self.get('MinSize', DEFAULT_MIN_VIDEO_SIZE_MB)) * 1048576
paths = self.parse_path_list(self.get('ScanDirectories'))
self.xref_paths = self.parse_path_list(self.get('XRefPaths'))
# Single Mode (don't download language extension)
single_mode = self.parse_bool(
self.get('Single', DEFAULT_SINGLE))
# Fetch Scan Paths
files = self.get_files(
paths,
suffix_filter=video_extension,
fullstats=True,
)
# Apply Filters
ref_time = datetime.now() - timedelta(hours=maxage)
_files = dict([ (k, v) for (k, v) in files.items() if \
v['filesize'] >= minsize and \
v['modified'] >= ref_time ]).keys()
if self.debug and len(_files) != len(files):
# Debug Mode - Print filtered content for peace of mind and
# debugging other peoples logs
for file in list(set(files.keys()) - set(_files)):
size = 0.0
if files[file]['filesize'] > 0:
size = (float(files[file]['filesize']) / 1048576.0)
self.logger.debug('Filtered "%s" (%.2f MB)' % (file, size))
if not _files:
self.logger.info('There were no files found.')
return None
self.logger.info('Found %d matched file(s).' % len(_files))
if self.debug:
for file in _files:
size = 0.0
if files[file]['filesize'] > 0:
size = (float(files[file]['filesize']) / 1048576.0)
self.logger.debug('Scanning "%s" (%.2f MB)' % (file, size))
if _files:
return self.subliminal_fetch(
_files,
single_mode=single_mode,
shared=False,
deobfuscate=False,
use_nzbheaders=False,
)
def action_subliminalscan(self, *args, **kwargs):
"""
Execute the SubliminalScan Test Action
"""
self.scheduler_main(*args, **kwargs)
return True
def main(self, *args, **kwargs):
"""CLI
"""
# Environment
video_extension = self.get('VideoExtensions', DEFAULT_EXTENSIONS)
maxage = int(self.get('MaxAge', DEFAULT_MAXAGE))
minsize = int(self.get('MinSize', DEFAULT_MIN_VIDEO_SIZE_MB)) * 1048576
force = self.parse_bool(self.get('Force', DEFAULT_FORCE))
paths = self.parse_path_list(self.get('ScanDirectories'))
self.xref_paths = self.parse_path_list(self.get('XRefPaths'))
# Append any absolute scan directories (avoiding the parse_path_list)
paths += self.get('AbsoluteScanDirectories', [])
# Single Mode (don't download language extension)
single_mode = self.parse_bool(
self.get('Single', DEFAULT_SINGLE))
# Fetch Scan Paths
files = self.get_files(
paths,
suffix_filter=video_extension,
fullstats=True,
)
# Apply Filters
if not force:
ref_time = datetime.now() - timedelta(hours=maxage)
_files = dict([ (k, v) for (k, v) in files.items() if \
v['filesize'] >= minsize and \
v['modified'] >= ref_time ]).keys()
else:
_files = dict([ (k, v) for (k, v) in files.items() if \
v['filesize'] >= minsize ]).keys()
if self.debug and len(_files) != len(files):
# Debug Mode - Print filtered content for peace of mind and
# debugging other peoples logs
for file in list(set(files.keys()) - set(_files)):
size = 0.0
if files[file]['filesize'] > 0:
size = (float(files[file]['filesize']) / 1048576.0)
self.logger.debug('Filtered "%s" (%.2f MB)' % (file, size))
if not _files:
self.logger.info('There were no files found.')
return True
self.logger.info('Found %d matched file(s).' % len(_files))
if self.debug:
for file in _files:
size = 0.0
if files[file]['filesize'] > 0:
size = (float(files[file]['filesize']) / 1048576.0)
self.logger.debug('Scanning "%s" (%.2f MB)' % (file, size))
if files:
return self.subliminal_fetch(
_files,
single_mode=single_mode,
shared=False,
deobfuscate=False,
use_nzbheaders=False,
)
else:
self.logger.warning(
'There were no files detected less than %dhr(s) ' % maxage +\
'in age requiring subtitles.')
self.logger.info(
'Try adding --force (-f) to force these downloads.'
)
return None
# Call your script as follows:
if __name__ == "__main__":
from sys import exit
from optparse import OptionParser
# Support running from the command line
parser = OptionParser()
parser.add_option(
"-S",
"--scandir",
dest="scandir",
help="The directory to scan against. Note: that by setting this " + \
"variable, it is implied that you are running this from " + \
"the command line.",
metavar="DIR",
)
parser.add_option(
"-a",
"--maxage",
dest="maxage",
help="The maximum age a file can be to be considered searchable. " + \
"This value is represented in hours. The default value is %d" % \
DEFAULT_MAXAGE + " hours.",
metavar="AGE",
)
parser.add_option(
"-n",
"--encoding",
dest="encoding",
help="The system encoding to use (utf-8, ISO-8859-1, etc)." + \
" The default value is '%s'" % DEFAULT_SYSTEM_ENCODING + ".",
metavar="ENCODING",
)
parser.add_option(
"-l",
"--language",
dest="language",
help="The language the fetch the subtitles in (en, fr, etc)." + \
" The default value is '%s'" % DEFAULT_LANGUAGE + ".",
metavar="LANG",
)
parser.add_option(
"-p",
"--providers",
dest="providers",
help="Specify a list of providers (use commas as delimiters) to " + \
"identify the providers you wish to use. The following will " + \
"be used by default: '%s'" % ','.join(DEFAULT_PROVIDERS),
metavar="PROVIDER1,PROVIDER2,etc",
)
parser.add_option(
"-s",
"--single",
action="store_true",
dest="single_mode",
help="Download content without the language code in the subtitle " + \
"filename.",
)
parser.add_option(
"-b",
"--basic",
action="store_true",
dest="basic_mode",
help="Do not attempt to parse additional information from the " + \
"video file. Running in a basic mode is much faster but can " + \
"make it more difficult to determine the correct subtitle if " + \
"more then one is matched."
)
parser.add_option(
"-x",
"--cross-reference",
dest="xrefpath",
help="Specify an optional list of directories to scan for subs " + \
"first before checking on the internet. This is for " +\
"directories containing subs (.srt files) that you have " +\
"already downloaded ahead of time.",
metavar="PATH1,PATH2,etc",
)
parser.add_option(
"-z",
"--minsize",
dest="minsize",
help="Specify the minimum size a video must be to be worthy of " + \
"of checking for subtiles. This value is interpreted in MB " + \
"(Megabytes) and defaults to %d MB." % DEFAULT_MIN_VIDEO_SIZE_MB,
metavar="SIZE_IN_MB",
)
parser.add_option(
"-c",
"--minscore",
dest="minscore",
help="When scoring multiple matched subtitles for a video, this " + \
"value identifies the threshold to assume the subtitle is no good " + \
"and should be thrown away when being compared against others. " + \
"It currently defaults to %d." % DEFAULT_MIN_VIDEO_SCORE,
metavar="MINSCORE",
)
parser.add_option(
"-k",
"--ignore-embedded",
dest="ignore_embedded",
action="store_true",
help="If embedded subtitles were detected, choose not to use them " + \
"and continue to search for the subtitles hosted by the " + \
"identified provider(s).",
)
parser.add_option(
"-e",
"--force-encoding",
dest="force_encoding",
help="Optionally specify the subtitle's file encoding to" + \
"a specific type (utf-8, ISO-8859-1, etc). If none is specified " + \
"then the file is left as is.",
metavar="ENCODING",
)
parser.add_option(
"-f",
"--force",
action="store_true",
dest="force",
help="Force a download reguardless of the file age. This " + \
"switch negates any value specified by the --age (-a) switch.",
)
parser.add_option(
"-o",
"--overwrite",
action="store_true",
dest="overwrite",
help="Overwrite a subtitle in the event one is already present.",
)
parser.add_option(
"-m",
"--fetch-mode",
dest="fetch_mode",
help="Identify the fetch mode you wish to invoke," + \
" the options are: '%s'" % "', '".join(FETCH_MODES) + ". " +\
"The default value is: %s" % FETCH_MODE_DEFAULT,
metavar="MODE",
)
parser.add_option(
"--addic7ed-user",
dest="addic7ed_user",
help="Optionally use login credentials when accessing " + \
"Addic7ed's server. This option is ignored if the " + \
"--addic7ed-pass switch is not specified.",
metavar="USER",
)
parser.add_option(
"--addic7ed-pass",
dest="addic7ed_pass",
help="Optionally use login credentials when accessing " + \
"Addic7ed's server. This option is ignored if the " + \
"--addic7ed-user switch is not specified.",
metavar="PASS",
)
parser.add_option(
"--opensubs-user",
dest="opensubs_user",
help="Optionally use login credentials when accessing " + \
"Open Subtitles's server. This option is ignored if the " + \
"--opensubs-pass switch is not specified.",
metavar="USER",
)
parser.add_option(
"--opensubs-pass",
dest="opensubs_pass",
help="Optionally use login credentials when accessing " + \
"Open Subtitles's server. This option is ignored if the " + \
"--opensubs-user switch is not specified.",
metavar="PASS",
)
parser.add_option(
"-t",
"--tidy-subs",
action="store_true",
dest="tidysub",
help="Post process tidying of subtitle.",
)
parser.add_option(
"-u",
"--notify-urls",
dest="notify_urls",
help="Specify 1 or more notification URLs in their URL format ie: " + \
"growl://mypass@localhost. " + \
"See https://github.com/caronc/apprise for more information " +\
"on the different kinds of supported Notification URLs.",
metavar="URL(s)",
)
parser.add_option(
"-T",
"--throttle-threshold",
dest="threshold",
help="The threshold defines the number of concurrent requests " + \
"made to the remote subtitle websites before a temporary " + \
"wait/pause occurs (defined by --throttle). The goal of the " + \
"threshold is to prevent one from being banned for abusing the " + \
"server (which can happen if you make to many requests). This " + \
"setting is ideal for those users who are scanning and getting " + \
"subtitles for a very large media library. Set this value to 0 " + \
" (zero) if you want to disable this feature. It currently " + \
"defaults to %d." % DEFAULT_THROTTLE_THRESHOLD,
metavar="COUNT",
)
parser.add_option(
"-W",
"--throttle",
dest="throttle",
help="Defines the number of seconds a throttle/block will occur " + \
"for when/if a throttle threshold is reached. It currently " + \
"defaults to %d." % DEFAULT_THROTTLE_WAITTIME,
metavar="SEC",
)
parser.add_option(
"-L",
"--logfile",
dest="logfile",
help="Send output to the specified logfile instead of stdout.",
metavar="FILE",
)
parser.add_option(
"-D",
"--debug",
action="store_true",
dest="debug",
help="Debug Mode",
)
options, _args = parser.parse_args()
logger = options.logfile
if not logger:
# True = stdout
logger = True
debug = options.debug
script_mode = None
if options.scandir:
scandir = options.scandir
else:
# No arguments at all specified
scandir = ''
script = SubliminalScript(
logger=logger,
debug=debug,
script_mode=script_mode,
)
# Define directories specified without use of the -S switch.
# These directories are not parsed further later on and provide a
# work-around for people wanting to scan directories containing
# commas and/or irregular spacing.
if script.script_mode is SCRIPT_MODE.NONE and len(_args):
# Support command line arguments too if no other script mode
# is detected NONE = CLI
script.set('AbsoluteScanDirectories', _args)
if script.script_mode == SCRIPT_MODE.SABNZBD_POSTPROCESSING:
# We're using SABnzbd. Since there is no way to expose the many
# configuration options available to this script to the user,
# we want to at least try to make their life as easy as possible and
# move a sample configuration file into place that they can edit at
# their own free will.
if not isfile(DEFAULTS_CONFIG_FILE) and isfile(SAMPLE_CONFIG_FILE):
try:
copy(SAMPLE_CONFIG_FILE, DEFAULTS_CONFIG_FILE)
script.logger.info('Placed default configuration file: %s' % (
DEFAULTS_CONFIG_FILE,
))
except:
# copy is not possible, we don't panic; it is what it is
pass
# We define a configuration file users can over-ride the defaults
# with.
cfg = ConfigParser()
if isfile(DEFAULTS_CONFIG_FILE):
try:
cfg.read(DEFAULTS_CONFIG_FILE)
if options.encoding is None:
# Get Default
try:
options.encoding = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'SystemEncoding')
except ConfigNoOption:
pass
if options.language is None:
# Get Default
try:
options.language = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Languages')
except ConfigNoOption:
pass
if options.maxage is None:
# Get Default
try:
options.maxage = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'MaxAge')
except ConfigNoOption:
pass
if options.force_encoding is None:
# Get Default
try:
options.force_encoding = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'ForceEncoding')
except ConfigNoOption:
pass
if options.notify_urls is None:
# Get Default
try:
options.notify_urls = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'NotifyURLs')
except ConfigNoOption:
pass
if options.minsize is None:
# Get Default
try:
options.minsize = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'MinSize')
except ConfigNoOption:
pass
if options.minscore is None:
# Get Default
try:
options.minscore = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'MinScore')
except ConfigNoOption:
pass
if options.throttle is None:
# Get Default
try:
options.throttle = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Throttle')
except ConfigNoOption:
pass
if options.threshold is None:
# Get Default
try:
options.threshold = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'ThrottleThreshold')
except ConfigNoOption:
pass
if options.single_mode is None:
# Get Default
try:
options.single_mode = script.parse_bool(
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Single'),
)
except ConfigNoOption:
pass
if options.overwrite is None:
# Get Default
try:
options.overwrite = script.parse_bool(
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Overwrite'),
)
except ConfigNoOption:
pass
if options.ignore_embedded is None:
# Get Default
try:
options.ignore_embedded = script.parse_bool(
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'IgnoreEmbedded'),
)
except ConfigNoOption:
pass
if options.basic_mode is None:
# Get Default
try:
options.basic_mode = \
(str(cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'SearchMode')).lower() != SEARCH_MODE.ADVANCED)
except ConfigNoOption:
pass
if options.xrefpath is None:
# Get Default
try:
options.xrefpath = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'XRefPaths')
except ConfigNoOption:
pass
if options.tidysub is None:
# Get Default
try:
options.tidysub = script.parse_bool( \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'TidySub'),
)
except ConfigNoOption:
pass
if options.providers is None:
# Get Default
try:
options.providers = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Providers')
except ConfigNoOption:
pass
if options.fetch_mode is None:
# Get Default
try:
options.fetch_mode = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'FetchMode')
except ConfigNoOption:
pass
if options.addic7ed_user is None:
# Get Default
try:
options.addic7ed_user = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Addic7edUser')
except ConfigNoOption:
pass
if options.addic7ed_pass is None:
# Get Default
try:
options.addic7ed_pass = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Addic7edPass')
except ConfigNoOption:
pass
if options.opensubs_user is None:
# Get Default
try:
options.opensubs_user = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'OpenSubtitlesUser')
except ConfigNoOption:
pass
if options.opensubs_pass is None:
# Get Default
try:
options.opensubs_pass = \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'OpenSubtitlesPass')
except ConfigNoOption:
pass
if debug is None:
# Get Default
try:
script.set_debugging(script.parse_bool( \
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'Debug')
))
except ConfigNoOption:
pass
try:
script.set('VideoExtensions',
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'VideoExtensions'))
except ConfigNoOption:
pass
try:
script.set('TvCategories',
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'TvCategories'))
except ConfigNoOption:
pass
try:
script.set('UpdateTimestamp',
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'UpdateTimestamp'))
except ConfigNoOption:
pass
try:
script.set('UpdatePermissions',
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'UpdatePermissions'))
except ConfigNoOption:
pass
try:
script.set('VideoPermissions',
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'VideoPermissions'))
except ConfigNoOption:
pass
try:
script.set('TVShowProviders',
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'TVShowProviders'))
except ConfigNoOption:
pass
try:
script.set('MovieProviders',
cfg.get(DEFAULTS_CONFIG_FILE_SECTION, 'MovieProviders'))
except ConfigNoOption:
pass
except ConfigException, e:
script.logger.warning(
'An exception occurred parsing %s: %s' % (
DEFAULTS_CONFIG_FILE, str(e)),
)
# We always enter this part of the code, so we have to be
# careful to only set() values that have been set by an
# external switch. Otherwise we use defaults or what might
# already be resident in memory (environment variables).
_encoding = options.encoding
_language = options.language
_maxage = options.maxage
_force_encoding = options.force_encoding
_minsize = options.minsize
_minscore = options.minscore
_single_mode = options.single_mode is True
_overwrite = options.overwrite is True
_ignore_embedded = options.ignore_embedded is True
_basic_mode = options.basic_mode is True
_xrefpath = options.xrefpath
_force = options.force is True
_tidysub = options.tidysub is True
_providers = options.providers
_fetch_mode = options.fetch_mode
_addic7ed_user = options.addic7ed_user
_addic7ed_pass = options.addic7ed_pass
_opensubs_user = options.opensubs_user
_opensubs_pass = options.opensubs_pass
_notify_urls = options.notify_urls
_throttle = options.throttle
_threshold = options.threshold
if _maxage is not None:
try:
_maxage = str(abs(int(_maxage)))
script.set('MaxAge', _maxage)
if _maxage == '0':
# remove ambiguity; allow setting maxage to 0 (zero)
# Setting maxage to zero implies scanning everything;
# so... toggle the force switch (same thing - for now)
_force = True
except (ValueError, TypeError):
script.logger.error(
'An invalid `maxage` (%s) was specified.' % (_maxage)
)
exit(EXIT_CODE.FAILURE)
if _minsize is not None:
try:
_minsize = str(abs(int(_minsize)))
script.set('MinSize', _minsize)
except (ValueError, TypeError):
script.logger.error(
'An invalid `minsize` (%s) was specified.' % (_minsize)
)
exit(EXIT_CODE.FAILURE)
if _minscore is not None:
try:
_minscore = str(abs(int(_minscore)))
script.set('MinScore', _minscore)
except (ValueError, TypeError):
script.logger.error(
'An invalid `minscore` (%s) was specified.' % (_minscore)
)
exit(EXIT_CODE.FAILURE)
if _threshold is not None:
try:
_threshold = abs(int(_threshold))
script.set('ThrottleThreshold', _threshold)
except (ValueError, TypeError):
script.logger.error(
'An invalid `throttle-threshold` (%s) was specified.' % (_threshold)
)
exit(EXIT_CODE.FAILURE)
if _throttle is not None:
try:
_throttle = abs(int(_throttle))
script.set('Throttle', _throttle)
except (ValueError, TypeError):
script.logger.error(
'An invalid `throttle` (%s) was specified.' % (_throttle)
)
exit(EXIT_CODE.FAILURE)
if _overwrite:
script.set('Overwrite', True)
if _tidysub:
script.set('TidySub', True)
if _force_encoding:
script.set('ForceEncoding', _force_encoding.lower())
if _basic_mode:
script.set('SearchMode', SEARCH_MODE.BASIC)
if _xrefpath:
script.set('XRefPaths', _xrefpath)
if _ignore_embedded:
script.set('IgnoreEmbedded', True)
if _single_mode:
script.set('Single', True)
if _force:
script.set('Force', True)
if _providers:
script.set('Providers', _providers)
if _notify_urls:
script.set('NotifyURLs', _notify_urls)
if _language:
script.set('Languages', _language)
if _encoding:
script.set('SystemEncoding', _encoding)
if _fetch_mode:
if _fetch_mode.upper() in [ f.upper() for f in FETCH_MODES ]:
script.set('FetchMode', _fetch_mode.upper())
else:
script.logger.warning(
'Invalid FetchMode specified, using default: %s' %\
FETCH_MODE_DEFAULT)
script.set('FetchMode', FETCH_MODE_DEFAULT)
if _addic7ed_user and _addic7ed_pass:
script.set('Addic7edUser', _addic7ed_user)
script.set('Addic7edPass', _addic7ed_pass)
if _opensubs_user and _opensubs_pass:
script.set('OpenSubtitlesUser', _opensubs_user)
script.set('OpenSubtitlesPass', _opensubs_pass)
# Set some defaults if they are not already set
if script.get('ThrottleThreshold') is None:
script.set('ThrottleThreshold', DEFAULT_THROTTLE_THRESHOLD)
if script.get('Throttle') is None:
script.set('Throttle', DEFAULT_THROTTLE_WAITTIME)
if script.get('MaxAge') is None:
script.set('MaxAge', DEFAULT_MAXAGE)
if script.get('MinSize') is None:
script.set('MinSize', DEFAULT_MIN_VIDEO_SIZE_MB)
if script.get('MinScore') is None:
script.set('MinScore', DEFAULT_MIN_VIDEO_SCORE)
if script.get('Languages') is None:
# Force defaults if not set
script.set('Languages', DEFAULT_LANGUAGE)
if script.get('SystemEncoding') is None:
# Force defaults if not set
script.set('SystemEncoding', DEFAULT_SYSTEM_ENCODING)
if script.get('FetchMode') is None:
script.set('FetchMode', FETCH_MODE_DEFAULT)
# Generic Video Extensions
if not script.get('VideoExtensions'):
script.set('VideoExtensions', DEFAULT_EXTENSIONS)
if not script.get('ScanDirectories') and scandir:
# Finally set the directory the user specified for scanning
script.set('ScanDirectories', scandir)
if not script.script_mode and not script.get('ScanDirectories'):
# Provide some CLI help when ScanDirectories has been
# detected as not being identified
parser.print_help()
exit(1)
# Attach Subliminal logging to output by connecting to its namespace
logging.getLogger('subliminal').\
addHandler(script.logger.handlers[0])
logging.getLogger('subliminal').\
setLevel(script.logger.getEffectiveLevel())
# Attach Apprise logging to output by connecting to its namespace
logging.getLogger('apprise').\
addHandler(script.logger.handlers[0])
logging.getLogger('apprise').\
setLevel(script.logger.getEffectiveLevel())
    # call run() and exit() using its returned value
exit(script.run())
|
caronc/nzbget-subliminal
|
Subliminal.py
|
Python
|
gpl-3.0
| 102,462
|
[
"VisIt"
] |
e266fdd1bb439a57c4a80f644e180f24514edbe3c105436d786ecb532fe3584b
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import input_array_choice_mixin # need this for constants
reload(input_array_choice_mixin)
from input_array_choice_mixin import InputArrayChoiceMixin
class warpPoints(ScriptedConfigModuleMixin, InputArrayChoiceMixin,
ModuleBase):
_defaultVectorsSelectionString = 'Default Active Vectors'
_userDefinedString = 'User Defined'
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
InputArrayChoiceMixin.__init__(self)
self._config.scaleFactor = 1
configList = [
('Scale factor:', 'scaleFactor', 'base:float', 'text',
'The warping will be scaled by this factor'),
('Vectors selection:', 'vectorsSelection', 'base:str', 'choice',
'The attribute that will be used as vectors for the warping.',
(self._defaultVectorsSelectionString, self._userDefinedString))]
self._warpVector = vtk.vtkWarpVector()
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self,
'vtkWarpVector' : self._warpVector})
module_utils.setup_vtk_object_progress(self, self._warpVector,
'Warping points.')
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
# get rid of our reference
del self._warpVector
def execute_module(self):
self._warpVector.Update()
if self.view_initialised:
# second element in configuration list
choice = self._getWidget(1)
self.iac_execute_module(self._warpVector, choice, 0)
def get_input_descriptions(self):
return ('VTK points/polydata with vector attribute',)
def set_input(self, idx, inputStream):
if inputStream is None:
self._warpVector.SetInputConnection(0, None)
else:
self._warpVector.SetInput(inputStream)
def get_output_descriptions(self):
return ('Warped data',)
def get_output(self, idx):
# we only return something if we have something
if self._warpVector.GetNumberOfInputConnections(0):
return self._warpVector.GetOutput()
else:
return None
def logic_to_config(self):
self._config.scaleFactor = self._warpVector.GetScaleFactor()
# this will extract the possible choices
self.iac_logic_to_config(self._warpVector, 0)
def config_to_view(self):
# first get our parent mixin to do its thing
ScriptedConfigModuleMixin.config_to_view(self)
# the vector choice is the second configTuple
choice = self._getWidget(1)
self.iac_config_to_view(choice)
def config_to_logic(self):
self._warpVector.SetScaleFactor(self._config.scaleFactor)
self.iac_config_to_logic(self._warpVector, 0)
|
chrisidefix/devide
|
modules/filters/warpPoints.py
|
Python
|
bsd-3-clause
| 3,444
|
[
"VTK"
] |
0ab44018522e02f92f3b8a8df68ac5c5ec6cbc665676854ed42957077e4e2b47
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import warnings
import textwrap
import decorator
from ._exception import OverrideError
class _state_decorator(object):
""" Base class for decorators of all public functionality.
"""
_required_kwargs = ()
def _get_indentation_level(self, docstring_lines,
default_existing_docstring=4,
default_no_existing_docstring=0):
""" Determine the level of indentation of the docstring to match it.
The indented content after the first line of a docstring can
differ based on the nesting of the functionality being documented.
For example, a top-level function may have its "Parameters" section
indented four-spaces, but a method nested under a class may have
its "Parameters" section indented eight spaces. This function
determines the indentation level of the first non-whitespace line
following the initial summary line.
"""
# if there is no existing docstring, return the corresponding default
if len(docstring_lines) == 0:
return default_no_existing_docstring
# if there is an existing docstring with only a single line, return
# the corresponding default
if len(docstring_lines) == 1:
return default_existing_docstring
# find the first non-blank line (after the initial summary line) and
# return the number of leading spaces on that line
for line in docstring_lines[1:]:
if len(line.strip()) == 0:
# ignore blank lines
continue
else:
return len(line) - len(line.lstrip())
# if there is an existing docstring with only a single non-whitespace
# line, return the corresponding default
return default_existing_docstring
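    # A minimal sketch (hypothetical lines, illustration only) of the behaviour
    # above: the first non-blank line after the summary determines the result,
    #   ["Summary line.", "", "        Parameters"]  ->  8
    # while a single-line docstring falls back to the default of 4.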
def _update_docstring(self, docstring, state_desc,
state_desc_prefix='State: '):
        # Handle the case of no initial docstring
if docstring is None:
return "%s%s" % (state_desc_prefix, state_desc)
docstring_lines = docstring.split('\n')
docstring_content_indentation = \
self._get_indentation_level(docstring_lines)
# wrap lines at 79 characters, accounting for the length of
# docstring_content_indentation and start_desc_prefix
len_state_desc_prefix = len(state_desc_prefix)
wrap_at = 79 - (docstring_content_indentation + len_state_desc_prefix)
state_desc_lines = textwrap.wrap(state_desc, wrap_at)
# The first line of the state description should start with
# state_desc_prefix, while the others should start with spaces to align
# the text in this section. This is for consistency with numpydoc
# formatting of deprecation notices, which are done using the note
# Sphinx directive.
state_desc_lines[0] = '%s%s%s' % (' ' * docstring_content_indentation,
state_desc_prefix,
state_desc_lines[0])
header_spaces = ' ' * (docstring_content_indentation +
len_state_desc_prefix)
for i, line in enumerate(state_desc_lines[1:], 1):
state_desc_lines[i] = '%s%s' % (header_spaces, line)
new_doc_lines = '\n'.join(state_desc_lines)
docstring_lines[0] = '%s\n\n%s' % (docstring_lines[0], new_doc_lines)
return '\n'.join(docstring_lines)
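    # For illustration (hypothetical values): a one-line docstring
    #   "Compute a thing."
    # combined with state_desc "Stable as of 0.3.0." becomes roughly
    #   "Compute a thing.\n\n    State: Stable as of 0.3.0."
    # i.e. the note is indented to match the existing docstring body.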
def _validate_kwargs(self, **kwargs):
for required_kwarg in self._required_kwargs:
if required_kwarg not in kwargs:
raise ValueError('%s decorator requires parameter: %s' %
(self.__class__, required_kwarg))
class stable(_state_decorator):
""" State decorator indicating stable functionality.
Used to indicate that public functionality is considered ``stable``,
meaning that its API will be backward compatible unless it is deprecated.
Decorating functionality as stable will update its doc string to indicate
the first version of scikit-bio when the functionality was considered
stable.
Parameters
----------
as_of : str
First release version where functionality is considered to be stable.
See Also
--------
experimental
deprecated
Examples
--------
>>> @stable(as_of='0.3.0')
... def f_stable():
... \"\"\" An example stable function.
... \"\"\"
... pass
>>> help(f_stable)
Help on function f_stable in module skbio.util._decorator:
<BLANKLINE>
f_stable()
An example stable function.
<BLANKLINE>
State: Stable as of 0.3.0.
<BLANKLINE>
"""
_required_kwargs = ('as_of', )
def __init__(self, *args, **kwargs):
self._validate_kwargs(**kwargs)
self.as_of = kwargs['as_of']
def __call__(self, func):
state_desc = 'Stable as of %s.' % self.as_of
func.__doc__ = self._update_docstring(func.__doc__, state_desc)
return func
class experimental(_state_decorator):
""" State decorator indicating experimental functionality.
Used to indicate that public functionality is considered experimental,
meaning that its API is subject to change or removal with little or
(rarely) no warning. Decorating functionality as experimental will update
its doc string to indicate the first version of scikit-bio when the
functionality was considered experimental.
Parameters
----------
as_of : str
First release version where feature is considered to be experimental.
See Also
--------
stable
deprecated
Examples
--------
>>> @experimental(as_of='0.3.0')
... def f_experimental():
... \"\"\" An example experimental function.
... \"\"\"
... pass
>>> help(f_experimental)
Help on function f_experimental in module skbio.util._decorator:
<BLANKLINE>
f_experimental()
An example experimental function.
<BLANKLINE>
State: Experimental as of 0.3.0.
<BLANKLINE>
"""
_required_kwargs = ('as_of', )
def __init__(self, *args, **kwargs):
self._validate_kwargs(**kwargs)
self.as_of = kwargs['as_of']
def __call__(self, func):
state_desc = 'Experimental as of %s.' % self.as_of
func.__doc__ = self._update_docstring(func.__doc__, state_desc)
return func
class deprecated(_state_decorator):
""" State decorator indicating deprecated functionality.
Used to indicate that a public class or function is deprecated, meaning
that its API will be removed in a future version of scikit-bio. Decorating
    functionality as deprecated will update its doc string to indicate the
first version of scikit-bio when the functionality was deprecated, the
first version of scikit-bio when the functionality will no longer exist,
and the reason for deprecation of the API. It will also cause calls to the
API to raise a ``DeprecationWarning``.
Parameters
----------
as_of : str
First development version where feature is considered to be deprecated.
until : str
First release version where feature will no longer exist.
reason : str
Brief description of why the API is deprecated.
See Also
--------
stable
experimental
Examples
--------
>>> @deprecated(as_of='0.3.0', until='0.3.3',
... reason='Use skbio.g().')
... def f_deprecated(x, verbose=False):
... \"\"\" An example deprecated function.
... \"\"\"
... pass
>>> help(f_deprecated)
Help on function f_deprecated in module skbio.util._decorator:
<BLANKLINE>
f_deprecated(x, verbose=False)
An example deprecated function.
<BLANKLINE>
.. note:: Deprecated as of 0.3.0 for removal in 0.3.3. Use skbio.g().
<BLANKLINE>
"""
_required_kwargs = ('as_of', 'until', 'reason')
def __init__(self, *args, **kwargs):
self._validate_kwargs(**kwargs)
self.as_of = kwargs['as_of']
self.until = kwargs['until']
self.reason = kwargs['reason']
def __call__(self, func, *args, **kwargs):
state_desc = 'Deprecated as of %s for removal in %s. %s' %\
(self.as_of, self.until, self.reason)
func.__doc__ = self._update_docstring(func.__doc__, state_desc,
state_desc_prefix='.. note:: ')
def wrapped_f(*args, **kwargs):
warnings.warn('%s is deprecated as of scikit-bio version %s, and '
'will be removed in version %s. %s' %
(func.__name__, self.as_of, self.until, self.reason),
DeprecationWarning)
            # decorator.decorator invokes this caller with the wrapped
            # function itself as the first positional argument, so strip it
            # off and forward the remaining arguments to the original func.
            return func(*args[1:], **kwargs)
return decorator.decorator(wrapped_f, func)
# Adapted from http://stackoverflow.com/a/8313042/579416
@experimental(as_of="0.4.0")
def overrides(interface_class):
"""Decorator for class-level members.
Used to indicate that a member is being overridden from a specific parent
class. If the member does not have a docstring, it will pull one from the
parent class. When chaining decorators, this should be first as it is
relatively nondestructive.
Parameters
----------
interface_class : class
The class which has a member overridden by the decorated member.
Returns
-------
function
The function is not changed or replaced.
Raises
------
OverrideError
If the `interface_class` does not possess a member of the same name
as the decorated member.
"""
def overrider(method):
if method.__name__ not in dir(interface_class):
raise OverrideError("%r is not present in parent class: %r." %
(method.__name__, interface_class.__name__))
if method.__doc__ is None:
method.__doc__ = getattr(interface_class, method.__name__).__doc__
return method
return overrider
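# A minimal usage sketch (hypothetical classes, not part of scikit-bio):
#   class Base(object):
#       def frobnicate(self):
#           """Do the thing."""
#   class Child(Base):
#       @overrides(Base)
#       def frobnicate(self):      # inherits Base.frobnicate's docstring
#           pass
# Decorating a name that Base does not define would raise OverrideError.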
class classproperty(property):
"""Decorator for class-level properties.
Supports read access only. The property will be read-only within an
instance. However, the property can always be redefined on the class, since
Python classes are mutable.
Parameters
----------
func : function
Method to make a class property.
Returns
-------
property
Decorated method.
Raises
------
AttributeError
If the property is set on an instance.
"""
@experimental(as_of="0.4.0")
def __init__(self, func):
name = func.__name__
doc = func.__doc__
super(classproperty, self).__init__(classmethod(func))
self.__name__ = name
self.__doc__ = doc
@experimental(as_of="0.4.0")
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
@experimental(as_of="0.4.0")
def __set__(self, obj, value):
raise AttributeError("can't set attribute")
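# A minimal usage sketch (hypothetical class, not part of scikit-bio):
#   class Alphabet(object):
#       @classproperty
#       def gap_chars(cls):
#           return set('-.')
#   Alphabet.gap_chars            # evaluates gap_chars(Alphabet) -> set('-.')
#   Alphabet().gap_chars          # same value via an instance
#   Alphabet().gap_chars = set()  # raises AttributeError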
|
Achuth17/scikit-bio
|
skbio/util/_decorator.py
|
Python
|
bsd-3-clause
| 11,769
|
[
"scikit-bio"
] |
4091320b0196e9e7716814c5ae0532e1daf8f4c25a5eac30b60ace8b93ce0091
|
"""Expression for IPI format.
IPI is nearly swissprot, but contains some differents which makes the
Swissprot parsers choke.
"""
import warnings
warnings.warn("Bio.expressions was deprecated, as it does not work with recent versions of mxTextTools. If you want to continue to use this module, please get in contact with the Biopython developers at [email protected] to avoid permanent removal of this module from Biopython", DeprecationWarning)
from Bio import Std
import Martel
from Martel import Time
import sprot40
# The ID line contains a versioned period number
ID_exp = Martel.Group("ID",
Martel.Str("ID ") + \
Std.dbid(Martel.Group("entry_name", Martel.Re("[\w.]+")),
{"type": "primary", "dbname": "sp"}) + \
Martel.Spaces() + \
Martel.Word("data_class_table") + \
Martel.Str(";") + Martel.Spaces() + \
Martel.Word("molecule_type") + \
Martel.Str(";") + Martel.Spaces() + \
Martel.Digits("sequence_length") + \
Martel.Str(" AA.") + \
Martel.AnyEol()
)
# The DT formatted lines look different, and there is not
# a third DT line for annotations
# DT 04-MAR-2003 (IPI Human rel. 2.17, Created)
# DT 04-MAR-2003 (IPI Human rel. 2.17, Last sequence update)
DT_created_exp = (Martel.Str("DT ") +
Time.make_expression("%(DD)-%(Jan)-%(YYYY)") + \
Martel.Str(" (IPI Human rel. ") + \
Martel.Float("release") + \
Martel.Str(", Created)") + Martel.AnyEol())
DT_seq_update_exp = (Martel.Str("DT ") +
Time.make_expression("%(DD)-%(Jan)-%(YYYY)") + \
Martel.Str(" (IPI Human rel. ") + \
Martel.Float("release") + \
Martel.Str(", Last sequence update)") + Martel.AnyEol())
DT_ann_update_exp = (Martel.Str("DT ") +
Time.make_expression("%(DD)-%(Jan)-%(YYYY)") + \
Martel.Str(" (IPI Human rel. ") + \
Martel.Float("release") + \
Martel.Str(", Last annotation update)") + Martel.AnyEol())
replacements = [
("ID", ID_exp),
("DT_created", DT_created_exp),
("DT_seq_update", DT_seq_update_exp),
("DT_ann_update", Martel.Opt(DT_ann_update_exp))
]
record = Martel.replace_groups(sprot40.record, replacements)
format_expression = Martel.replace_groups(
sprot40.format_expression, replacements)
format = Martel.replace_groups(sprot40.format, replacements)
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/expressions/swissprot/ipi.py
|
Python
|
apache-2.0
| 2,637
|
[
"Biopython"
] |
83aeb52de2d6e426df0516880b524f4e9f19b8b0524620da8da804a82b6b827b
|
from __future__ import division
from __future__ import print_function
from mpop.satellites import GeostationaryFactory
from mpop.projector import get_area_def
import datetime
import numpy as np
# in this example we read parallax corrected data
# from an netCDF file, do some modifications to it.
# you could also use another data source,
# or another modification, e.g. Lagrangian movement.
# Afterwards, we create another geostationaryFactory object
# and demonstrate how to add an artificial channel
## uncomment these two lines for more debugging information
from mpop.utils import debug_on
debug_on()
if False:
from my_msg_module import get_last_SEVIRI_date
time_slot = get_last_SEVIRI_date(True, delay=10)
else:
import sys
    if len(sys.argv) < 6:  # need year, month, day, hour, minute
time_slot = datetime.datetime(2015, 7, 7, 12, 00)
else:
# python
year = int(sys.argv[1])
month = int(sys.argv[2])
day = int(sys.argv[3])
hour = int(sys.argv[4])
minute = int(sys.argv[5])
        time_slot = datetime.datetime(year, month, day, hour, minute)
print (" ")
print ('*** load data for time:', str(time_slot))
#global_data = GeostationaryFactory.create_scene("Meteosat-10", "", "seviri", time_slot)
global_data = GeostationaryFactory.create_scene("Meteosat-9", "", "seviri", time_slot)
#global_data = GeostationaryFactory.create_scene("Meteosat-8", "", "seviri", time_slot)
from my_composites import get_image
obj_image = get_image(global_data, 'HRoverview')
print ("... read following channels: ", obj_image.prerequisites)
# read parallax corrected data from netCDF
global_data.load(obj_image.prerequisites, reader_level="seviri-level9")
print (" ")
print ('*** some info about the loaded data')
print (global_data)
# data manipulation, we move by 100pixels to the East and 50 to the South
dx=50
dy=100
HRV = np.roll(global_data['HRV'].data , dx,axis=0)
HRV = np.roll(HRV , dy,axis=1)
VIS006 = np.roll(global_data['VIS006'].data, dx,axis=0)
VIS006 = np.roll(VIS006 , dy,axis=1)
VIS008 = np.roll(global_data['VIS008'].data, dx,axis=0)
VIS008 = np.roll(VIS008, dy,axis=1)
IR_108 = np.roll(global_data['IR_108'].data, dx,axis=0)
IR_108 = np.roll(IR_108, dy,axis=1)
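# For illustration (toy array, not SEVIRI data): np.roll shifts entries
# cyclically along the given axis, so pixels pushed off one edge re-enter
# on the opposite edge, e.g.
#   np.roll(np.array([1, 2, 3, 4]), 1)  ->  array([4, 1, 2, 3])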
# create another geostat object
data = GeostationaryFactory.create_scene("Meteosat-9", "", "seviri", time_slot)
from mpop.channel import Channel
# external definiton of the area
from mpop.projector import get_area_def
area = 'ccs4'
area_def = get_area_def(area)
print (" ")
print ('*** some info about the area definition')
print (area_def)
# print (area_def.proj_id) # 'somerc'
# print (area_def.proj_dict) # {'ellps': 'bessel', 'k_0': '1', 'lat_0': '46.9524055555556', 'lon_0': '7.43958333333333', 'proj': 'somerc', 'x_0': '600000', 'y_0': '200000'}
# print (area_def.aex) # (255000.0, -21000.0, 965000.0, 330000.0)
# print (area_def.x_size) # 710
# print (area_def.y_size) # 640 -> this is changed later!
create_subregion=False
if create_subregion:
y1=150
y2=500
HRV = HRV[y1:y2,:]
VIS006 = VIS006[y1:y2,:]
VIS008 = VIS008[y1:y2,:]
IR_108 = IR_108[y1:y2,:]
# print (HRV.shape) # (350, 710)
# get_area_extent_for_subset(self, row_LR, col_LR, row_UL, col_UL):
aex = area_def.get_area_extent_for_subset(y2,0,y1,area_def.x_size-1)
from pyresample.geometry import AreaDefinition
area = "on_the_fly"
# redefine area_def (using still the original area_def on the right side)
area_def = AreaDefinition(area, area, area_def.proj_id, area_def.proj_dict, area_def.x_size, y2-y1, aex)
print (" ")
print ('*** some info about the NEW area definition')
print (area_def)
# defining a new channel named 'var', the wavelength range is not that critical (for radar data I usually use [0.,0.,0.])
data.channels.append(Channel(name='var', wavelength_range=[0.0,0.0,0.0], data=HRV ))
data['var'].area = area
data['var'].area_def = area_def
# here we replace the already defined channels
data['HRV'].data = HRV
data['HRV'].area = area
data['HRV'].area_def = area_def
data['VIS006'].data = VIS006
data['VIS006'].area = area
data['VIS006'].area_def = area_def
data['VIS008'].data = VIS008
data['VIS008'].area = area
data['VIS008'].area_def = area_def
data['IR_108'].data = IR_108
data['IR_108'].area = area
data['IR_108'].area_def = area_def
print (" ")
print ('*** some info about the manipulated data')
print (data)
print (" ")
print ('*** create the image')
img = data.image.hr_overview()
#if True:
# img.show()
#else:
# filename=time_slot.strftime('MSG_'+chn+'-'+area+'_%y%m%d%H%M.png')
# img.save(filename)
PIL_image=img.pil_image()
add_map_overlay=True
if add_map_overlay:
print (" ")
    print ('*** add the map overlay')
from pycoast import ContourWriterAGG
cw = ContourWriterAGG('/opt/users/common/shapes/')
# define area
proj4_string = area_def.proj4_string
# e.g. proj4_string = '+proj=geos +lon_0=0.0 +a=6378169.00 +b=6356583.80 +h=35785831.0'
area_extent = area_def.area_extent
# e.g. area_extent = (-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)
area_tuple = (proj4_string, area_extent)
from plot_msg import add_borders_and_rivers
## possible resolutions
## f full resolution: Original (full) data resolution.
## h high resolution: About 80 % reduction in size and quality.
## i intermediate resolution: Another ~80 % reduction.
## l low resolution: Another ~80 % reduction.
## c crude resolution: Another ~80 % reduction.
add_borders_and_rivers(PIL_image, cw, area_tuple,
add_borders=True, border_color='red',
add_rivers=False, river_color='blue',
resolution='i', verbose=False)
if True:
PIL_image.show()
print ("*** show image in x-Window ")
else:
filename=time_slot.strftime('MSG_'+chn+'-'+area+'_%y%m%d%H%M_shifted.png')
PIL_image.save(filename)
print ("*** display "+filename)
|
meteoswiss-mdr/monti-pytroll
|
scripts/demo_add_channel.py
|
Python
|
lgpl-3.0
| 6,339
|
[
"NetCDF"
] |
d5e0f62df8ded3a8019c28bc63c0585689c6dc7163751b537ae1d2e68666d2b6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from lib.utility import misc
from lib.utility.basetype import OrderedEnum
from scli.constants import EbCliVersion, ParameterName
#----------------------------------------------
# Command line arguments
#----------------------------------------------
class EBSCliAttr(object):
Name = u'AWS Elastic Beanstalk Command Line Interface'
Version = u'AWS Elastic Beanstalk Command Line Interface ' + misc.to_unicode(EbCliVersion)
Usage = u'%(prog)s {COMMAND} [options]'
ErrorMsg =u'Error: {0}\n{1}Try {2} --help for more information.\n'
InvalidCommand = u'Invalid command: {0}'
CommandType = OrderedEnum([
u'INIT',
u'BRANCH',
u'START',
u'STATUS',
u'UPDATE',
u'STOP',
u'DELETE',
])
""" Define command line argument switch """
CLISwitch = {
ParameterName.Command : u'command',
ParameterName.AwsCredentialFile : u'aws-credential-file',
ParameterName.AwsAccessKeyId : u'access-key-id',
ParameterName.AwsSecretAccessKey : u'secret-key',
ParameterName.Region : u'region',
ParameterName.ServiceEndpoint : u'url',
ParameterName.SolutionStack : u'solution-stack',
ParameterName.ApplicationName : u'application-name',
ParameterName.ApplicationVersionName : u'version-label',
ParameterName.EnvironmentName : u'environment-name',
ParameterName.WaitForFinishTimeout : u'polling-timeout',
ParameterName.Force : u'force',
ParameterName.Verbose : u'verbose',
}
""" Define help message for each command line argument """
CLISwitchMsg = {
ParameterName.Command : u'',
ParameterName.AwsCredentialFile : u'File location where your AWS credentials ' \
'are saved. Use the environment variable AWS_CREDENTIAL_FILE to set the ' \
'file location.',
ParameterName.AwsAccessKeyId : u'Your AWS Access Key ID.',
ParameterName.AwsSecretAccessKey : u'Your AWS Secret Key.',
ParameterName.Region : u'AWS Elastic Beanstalk region (i.e., us-east-1).',
ParameterName.ServiceEndpoint : u'AWS Elastic Beanstalk endpoint (i.e., ' \
'elasticbeanstalk.us-east-1.amazonaws.com).',
ParameterName.SolutionStack : u'Solution stack used as an application ' \
'container type.',
ParameterName.ApplicationName : u'The name of the application to deploy.',
ParameterName.ApplicationVersionName : u'The name of the application version ' \
'to deploy.',
ParameterName.EnvironmentName : u'A unique name for the environment deployment. ' \
'The environment name is used in the application URL.',
ParameterName.WaitForFinishTimeout : u'Number of seconds before a start or stop ' \
'operation times out when polling for environment status. ' \
'The default is {0} second.',
ParameterName.Force : u'Skip confirmation prompt.',
ParameterName.Verbose : u'Display verbose information.',
}
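# A minimal sketch (not the scli implementation) of how these two tables could
# drive option registration, e.g. with optparse; 'parser' and the 'region'
# destination name are illustrative assumptions only:
#   parser.add_option('--' + CLISwitch[ParameterName.Region],
#                     dest='region',
#                     help=CLISwitchMsg[ParameterName.Region])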
#----------------------------------------------
# Terminals
#----------------------------------------------
class TerminalMessage(object):
CurrentValue = u' (current value is "{0}")'
AutoGeneratedValue = u' (auto-generated value is "{0}")'
SingleChoice = u'Select: '
AvailableRegion = u'Available service regions are:'
AvailableSolutionStack = u'Available solution stacks are:'
ConfirmToProceed = u'Are you sure? [y/n]: '
AWSKeyLocateHelp = u'To get your AWS Access Key ID and Secret Access Key, \n visit ' \
'"https://aws-portal.amazon.com/gp/aws/securityCredentials".'
CopyDefaultToBranch = u'Do you want to copy the settings from environment "{0}" for the new branch? [y/n]: '
NoBranchToRegister = u"The branch doesn't exist."
FallBackToDefaultBranch = u'The current branch "{0}" is not associated with an Elastic Beanstalk '\
'environment. Call "eb branch" to set up a new environment for this branch. '\
'\nProceeding with default settings.'
""" Define prompt message for terminal interface """
TerminalPromptAskingMessage = {
ParameterName.AwsAccessKeyId : u'Enter your AWS Access Key ID{0}: ',
ParameterName.AwsSecretAccessKey : u'Enter your AWS Secret Access Key{0}: ',
ParameterName.Region : u'Select an AWS Elastic Beanstalk service region{0}.',
ParameterName.ServiceEndpoint : u'Enter AWS Elastic Beanstalk service endpoint{0}: ',
ParameterName.SolutionStack : u'Select a solution stack{0}.',
ParameterName.ApplicationName : u'Enter an AWS Elastic Beanstalk application name{0}: ',
ParameterName.ApplicationVersionName : u'Enter an AWS Elastic Beanstalk application version name{0}: ',
ParameterName.EnvironmentName : u'Enter an AWS Elastic Beanstalk environment name{0}: ',
ParameterName.RdsEnabled : u'Create an RDS DB Instance? [y/n]{0}: ',
ParameterName.RdsSourceSnapshotName : u'Enter your snapshot ID: ',
ParameterName.RdsMasterPassword : u'Enter an RDS DB master password{0}: ',
        ParameterName.RdsDeletionPolicy: u'If you terminate your environment, your RDS DB '\
'Instance will be deleted and you will lose your data.\nCreate snapshot? [y/n]{0}: ',
}
TerminalPromptSettingParameterMessage = {
ParameterName.EnvironmentName : u'Using auto-generated AWS Elastic Beanstalk environment name: "{0}".',
ParameterName.Region : u'Setting AWS Elastic Beanstalk region to "{0}".',
ParameterName.ServiceEndpoint : u'Setting AWS Elastic Beanstalk service endpoint to "{0}".',
ParameterName.RdsEndpoint : u'Setting AWS Relational Database Service endpoint to "{0}".',
ParameterName.SolutionStack : u'Setting solution stack to "{0}".',
ParameterName.RdsSnippetUrl : u'Setting RDS extension url to "{0}".',
ParameterName.CurrentBranch : u'The current branch is "{0}".',
}
TerminalPromptUsingParameterMessage = {
ParameterName.AwsAccessKeyId : u'AWS Access Key ID: ***',
ParameterName.AwsSecretAccessKey : u'AWS Secret Access Key: ***',
ParameterName.Region : u'AWS Elastic Beanstalk service region: {0}.',
ParameterName.ServiceEndpoint : u'AWS Elastic Beanstalk service endpoint: {0}.',
ParameterName.SolutionStack : u'Solution stack {0}.',
ParameterName.ApplicationName : u'AWS Elastic Beanstalk application: {0}.',
ParameterName.ApplicationVersionName : u'AWS Elastic Beanstalk application version : {0}.',
ParameterName.EnvironmentName : u'AWS Elastic Beanstalk environment: {0}.',
}
class RdsTerminalMessage(object):
    RdsSnapshotNameSelect = u'Create an RDS DB Instance from{0}:'
NoSnapshot = u'No snapshot'
OtherSnapshot = u'Other snapshot'
PasswordConfirmation = u'Retype password to confirm: '
PasswordNotMatch = u'Error: passwords do not match.'
PasswordMatchFailure = u'Rerun "eb init" to set up configuration.'
PasswordWrongFormat = u'Error: password must contain from 8 to 41 alphanumeric characters.'
#----------------------------------------------
# Operation
#----------------------------------------------
class BaseOpMessage(object):
WaitForEnv = u'Waiting for environment "{0}" to {1}.'
EnvNotExist = u'Environment "{0}" does not exist.'
Running = u'Running.'
UserCanInterrupt = u'Press Ctrl+C to stop polling. The operation will continue on the server side.'
class CreateApplicationOpMessage(object):
Start = u'Now creating application "{0}".'
AlreadyExist = u'Starting application "{0}".'
Succeed = u'Created application "{0}".'
class DeleteApplicationOpMessage(object):
Start = u'Now deleting application "{0}".'
AlreadyDelete = u'Application "{0}" has already been deleted.'
Succeed = u'Deleted application "{0}".'
class DescribeEnvironmentOpMessage(object):
Start = u'Retrieving status of environment "{0}".'
NoEnvironment = u'Environment "{0}" is not running.'
Result = u'URL\t: {0}\nStatus\t: {1}\nHealth\t: {2}'
Detail = u'Environment Name:\t{0}\nEnvironment ID:\t\t{1}\nSolution Stack:\t\t{2}\n'\
'Version Label:\t\t{3}\nDate Created:\t\t{4}\nDate Updated:\t\t{5}\nDescription:\t\t{6}'
RdsInfo = u'\nRDS Database: {0} | {1}:{2}'
RdsDetail = u'Database Engine:\t{0}\nAllocated Storage:\t{1}\nInstance Class:\t\t{2}\n'\
'Multi AZ:\t\t{3}\nMaster Username:\t{4}\nCreation Time:\t\t{5}\n'\
'DB Instance Status:\t{6}'
class CreateEnvironmentOpMessage(object):
Start = u'Now creating Environment "{0}".'
AlreadyExist = u'Environment "{0}" already exists. Skipped creating.'
UsingOptionSetting = u'Using option settings retrieved from file "{0}" to create environment.'
Succeed = u'Started creating environment.'
WaitAfterLaunch = u'Waiting for environment "{0}" to launch.'
class WaitForCreateEnvironmentFinishOpMessage(object):
Action = u'launch'
Succeed = u'Creation of environment "{0}" has completed.'
    Timeout = u'Timed out waiting for environment {0} to finish launching.'
Result = u'Application is available at "{0}".'
class TerminateEnvironmentOpMessage(object):
Start = u'Now terminating environment "{0}".'
Succeed = u'Stopping environment "{0}". This may take a few minutes.'
class WaitForTerminateEnvironmentFinishOpMessage(object):
Action = u'stop'
Succeed = u'Stop of environment "{0}" has completed.'
    Timeout = u'Timed out waiting for environment "{0}" to finish termination.'
Status = u'Environment Status is "{0}". Health is "{1}".'
Result = u'Environment status is: "{0}".'
class UpdateEnvironmentOptionSettingOpMessage(object):
Continue = u'Continue? [y/n]: '
Start = u'Now updating environment "{0}"\'s option settings.'
UsingOptionSetting = u'Using option settings retrieved from file "{0}" to update environment.'
Succeed = u'Updating environment "{0}". This may take a few minutes.'
class WaitForUpdateEnvOptionSettingFinishOpMessage(object):
Action = u'updating'
Succeed = u'Update of environment "{0}" has completed.'
    Timeout = u'Timed out waiting for environment "{0}" to finish updating.'
Result = u'Environment URL is "{0}", status is: "{1}", health is "{2}".'
class CreateApplicationVersionOpMessage(object):
Start = u'Now creating application version "{0}" using the sample application.'
AlreadyExist = u'Application version "{0}" already exists. Skipped creating.'
HasDefaultAppSource = u'Specified solution stack "{0}" has default application.'
CannotRecognize = u'Cannot infer sample app for solution stack "{0}".'
Skip = u'Skipped creating sample application version.'
Succeed = u'Created application version "{0}".'
class ListSolutionStackOpMessage(object):
Start = u'Retrieving available solution stacks.'
Result = u'Available solution stacks: "{0}"'
class SaveConfigurationSettingOpMessage(object):
Start = u'Now retrieving options settings for environment "{0}".'
Succeed = u'Saved option settings in "{0}"'
class WriteAwsCredentialFileOpMessage(object):
Confirm = u'Confirm to create AWS Credential file "{0}"? [y/n]'
Succeed = u'Updated AWS Credential file at "{0}".'
class AskConfirmationOpMessage(object):
CommandConfirmation ={
CommandType.STOP : u'Terminate environment? [y/n]: ',
CommandType.DELETE : u'Delete application? [y/n]: ',
CommandType.UPDATE : u'Update environment? [y/n]: ',
}
CommandWarning ={
CommandType.STOP : u'If you terminate your environment, your RDS DB Instance will be'\
' deleted and you will lose your data.',
CommandType.DELETE : u'If you delete your application, your RDS DB Instance will be'\
' deleted and you will lose your data.',
        CommandType.UPDATE : u'If you update your environment, your RDS DB Instance will be'\
' deleted and you will lose your data.',
}
#----------------------------------------------
# Validation
#----------------------------------------------
class ValidationMessage(object):
EmptyString = u"{0}'s value cannot be empty."
InvalidNumber = u'Invalid number "{0}"'
NumberTooBig = u'{0} is bigger than allowed range'
    NumberTooSmall = u'{0} is smaller than allowed range'
InvalidSolutionStack = u'Specified solution stack "{0}" is invalid.'
InvalidRegion = u'Specified region "{0}" is invalid.'
ValidateSettingError = u'{0}: Namespace "{1}", Option name "{2}"\n {3}'
#----------------------------------------------
# Configuration file
#----------------------------------------------
class GeneralFileMessage(object):
RenameFile = u'Renamed file "{0}" to "{1}".'
RotationNameNotAvailable = u'File {0} cannot be rotated because no rotation name is available.'
class CredentialFileMessage(object):
CannotFind = u'The AWS credential file, "{0}", cannot be found. Use the "{1}" command to create it.'
CorrectionSuggestion = u'The AWS credential, "{0}", could not be parsed. ' \
'Delete the file, or try the {1} command to recreate.'
class CredentialFileErrorMessage(object):
ReadError = u'Encountered error while reading from file {0}.'
WriteError = u'Encountered error while writing file {0}.'
class ConfigFileMessage(object):
CannotFind = u'The configuration file, "{0}", cannot be found. Use the "{1}" command to create it.'
CorrectionSuggestion = u'The configuration file, "{0}", could not be parsed. '\
'Delete the file, or try the "{1}" command to reconfigure.'
BranchResetWarning = u'Existing branches have been reset due to a change in the application name, '\
'region, or solution stack.'
class ConfigFileErrorMessage(object):
ReadError = u'Encountered error while reading from file {0}.'
WriteError = u'Encountered error while saving file {0}.'
PermissionError = u'Warning. File "{0}" has incorrect access permission '\
'and can be accessed by other users.'
class DevToolsMessage(object):
GitCommandError = u'Unable to call Git. Install Git to deploy to Elastic Beanstalk.'
GitRepoNotExist = u'Warning: Your directory has not been initialized as a Git repository. '\
'To create a local Git repository, run "git init" and then re-run the "eb init" command.'
InitError = u'Failed to update local Git repository setting.'
ExecutionError = u'Failed to update local Git configuration. '\
'Follow the instructions at "{0}" to set up your Git repository, and then try again.'
FileMissingError = u'Cannot run AWS Dev Tools initialization script at "{0}".'
class OptionSettingFileMessage(object):
CannotFind = u'The option setting file, "{0}", cannot be found. Use the "{1}" command to create it.'
CorrectionSuggestion = u'The option setting, "{0}", could not be parsed. ' \
'Delete the file, or try the {1} command to recreate.'
class OptionSettingFileErrorMessage(object):
ReadError = u'Encountered error while reading from file {0}.'
WriteError = u'Encountered error while saving file {0}.'
|
JoaoVasques/aws-devtool
|
eb/macosx/python2.7/scli/resources.py
|
Python
|
apache-2.0
| 16,173
|
[
"VisIt"
] |
9844b927a3fd99604bfbc4d03f232cb09da7cf9889a50d03c2ef01175ab3604c
|
##
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for DL_POLY Classic, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import shutil
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import copy_file, copy_dir
from easybuild.tools.run import run_cmd
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_DL_underscore_POLY_underscore_Classic(ConfigureMake):
"""Support for building and installing DL_POLY Classic."""
def __init__(self, *args, **kwargs):
"""Easyblock constructor; initialize class variables."""
super(EB_DL_underscore_POLY_underscore_Classic, self).__init__(*args, **kwargs)
# check whether PLUMED is listed as a dependency
self.with_plumed = 'PLUMED' in [dep['name'] for dep in self.cfg['dependencies']]
# create PLUMED patch in prepare_step rather than patch_step,
# so we can rely on being in the unpacked source directory
def prepare_step(self, *args, **kwargs):
"""Generate PLUMED patch if PLUMED is listed as a dependency."""
super(EB_DL_underscore_POLY_underscore_Classic, self).prepare_step(*args, **kwargs)
if self.with_plumed:
# see https://groups.google.com/d/msg/plumed-users/cWaIDU5F6Bw/bZUW3J9cCAAJ
diff_pat = 'dlpoly-*.diff'
try:
diff_hits = glob.glob(os.path.join(self.builddir, diff_pat))
except OSError as err:
raise EasyBuildError("Failed to find list of files/dirs that match '%s': %s", diff_pat, err)
if len(diff_hits) == 1:
plumed_patch = diff_hits[0]
elif not self.dry_run:
raise EasyBuildError("Expected to find exactly one match for '%s' in %s, found: %s",
diff_pat, self.builddir, diff_hits)
if not self.dry_run:
try:
os.rename('source', 'srcmod')
except OSError as err:
raise EasyBuildError("Failed to move 'source' directory to 'srcmod': %s", err)
engine = os.path.splitext(os.path.basename(plumed_patch))[0]
cmd = "plumed-patch -p --runtime -e %s -d %s" % (engine, plumed_patch)
run_cmd(cmd, log_all=True, simple=True)
def configure_step(self):
"""Copy the makefile to the source directory and use MPIF90 to do a parrallel build"""
self.cfg.update('buildopts', 'LD="$MPIF90 -o" FC="$MPIF90 -c" par')
if self.with_plumed:
source_dir = 'srcmod'
self.cfg.update('buildopts', 'LDFLAGS="${LDFLAGS} -lplumed -ldl"')
else:
source_dir = 'source'
copy_file(os.path.join('build', 'MakePAR'), os.path.join(source_dir, 'Makefile'))
try:
os.chdir(source_dir)
except OSError as err:
raise EasyBuildError("Failed to change to %s: %s", source_dir, err)
def install_step(self):
"""Copy the executables to the installation directory"""
self.log.debug("copying %s/execute to %s, (from %s)", self.cfg['start_dir'], self.installdir, os.getcwd())
# create a 'bin' subdir, this way we also get $PATH to be set correctly automatically
install_path = os.path.join(self.cfg['start_dir'], 'execute')
bin_path = os.path.join(self.installdir, 'bin')
copy_dir(install_path, bin_path)
def sanity_check_step(self):
"""Custom sanity check step for DL_POLY Classic"""
custom_paths = {
'files': ['bin/DLPOLY.X'],
'dirs': [],
}
super(EB_DL_underscore_POLY_underscore_Classic, self).sanity_check_step(custom_paths=custom_paths)
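# A hypothetical easyconfig fragment (illustration only, name and version
# numbers are assumptions) showing how listing PLUMED as a dependency would
# trigger the patching logic in prepare_step above:
#   name = 'DL_POLY_Classic'
#   version = '1.10'
#   dependencies = [('PLUMED', '2.5.1')]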
|
pescobar/easybuild-easyblocks
|
easybuild/easyblocks/d/dl_poly_classic.py
|
Python
|
gpl-2.0
| 4,824
|
[
"DL_POLY"
] |
623f7033a76628395955c57fc2c4b88de9f71de85b417e2d6d9f3e0729e87eab
|
"""Handle extraction of final files from processing pipelines into storage.
"""
import datetime
import os
import toolz as tz
from bcbio import log, utils
from bcbio.upload import shared, filesystem, galaxy, s3, irods
from bcbio.pipeline import run_info
from bcbio.variation import vcfutils
import bcbio.pipeline.datadict as dd
from bcbio.rnaseq.ericscript import EricScriptConfig
_approaches = {"filesystem": filesystem,
"galaxy": galaxy,
"s3": s3,
"irods": irods}
def project_from_sample(sample):
upload_config = sample.get("upload")
if upload_config:
approach = _approaches[upload_config.get("method", "filesystem")]
for finfo in _get_files_project(sample, upload_config):
approach.update_file(finfo, None, upload_config)
return [[sample]]
def from_sample(sample):
"""Upload results of processing from an analysis pipeline sample.
"""
upload_config = sample.get("upload")
if upload_config:
approach = _approaches[upload_config.get("method", "filesystem")]
for finfo in _get_files(sample):
approach.update_file(finfo, sample, upload_config)
return [[sample]]
def get_all_upload_paths_from_sample(sample):
upload_path_mapping = dict()
upload_config = sample.get("upload")
if upload_config:
method = upload_config.get("method", "filesystem")
if method == "filesystem":
approach = _approaches[method]
for finfo in _get_files_project(sample, upload_config):
path = approach.get_upload_path(finfo, None, upload_config)
upload_path_mapping[finfo["path"]] = path
for finfo in _get_files(sample):
path = approach.get_upload_path(finfo, sample, upload_config)
upload_path_mapping[finfo["path"]] = path
return upload_path_mapping
# ## File information from sample
def _get_files(sample):
"""Retrieve files for the sample, dispatching by analysis type.
Each file is a dictionary containing the path plus associated
metadata about the file and pipeline versions.
"""
analysis = sample.get("analysis")
if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]:
return _get_files_variantcall(sample)
elif analysis.lower() in ["rna-seq", "fastrna-seq"]:
return _get_files_rnaseq(sample)
elif analysis.lower() in ["smallrna-seq"]:
return _get_files_srnaseq(sample)
elif analysis.lower() in ["chip-seq"]:
return _get_files_chipseq(sample)
elif analysis.lower() in ["scrna-seq"]:
return _get_files_scrnaseq(sample)
else:
return []
def _get_files_rnaseq(sample):
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_alignment(algorithm, sample, out)
out = _maybe_add_transcriptome_alignment(sample, out)
out = _maybe_add_disambiguate(algorithm, sample, out)
out = _maybe_add_counts(algorithm, sample, out)
out = _maybe_add_cufflinks(algorithm, sample, out)
out = _maybe_add_stringtie(algorithm, sample, out)
out = _maybe_add_oncofuse(algorithm, sample, out)
out = _maybe_add_pizzly(algorithm, sample, out)
out = _maybe_add_rnaseq_variant_file(algorithm, sample, out)
out = _maybe_add_sailfish_files(algorithm, sample, out)
out = _maybe_add_salmon_files(algorithm, sample, out)
out = _maybe_add_kallisto_files(algorithm, sample, out)
out = _maybe_add_ericscript_files(algorithm, sample, out)
return _add_meta(out, sample)
def _get_files_srnaseq(sample):
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_trimming(algorithm, sample, out)
out = _maybe_add_seqbuster(algorithm, sample, out)
out = _maybe_add_trna(algorithm, sample, out)
out = _maybe_add_transcriptome_alignment(sample, out)
return _add_meta(out, sample)
def _get_files_scrnaseq(sample):
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_transcriptome_alignment(sample, out)
out = _maybe_add_scrnaseq(algorithm, sample, out)
out = _maybe_add_barcode_histogram(algorithm, sample, out)
return _add_meta(out, sample)
def _get_files_chipseq(sample):
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_alignment(algorithm, sample, out)
out = _maybe_add_peaks(algorithm, sample, out)
return _add_meta(out, sample)
def _add_meta(xs, sample=None, config=None):
"""Add top level information about the sample or flowcell to output.
Sorts outputs into sample names (sample input) and project (config input).
"""
out = []
for x in xs:
if not isinstance(x["path"], basestring) or not os.path.exists(x["path"]):
raise ValueError("Unexpected path for upload: %s" % x)
x["mtime"] = shared.get_file_timestamp(x["path"])
if sample:
sample_name = dd.get_sample_name(sample)
if "sample" not in x:
x["sample"] = sample_name
elif x["sample"] != sample_name:
x["run"] = sample_name
if config:
fc_name = config.get("fc_name") or "project"
fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
x["run"] = "%s_%s" % (fc_date, fc_name)
out.append(x)
return out
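# For illustration (hypothetical paths): with a sample named "Test1",
#   _add_meta([{"path": "/work/Test1-ready.bam", "type": "bam", "ext": "ready"}],
#             sample=sample)
# returns the same entry with "mtime" filled in and "sample" set to "Test1".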
def _get_files_variantcall(sample):
"""Return output files for the variant calling pipeline.
"""
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_alignment(algorithm, sample, out)
out = _maybe_add_disambiguate(algorithm, sample, out)
out = _maybe_add_variant_file(algorithm, sample, out)
out = _maybe_add_sv(algorithm, sample, out)
out = _maybe_add_hla(algorithm, sample, out)
out = _maybe_add_heterogeneity(algorithm, sample, out)
out = _maybe_add_validate(algorithm, sample, out)
return _add_meta(out, sample)
def _maybe_add_validate(algorithm, sample, out):
for i, plot in enumerate(tz.get_in(("validate", "grading_plots"), sample, [])):
ptype = os.path.splitext(plot)[-1].replace(".", "")
out.append({"path": plot,
"type": ptype,
"ext": "validate%s" % ("" if i == 0 else "-%s" % (i + 1))})
return out
def _maybe_add_rnaseq_variant_file(algorithm, sample, out):
vfile = sample.get("vrn_file")
if vfile:
ftype = "vcf.gz" if vfile.endswith(".gz") else "vcf"
out.append({"path": vfile,
"type": ftype})
return out
def _maybe_add_variant_file(algorithm, sample, out):
if sample.get("align_bam") is not None and sample.get("vrn_file"):
for x in sample["variants"]:
if not _sample_variant_file_in_population(x):
out.extend(_get_variant_file(x, ("vrn_file",)))
if x.get("bed_file"):
out.append({"path": x["bed_file"],
"type": "bed",
"ext": "%s-callregions" % x["variantcaller"],
"variantcaller": x["variantcaller"]})
if x.get("vrn_stats"):
for extra, fname in x["vrn_stats"].items():
ext = utils.splitext_plus(fname)[-1].replace(".", "")
out.append({"path": fname,
"type": ext,
"ext": "%s-%s" % (x["variantcaller"], extra),
"variantcaller": x["variantcaller"]})
if x.get("germline") and os.path.exists(x["germline"]):
out.extend(_get_variant_file(x, ("germline",), "-germline"))
return out
def _maybe_add_hla(algorithm, sample, out):
if sample.get("align_bam") is not None and sample.get("hla") and "call_file" in sample["hla"]:
out.append({"path": sample["hla"]["call_file"],
"type": "csv",
"ext": "hla-%s" % (sample["hla"]["hlacaller"])})
return out
def _maybe_add_heterogeneity(algorithm, sample, out):
for hetinfo in sample.get("heterogeneity", []):
report = hetinfo.get("report")
if report and os.path.exists(report):
out.append({"path": report,
"type": utils.splitext_plus(report)[-1].replace(".", "").replace("-", ""),
"ext": "%s-report" % (hetinfo["caller"])})
for plot_type, plot_file in hetinfo.get("plots", {}).items():
if plot_file and os.path.exists(plot_file):
out.append({"path": plot_file,
"type": utils.splitext_plus(plot_file)[-1].replace(".", ""),
"ext": "%s-%s-plot" % (hetinfo["caller"], plot_type)})
return out
def _get_batch_name(sample):
"""Retrieve batch name for use in SV calling outputs.
Handles multiple batches split via SV calling.
"""
batch = dd.get_batch(sample) or dd.get_sample_name(sample)
if isinstance(batch, (list, tuple)) and len(batch) > 1:
batch = dd.get_sample_name(sample)
return batch
def _maybe_add_sv(algorithm, sample, out):
if sample.get("align_bam") is not None and sample.get("sv"):
batch = _get_batch_name(sample)
for svcall in sample["sv"]:
if svcall.get("variantcaller") == "seq2c":
out.extend(_get_variant_file(svcall, ("coverage",), suffix="-coverage"))
out.extend(_get_variant_file(svcall, ("calls",), sample=batch))
for key in ["vrn_file", "cnr", "cns", "seg", "gainloss",
"segmetrics", "vrn_bed", "vrn_bedpe"]:
out.extend(_get_variant_file(svcall, (key,), sample=batch))
out.extend(_get_variant_file(svcall, ("call_file",), suffix="-call", sample=batch))
out.extend(_get_variant_file(svcall, ("priority",), suffix="-priority", sample=batch))
if "plot" in svcall:
for plot_name, fname in svcall["plot"].items():
ext = os.path.splitext(fname)[-1].replace(".", "")
out.append({"path": fname,
"sample": batch,
"type": ext,
"ext": "%s-%s" % (svcall["variantcaller"], plot_name),
"variantcaller": svcall["variantcaller"]})
if "raw_files" in svcall:
for caller, fname in svcall["raw_files"].items():
ext = utils.splitext_plus(fname)[-1][1:]
out.append({"path": fname,
"sample": batch,
"type": ext,
"ext": "%s-%s" % (svcall["variantcaller"], caller),
"variantcaller": svcall["variantcaller"]})
if utils.file_exists(fname + ".tbi"):
out.append({"path": fname + ".tbi",
"sample": batch,
"type": "vcf.gz.tbi",
"index": True,
"ext": "%s-%s" % (svcall["variantcaller"], caller),
"variantcaller": svcall["variantcaller"]})
for extra in ["subclones", "contamination"]:
svfile = svcall.get(extra)
if svfile and os.path.exists(svfile):
ext = os.path.splitext(svfile)[-1].replace(".", "")
out.append({"path": svfile,
"sample": batch,
"type": ext,
"ext": "%s-%s" % (svcall["variantcaller"], extra),
"variantcaller": svcall["variantcaller"]})
if "sv-validate" in sample:
for vkey in ["csv", "plot", "df"]:
vfile = tz.get_in(["sv-validate", vkey], sample)
if vfile:
to_u = []
if isinstance(vfile, dict):
for svtype, fname in vfile.items():
to_u.append((fname, "-%s" % svtype))
else:
to_u.append((vfile, "-%s" % vkey if vkey in ["df"] else ""))
for vfile, ext in to_u:
vext = os.path.splitext(vfile)[-1].replace(".", "")
out.append({"path": vfile,
"sample": batch,
"type": vext,
"ext": "sv-validate%s" % ext})
return out
def _sample_variant_file_in_population(x):
"""Check if a sample file is the same as the population file.
This is true for batches where we don't extract into samples and do not
run decomposition for gemini.
'"""
if "population" in x:
a = _get_variant_file(x, ("population", "vcf"))
b = _get_variant_file(x, ("vrn_file",))
decomposed = tz.get_in(("population", "decomposed"), x)
if (a and b and not decomposed and len(a) > 0 and len(b) > 0 and
vcfutils.get_samples(a[0]["path"]) == vcfutils.get_samples(b[0]["path"])):
return True
return False
def _get_variant_file(x, key, suffix="", sample=None):
"""Retrieve VCF file with the given key if it exists, handling bgzipped.
"""
out = []
fname = utils.get_in(x, key)
upload_key = list(key)
upload_key[-1] = "do_upload"
do_upload = tz.get_in(tuple(upload_key), x, True)
if fname and do_upload:
if fname.endswith(".vcf.gz"):
out.append({"path": fname,
"type": "vcf.gz",
"ext": "%s%s" % (x["variantcaller"], suffix),
"variantcaller": x["variantcaller"]})
if utils.file_exists(fname + ".tbi"):
out.append({"path": fname + ".tbi",
"type": "vcf.gz.tbi",
"index": True,
"ext": "%s%s" % (x["variantcaller"], suffix),
"variantcaller": x["variantcaller"]})
elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn", ".txt", ".tsv")):
ftype = utils.splitext_plus(fname)[-1][1:]
if ftype == "txt":
ftype = fname.split("-")[-1]
out.append({"path": fname,
"type": ftype,
"ext": "%s%s" % (x["variantcaller"], suffix),
"variantcaller": x["variantcaller"]})
if sample:
out_sample = []
for x in out:
x["sample"] = sample
out_sample.append(x)
return out_sample
else:
return out
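# For illustration (hypothetical values): with
#   x = {"variantcaller": "gatk-haplotype", "vrn_file": "/work/b1-gatk.vcf.gz"}
# _get_variant_file(x, ("vrn_file",)) yields one "vcf.gz" entry with ext
# "gatk-haplotype" (plus a "vcf.gz.tbi" index entry when that file exists on
# disk); a missing key or do_upload=False yields an empty list.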
def _maybe_add_sailfish_files(algorithm, sample, out):
analysis = dd.get_analysis(sample)
sailfish_dir = os.path.join(dd.get_work_dir(sample), "sailfish",
dd.get_sample_name(sample), "quant")
if os.path.exists(sailfish_dir):
out.append({"path": sailfish_dir,
"type": "directory",
"ext": "sailfish"})
return out
def _maybe_add_salmon_files(algorithm, sample, out):
salmon_dir = os.path.join(dd.get_work_dir(sample), "salmon",
dd.get_sample_name(sample), "quant")
if os.path.exists(salmon_dir):
out.append({"path": salmon_dir,
"type": "directory",
"ext": "salmon"})
return out
def _maybe_add_kallisto_files(algorithm, sample, out):
kallisto_dir = os.path.join(dd.get_work_dir(sample), "kallisto",
dd.get_sample_name(sample), "quant")
if os.path.exists(kallisto_dir):
out.append({"path": kallisto_dir,
"type": "directory",
"ext": "kallisto"})
def _maybe_add_ericscript_files(algorithm, sample, out):
config = EricScriptConfig(sample)
if os.path.exists(config.sample_out_dir):
out.append({
'path': config.sample_out_dir,
'type': 'directory',
'ext': 'ericscript',
})
return out
def _flatten_file_with_secondary(input, out_dir):
"""Flatten file representation with secondary indices (CWL-like)
"""
out = []
orig_dir = os.path.dirname(input["base"])
for finfo in [input["base"]] + input.get("secondary", []):
cur_dir = os.path.dirname(finfo)
if cur_dir != orig_dir and cur_dir.startswith(orig_dir):
cur_out_dir = os.path.join(out_dir, cur_dir.replace(orig_dir + "/", ""))
else:
cur_out_dir = out_dir
out.append({"path": finfo, "dir": cur_out_dir})
return out
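# Sketch (assumption, not part of the original file): given a CWL-like record such as
#   {"base": "/qc/fastqc/report.html", "secondary": ["/qc/fastqc/data/summary.txt"]}
# and out_dir "qc/fastqc", _flatten_file_with_secondary returns one entry per file:
#   [{"path": "/qc/fastqc/report.html", "dir": "qc/fastqc"},
#    {"path": "/qc/fastqc/data/summary.txt", "dir": "qc/fastqc/data"}]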
def _maybe_add_summary(algorithm, sample, out):
out = []
if "summary" in sample:
if sample["summary"].get("pdf"):
out.append({"path": sample["summary"]["pdf"],
"type": "pdf",
"ext": "summary"})
if sample["summary"].get("qc"):
for program, finfo in sample["summary"]["qc"].items():
out.extend(_flatten_file_with_secondary(finfo, os.path.join("qc", program)))
if utils.get_in(sample, ("summary", "researcher")):
out.append({"path": sample["summary"]["researcher"],
"type": "tsv",
"sample": run_info.clean_name(utils.get_in(sample, ("upload", "researcher"))),
"ext": "summary"})
return out
def _maybe_add_alignment(algorithm, sample, out):
if _has_alignment_file(algorithm, sample) and dd.get_phenotype(sample) != "germline":
for (fname, ext, isplus) in [(sample.get("work_bam"), "ready", False),
(dd.get_disc_bam(sample), "disc", True),
(dd.get_sr_bam(sample), "sr", True)]:
if fname and os.path.exists(fname):
if fname.endswith("bam"):
ftype, fext = "bam", ".bai"
elif fname.endswith("cram"):
ftype, fext = "cram", ".crai"
else:
raise ValueError("Unexpected alignment file type %s" % fname)
out.append({"path": fname,
"type": ftype,
"plus": isplus,
"ext": ext})
if utils.file_exists(fname + fext):
out.append({"path": fname + fext,
"type": ftype + fext,
"plus": isplus,
"index": True,
"ext": ext})
return out
def _maybe_add_disambiguate(algorithm, sample, out):
if "disambiguate" in sample and _has_alignment_file(algorithm, sample):
for extra_name, fname in sample["disambiguate"].items():
ftype = os.path.splitext(fname)[-1].replace(".", "")
fext = ".bai" if ftype == "bam" else ""
if fname and os.path.exists(fname):
out.append({"path": fname,
"type": ftype,
"plus": True,
"ext": "disambiguate-%s" % extra_name})
if fext and utils.file_exists(fname + fext):
out.append({"path": fname + fext,
"type": ftype + fext,
"plus": True,
"index": True,
"ext": "disambiguate-%s" % extra_name})
return out
def _maybe_add_transcriptome_alignment(sample, out):
transcriptome_bam = dd.get_transcriptome_bam(sample)
if transcriptome_bam and utils.file_exists(transcriptome_bam):
out.append({"path": transcriptome_bam,
"type": "bam",
"ext": "transcriptome"})
return out
def _maybe_add_counts(algorithm, sample, out):
if not dd.get_count_file(sample):
return out
out.append({"path": sample["count_file"],
"type": "counts",
"ext": "ready"})
stats_file = os.path.splitext(sample["count_file"])[0] + ".stats"
if utils.file_exists(stats_file):
out.append({"path": stats_file,
"type": "count_stats",
"ext": "ready"})
return out
def _maybe_add_scrnaseq(algorithm, sample, out):
count_file = dd.get_count_file(sample)
if not count_file:
return out
out.append({"path": count_file,
"type": "mtx"})
out.append({"path": count_file + ".rownames",
"type": "rownames"})
out.append({"path": count_file + ".colnames",
"type": "colnames"})
return out
def _maybe_add_barcode_histogram(algorithm, sample, out):
if not dd.get_count_file(sample):
return out
count_file = sample["count_file"]
histogram_file = os.path.join(os.path.dirname(count_file), "cb-histogram.txt")
out.append({"path": histogram_file,
"type": "tsv",
"ext": "barcodes"})
return out
def _maybe_add_oncofuse(algorithm, sample, out):
if sample.get("oncofuse_file", None) is not None:
out.append({"path": sample["oncofuse_file"],
"type": "oncofuse_outfile",
"ext": "ready"})
return out
def _maybe_add_pizzly(algorithm, sample, out):
pizzly_dir = dd.get_pizzly_dir(sample)
if pizzly_dir:
out.append({"path": pizzly_dir,
"type": "directory",
"ext": "pizzly"})
return out
def _maybe_add_cufflinks(algorithm, sample, out):
if "cufflinks_dir" in sample:
out.append({"path": sample["cufflinks_dir"],
"type": "directory",
"ext": "cufflinks"})
return out
def _maybe_add_stringtie(algorithm, sample, out):
if "stringtie_dir" in sample:
out.append({"path": sample["stringtie_dir"],
"type": "directory",
"ext": "stringtie"})
return out
def _maybe_add_trimming(algorithm, sample, out):
fn = sample["collapse"] + "_size_stats"
if utils.file_exists(fn):
out.append({"path": fn,
"type": "trimming_stats",
"ext": "ready"})
return out
def _maybe_add_seqbuster(algorithm, sample, out):
if "seqbuster" not in sample:
return out
fn = sample["seqbuster"]
if utils.file_exists(fn):
out.append({"path": fn,
"type": "counts",
"ext": "mirbase-ready"})
fn = sample.get("seqbuster_novel")
if fn and utils.file_exists(fn):
out.append({"path": fn,
"type": "counts",
"ext": "novel-ready"})
return out
def _maybe_add_trna(algorithm, sample, out):
if "trna" not in sample:
return out
fn = sample["trna"]
if utils.file_exists(fn):
out.append({"path": fn,
"type": "directory",
"ext": "mintmap"})
return out
def _maybe_add_peaks(algorithm, sample, out):
out_dir = sample.get("peaks_files", {})
for caller in out_dir:
for fn in out_dir[caller]:
if os.path.exists(fn):
out.append({"path": fn,
"dir": caller,
"ext": utils.splitext_plus(fn)[1]})
return out
def _has_alignment_file(algorithm, sample):
return (((algorithm.get("aligner") or algorithm.get("realign")
or algorithm.get("recalibrate") or algorithm.get("bam_clean")
or algorithm.get("mark_duplicates"))) and
sample.get("work_bam") is not None and
"upload_alignment" not in dd.get_tools_off(sample))
# ## File information from full project
def _add_batch(x, sample):
"""Potentially add batch name to an upload file.
"""
added = False
for batch in sorted(dd.get_batches(sample) or [], key=len, reverse=True):
if batch and os.path.basename(x["path"]).startswith("%s-" % batch):
x["batch"] = batch
added = True
break
if not added:
x["batch"] = dd.get_sample_name(sample)
return x
def _get_files_project(sample, upload_config):
"""Retrieve output files associated with an entire analysis project.
"""
out = [{"path": sample["provenance"]["programs"]}]
if os.path.exists(tz.get_in(["provenance", "data"], sample) or ""):
out.append({"path": sample["provenance"]["data"]})
for fname in ["bcbio-nextgen.log", "bcbio-nextgen-commands.log"]:
if os.path.exists(os.path.join(log.get_log_dir(sample["config"]), fname)):
out.append({"path": os.path.join(log.get_log_dir(sample["config"]), fname),
"type": "external_command_log",
"ext": ""})
if "summary" in sample and sample["summary"].get("project"):
out.append({"path": sample["summary"]["project"]})
mixup_check = tz.get_in(["summary", "mixup_check"], sample)
if mixup_check:
out.append({"path": sample["summary"]["mixup_check"],
"type": "directory", "ext": "mixup_check"})
report = os.path.join(dd.get_work_dir(sample), "report")
if utils.file_exists(report):
out.append({"path": report,
"type": "directory", "ext": "report"})
multiqc = tz.get_in(["summary", "multiqc"], sample)
if multiqc:
out.extend(_flatten_file_with_secondary(multiqc, "multiqc"))
if sample.get("seqcluster", {}):
out.append({"path": sample["seqcluster"].get("out_dir"),
"type": "directory", "ext": "seqcluster"})
if sample.get("report", None):
out.append({"path": os.path.dirname(sample["report"]),
"type": "directory", "ext": "seqclusterViz"})
for x in sample.get("variants", []):
if "pop_db" in x:
out.append({"path": x["pop_db"],
"type": "sqlite",
"variantcaller": x["variantcaller"]})
for x in sample.get("variants", []):
if "population" in x:
pop_db = tz.get_in(["population", "db"], x)
if pop_db:
out.append({"path": pop_db,
"type": "sqlite",
"variantcaller": x["variantcaller"]})
suffix = "-annotated-decomposed" if tz.get_in(("population", "decomposed"), x) else "-annotated"
out.extend([_add_batch(x, sample)
for x in _get_variant_file(x, ("population", "vcf"), suffix=suffix)])
for x in sample.get("variants", []):
if x.get("validate") and x["validate"].get("grading_summary"):
out.append({"path": x["validate"]["grading_summary"]})
break
if "coverage" in sample:
cov_db = tz.get_in(["coverage", "summary"], sample)
if cov_db:
out.append({"path": cov_db, "type": "sqlite", "ext": "coverage"})
all_coverage = tz.get_in(["coverage", "all"], sample)
if all_coverage:
out.append({"path": all_coverage, "type": "bed", "ext": "coverage"})
if dd.get_mirna_counts(sample):
out.append({"path": dd.get_mirna_counts(sample)})
if dd.get_isomir_counts(sample):
out.append({"path": dd.get_isomir_counts(sample)})
if dd.get_novel_mirna_counts(sample):
out.append({"path": dd.get_novel_mirna_counts(sample)})
if dd.get_novel_isomir_counts(sample):
out.append({"path": dd.get_novel_isomir_counts(sample)})
if dd.get_combined_counts(sample):
count_file = dd.get_combined_counts(sample)
if sample["analysis"].lower() == "scrna-seq":
out.append({"path": count_file,
"type": "mtx"})
out.append({"path": count_file + ".rownames",
"type": "rownames"})
out.append({"path": count_file + ".colnames",
"type": "colnames"})
else:
out.append({"path": dd.get_combined_counts(sample)})
if dd.get_annotated_combined_counts(sample):
out.append({"path": dd.get_annotated_combined_counts(sample)})
if dd.get_combined_fpkm(sample):
out.append({"path": dd.get_combined_fpkm(sample)})
if dd.get_combined_fpkm_isoform(sample):
out.append({"path": dd.get_combined_fpkm_isoform(sample)})
if dd.get_transcript_assembler(sample):
out.append({"path": dd.get_merged_gtf(sample)})
if dd.get_dexseq_counts(sample):
out.append({"path": dd.get_dexseq_counts(sample)})
if dd.get_express_counts(sample):
out.append({"path": dd.get_express_counts(sample)})
if dd.get_express_fpkm(sample):
out.append({"path": dd.get_express_fpkm(sample)})
if dd.get_express_tpm(sample):
out.append({"path": dd.get_express_tpm(sample)})
if dd.get_isoform_to_gene(sample):
out.append({"path": dd.get_isoform_to_gene(sample)})
if dd.get_square_vcf(sample):
out.append({"path": dd.get_square_vcf(sample)})
if dd.get_sailfish_transcript_tpm(sample):
out.append({"path": dd.get_sailfish_transcript_tpm(sample)})
if dd.get_sailfish_gene_tpm(sample):
out.append({"path": dd.get_sailfish_gene_tpm(sample)})
if dd.get_tx2gene(sample):
out.append({"path": dd.get_tx2gene(sample)})
if dd.get_spikein_counts(sample):
out.append({"path": dd.get_spikein_counts(sample)})
transcriptome_dir = os.path.join(dd.get_work_dir(sample), "inputs",
"transcriptome")
if os.path.exists(transcriptome_dir):
out.append({"path": transcriptome_dir, "type": "directory",
"ext": "transcriptome"})
return _add_meta(out, config=upload_config)
|
biocyberman/bcbio-nextgen
|
bcbio/upload/__init__.py
|
Python
|
mit
| 30,250
|
[
"Galaxy"
] |
19034e60d41885e8bb90954b93ffec867aea6ae330304fd3222cf8c9ae12a056
|
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import six
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Manila specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range M3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the M3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to manila/tests/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
translated_log = re.compile(
r"(.)*LOG\."
r"(audit|debug|error|info|warn|warning|critical|exception)"
r"\("
r"(_|_LE|_LI|_LW)"
r"\(")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _$")
underscore_import_check_multi = re.compile(r"(.)*import (.)*_, (.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
assert_no_xrange_re = re.compile(r"\s*xrange\s*\(")
assert_True = re.compile(r".*assertEqual\(True, .*\)")
no_log_warn = re.compile(r"\s*LOG.warn\(.*")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
CHECK_DESC = 'No check message specified'
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
def no_translate_logs(logical_line):
if translated_log.match(logical_line):
yield(0, "M359 Don't translate log messages!")
class CheckLoggingFormatArgs(BaseASTChecker):
"""Check for improper use of logging format arguments.
LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.",
('volume1', 500))
The format arguments should not be a tuple as it is easy to miss.
"""
CHECK_DESC = 'M310 Log method arguments should not be a tuple.'
LOG_METHODS = [
'debug', 'info',
'warn', 'warning',
'error', 'exception',
'critical', 'fatal',
'trace', 'log'
]
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, six.string_types):
return node
else: # could be Subscript, Call or many more
return None
def visit_Call(self, node):
"""Look for the 'LOG.*' calls."""
# extract the obj_name and method_name
if isinstance(node.func, ast.Attribute):
obj_name = self._find_name(node.func.value)
if isinstance(node.func.value, ast.Name):
method_name = node.func.attr
elif isinstance(node.func.value, ast.Attribute):
obj_name = self._find_name(node.func.value)
method_name = node.func.attr
else: # could be Subscript, Call or many more
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# obj must be a logger instance and method must be a log helper
if (obj_name != 'LOG'
or method_name not in self.LOG_METHODS):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# the call must have arguments
if not len(node.args):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# any argument should not be a tuple
for arg in node.args:
if isinstance(arg, ast.Tuple):
self.add_error(arg)
return super(CheckLoggingFormatArgs, self).generic_visit(node)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
    can't trust unit tests to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
underscore_import_check_multi.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif string_translation.match(logical_line):
yield(0, "M323: Found use of _() without explicit import of _ !")
class CheckForStrUnicodeExc(BaseASTChecker):
"""Checks for the use of str() or unicode() on an exception.
This currently only handles the case where str() or unicode()
is used in the scope of an exception handler. If the exception
is passed into a function, returned from an assertRaises, or
used on an exception created in the same scope, this does not
catch it.
"""
CHECK_DESC = ('M325 str() and unicode() cannot be used on an '
'exception. Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrUnicodeExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
# Python 2
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrUnicodeExc, self).generic_visit(node)
# Python 3
def visit_ExceptHandler(self, node):
if node.name:
self.name.append(node.name)
super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrUnicodeExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str', 'unicode']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
"""Checks for the use of concatenation on a translated string.
Translations should not be concatenated with other strings, but
should instead include the string being added to the translated
string to give the translators the most information.
"""
CHECK_DESC = ('M326 Translated messages cannot be concatenated. '
'String should be included in translated message.')
TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
if self._check_call_names(node.left, self.TRANS_FUNC):
self.add_error(node.left)
elif self._check_call_names(node.right, self.TRANS_FUNC):
self.add_error(node.right)
super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, physical_line, filename):
if pep8.noqa(physical_line):
return
if re.match(oslo_namespace_imports, logical_line):
msg = ("M333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
def dict_constructor_with_list_copy(logical_line):
msg = ("M336: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
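# Illustration (sketch, not part of the original file): lines the M336 regex flags
# versus the preferred form.
#   dict([(i, i * 2) for i in range(3)])   # flagged: dict() over key/value pairs
#   {i: i * 2 for i in range(3)}           # preferred: dict comprehension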
def no_xrange(logical_line):
if assert_no_xrange_re.match(logical_line):
yield(0, "M337: Do not use xrange().")
def validate_assertTrue(logical_line):
if re.match(assert_True, logical_line):
msg = ("M313: Unit tests should use assertTrue(value) instead"
" of using assertEqual(True, value).")
yield(0, msg)
def check_uuid4(logical_line):
"""Generating UUID
Use oslo_utils.uuidutils to generate UUID instead of uuid4().
M354
"""
msg = ("M354: Use oslo_utils.uuidutils to generate UUID instead "
"of uuid4().")
if "uuid4()." in logical_line:
return
if "uuid4()" in logical_line:
yield (0, msg)
def no_log_warn_check(logical_line):
"""Disallow 'LOG.warn'
Deprecated LOG.warn(), instead use LOG.warning
    https://bugs.launchpad.net/manila/+bug/1508442
M338
"""
msg = ("M338: LOG.warn is deprecated, use LOG.warning.")
if re.match(no_log_warn, logical_line):
yield(0, msg)
def factory(register):
register(check_explicit_underscore_import)
register(no_translate_logs)
register(CheckForStrUnicodeExc)
register(CheckLoggingFormatArgs)
register(CheckForTransAdd)
register(check_oslo_namespace_imports)
register(dict_constructor_with_list_copy)
register(no_xrange)
register(validate_assertTrue)
register(check_uuid4)
register(no_log_warn_check)
|
bswartz/manila
|
manila/hacking/checks.py
|
Python
|
apache-2.0
| 11,630
|
[
"VisIt"
] |
ca358c3f2095882526e972a0dc86a2113cd4fdcce7bc06976b1078bda61aa75d
|
#!/usr/bin/env python
# encoding: utf-8
#| Copyright Inria May 2015
#| This project has received funding from the European Research Council (ERC) under
#| the European Union's Horizon 2020 research and innovation programme (grant
#| agreement No 637972) - see http://www.resibots.eu
#|
#| Contributor(s):
#| - Jean-Baptiste Mouret ([email protected])
#| - Antoine Cully ([email protected])
#| - Konstantinos Chatzilygeroudis ([email protected])
#| - Federico Allocati ([email protected])
#| - Vaios Papaspyros ([email protected])
#| - Roberto Rama ([email protected])
#|
#| This software is a computer library whose purpose is to optimize continuous,
#| black-box functions. It mainly implements Gaussian processes and Bayesian
#| optimization.
#| Main repository: http://github.com/resibots/limbo
#| Documentation: http://www.resibots.eu/limbo
#|
#| This software is governed by the CeCILL-C license under French law and
#| abiding by the rules of distribution of free software. You can use,
#| modify and/ or redistribute the software under the terms of the CeCILL-C
#| license as circulated by CEA, CNRS and INRIA at the following URL
#| "http://www.cecill.info".
#|
#| As a counterpart to the access to the source code and rights to copy,
#| modify and redistribute granted by the license, users are provided only
#| with a limited warranty and the software's author, the holder of the
#| economic rights, and the successive licensors have only limited
#| liability.
#|
#| In this respect, the user's attention is drawn to the risks associated
#| with loading, using, modifying and/or developing or reproducing the
#| software by the user in light of its specific status of free software,
#| that may mean that it is complicated to manipulate, and that also
#| therefore means that it is reserved for developers and experienced
#| professionals having in-depth computer knowledge. Users are therefore
#| encouraged to load and test the software's suitability as regards their
#| requirements in conditions enabling the security of their systems and/or
#| data to be ensured and, more generally, to use and operate it in the
#| same conditions as regards security.
#|
#| The fact that you are presently reading this means that you have had
#| knowledge of the CeCILL-C license and that you accept its terms.
#|
#! /usr/bin/env python
# JB Mouret - 2009
"""
Quick n dirty sferes2 detection
"""
from waflib.Configure import conf
def options(opt):
opt.add_option('--sferes', type='string', help='path to sferes2', dest='sferes')
@conf
def check_sferes(self, *k, **kw):
if self.options.sferes:
includes_sferes = [self.options.sferes]
libpath_sferes = [self.options.sferes + '/build/sferes']
else:
return
self.start_msg('Checking sferes includes')
try:
self.find_file('sferes/ea/ea.hpp', includes_sferes)
self.end_msg(True)
except:
self.end_msg(False)
return
self.start_msg('Checking sferes libs')
try:
self.find_file('libsferes2.a', libpath_sferes)
self.end_msg(True)
except:
self.end_msg(False)
return
self.env.STLIBPATH_SFERES = libpath_sferes
self.env.STLIB_SFERES = ["sferes2"]
self.env.INCLUDES_SFERES = includes_sferes
self.env.DEFINES_SFERES = ["USE_SFERES", "SFERES_FAST_DOMSORT"]
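# Usage sketch (assumption, not part of the original tool): from a project that
# loads this waf tool, point waf at a sferes2 checkout and call the check from
# the wscript's configure step, e.g.
#   ./waf configure --sferes=/path/to/sferes2
#   def configure(conf): conf.check_sferes()
# Build targets can then add use='SFERES' to pick up the includes and static lib.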
|
jbmouret/limbo
|
waf_tools/sferes.py
|
Python
|
gpl-3.0
| 3,417
|
[
"Gaussian"
] |
e579897f039328c33b3d840cb3666830b436906c6d60f83a8b5ea7c643045da8
|
'''
Created on May 27, 2014
@author: Francesco Nero
'''
from . import settings
network = None
class Network():
def __init__(self, name):
global network
print "Creating network: "+name
self.name = name
self.nodes = {}
self.connections = []
network = self
def make(self, neuron_name, useless=None, also_useless=None, tau_rc=None, tau_ref=None, max_rate=None,
intercept=None, radius=1.0):
ex = radius > 0
#print "Creating neuron: "+neuron_name+" | Excitatory: "+str(ex)
self.nodes[neuron_name] = (Neuron(neuron_name, ex))
def make_input(self, input_name, values=None):
#print "Creating input: "+input_name
self.nodes[input_name] = (Input(input_name))
#TODO manage delay better
def connect(self, nodeA, nodeB, transform=None, delay=None):
delay = settings.default_connection_delay()
#print "Connecting "+nodeA+" "+nodeB+"\tdelay="+str(delay)
self.connections.append(Connection(self.nodes[nodeA], self.nodes[nodeB], delay=delay))
def add_to_nengo(self):
print "Finished creating network"
def get_neurons(self):
return [x for x in self.nodes if isinstance(network.nodes[x], Neuron)]
def get_inputs(self):
return [x for x in self.nodes if isinstance(network.nodes[x], Input)]
def get_connections(self):
return [x.nodeA.name+"\t"+x.nodeB.name+"\t"+("ECC" if x.excitatory else "INI")+"\tdelay:"+str(x.delay)
for x in self.connections]
class Node():
def __init__(self, name, excitatory=True):
self.name = name
self.excitatory = excitatory
self.ID = None
class Neuron(Node):
pass
class Input(Node):
pass
class Connection():
def __init__(self, nodeA, nodeB, delay):
self.nodeA = nodeA
self.nodeB = nodeB
self.excitatory = nodeA.excitatory
self.delay = delay
class ConstantFunction():
def __init__(self, number_of_outputs, value):
self.number_of_outputs = number_of_outputs
self.value = value
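# Usage sketch (assumption, not part of the original module): the classes above
# mimic the nengo 1.x scripting API while only recording nodes and connections
# for a later export to NEST, e.g.
#   net = Network("demo")
#   net.make("A", tau_rc=0.02, tau_ref=0.002, radius=1.0)   # radius > 0 => excitatory
#   net.make_input("stim", values=[0.5])
#   net.connect("stim", "A")
#   print net.get_connections()   # Python 2 print, matching the module above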
|
francesconero/nengo2nest
|
nengo2nest/nef.py
|
Python
|
gpl-2.0
| 2,126
|
[
"NEURON"
] |
c843e298a120c09b8bb2235e5f0f114d402da88c1563c2a4d267af94d34c4758
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""SPM wrappers for preprocessing data
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
# Standard library imports
from copy import deepcopy
import os
# Third-party imports
import numpy as np
# Local imports
from nipype.interfaces.base import (OutputMultiPath, TraitedSpec, isdefined,
traits, InputMultiPath, File)
from nipype.interfaces.spm.base import (SPMCommand, scans_for_fname,
func_is_3d,
scans_for_fnames, SPMCommandInputSpec)
from nipype.utils.filemanip import (fname_presuffix, filename_to_list,
list_to_filename, split_filename)
class SliceTimingInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)), field='scans',
desc='list of filenames to apply slice timing',
mandatory=True, copyfile=False)
num_slices = traits.Int(field='nslices',
desc='number of slices in a volume',
mandatory=True)
time_repetition = traits.Float(field='tr',
desc=('time between volume acquisitions '
'(start to start time)'),
mandatory=True)
time_acquisition = traits.Float(field='ta',
desc=('time of volume acquisition. usually '
'calculated as TR-(TR/num_slices)'),
mandatory=True)
slice_order = traits.List(traits.Int(), field='so',
desc='1-based order in which slices are acquired',
mandatory=True)
ref_slice = traits.Int(field='refslice',
desc='1-based Number of the reference slice',
mandatory=True)
out_prefix = traits.String('a', field='prefix', usedefault=True,
desc='slicetimed output prefix')
class SliceTimingOutputSpec(TraitedSpec):
timecorrected_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='slice time corrected files')
class SliceTiming(SPMCommand):
"""Use spm to perform slice timing correction.
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=19
Examples
--------
>>> from nipype.interfaces.spm import SliceTiming
>>> st = SliceTiming()
>>> st.inputs.in_files = 'functional.nii'
>>> st.inputs.num_slices = 32
>>> st.inputs.time_repetition = 6.0
>>> st.inputs.time_acquisition = 6. - 6./32.
>>> st.inputs.slice_order = range(32,0,-1)
>>> st.inputs.ref_slice = 1
>>> st.run() # doctest: +SKIP
"""
input_spec = SliceTimingInputSpec
output_spec = SliceTimingOutputSpec
_jobtype = 'temporal'
_jobname = 'st'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(filename_to_list(val),
keep4d=False,
separate_sessions=True)
return super(SliceTiming, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['timecorrected_files'] = []
filelist = filename_to_list(self.inputs.in_files)
for f in filelist:
if isinstance(f, list):
run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f]
else:
run = fname_presuffix(f, prefix=self.inputs.out_prefix)
outputs['timecorrected_files'].append(run)
return outputs
class RealignInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)), field='data',
mandatory=True, copyfile=True,
desc='list of filenames to realign')
jobtype = traits.Enum('estwrite', 'estimate', 'write',
desc='one of: estimate, write, estwrite',
usedefault=True)
quality = traits.Range(low=0.0, high=1.0, field='eoptions.quality',
desc='0.1 = fast, 1.0 = precise')
fwhm = traits.Range(low=0.0, field='eoptions.fwhm',
desc='gaussian smoothing kernel width')
separation = traits.Range(low=0.0, field='eoptions.sep',
desc='sampling separation in mm')
register_to_mean = traits.Bool(True, field='eoptions.rtm',
mandatory=True, usedefault=True,
desc='Indicate whether realignment is done to the mean image')
weight_img = File(exists=True, field='eoptions.weight',
desc='filename of weighting image')
interp = traits.Range(low=0, high=7, field='eoptions.interp',
desc='degree of b-spline used for interpolation')
wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='eoptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_which = traits.ListInt([2, 1], field='roptions.which',
minlen=2, maxlen=2, usedefault=True,
desc='determines which images to reslice')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='roptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_mask = traits.Bool(field='roptions.mask',
desc='True/False mask output image')
out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
desc='realigned output prefix')
class RealignOutputSpec(TraitedSpec):
mean_image = File(exists=True, desc='Mean image file from the realignment')
modified_in_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='Copies of all files passed to in_files.\
Headers will have been modified to align all\
images with the first, or optionally to first\
do that, extract a mean image, and re-align to\
that mean image.')
realigned_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
desc='If jobtype is write or estwrite, these will be the\
resliced files. Otherwise, they will be copies of\
in_files that have had their headers rewritten.')
realignment_parameters = OutputMultiPath(File(exists=True),
desc='Estimated translation and rotation parameters')
class Realign(SPMCommand):
"""Use spm_realign for estimating within modality rigid body alignment
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> realign = spm.Realign()
>>> realign.inputs.in_files = 'functional.nii'
>>> realign.inputs.register_to_mean = True
>>> realign.run() # doctest: +SKIP
"""
input_spec = RealignInputSpec
output_spec = RealignOutputSpec
_jobtype = 'spatial'
_jobname = 'realign'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'in_files':
return scans_for_fnames(val,
keep4d=True,
separate_sessions=True)
return super(Realign, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Realign, self)._parse_inputs()
return [{'%s' % (self.inputs.jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
resliced_all = self.inputs.write_which[0] > 0
resliced_mean = self.inputs.write_which[1] > 0
if isdefined(self.inputs.in_files):
outputs['realignment_parameters'] = []
for imgf in self.inputs.in_files:
if isinstance(imgf, list):
tmp_imgf = imgf[0]
else:
tmp_imgf = imgf
outputs['realignment_parameters'].append(fname_presuffix(tmp_imgf,
prefix='rp_',
suffix='.txt',
use_ext=False))
if not isinstance(imgf, list) and func_is_3d(imgf):
break
if self.inputs.jobtype == "estimate":
outputs['realigned_files'] = self.inputs.in_files
if self.inputs.jobtype == "estimate" or self.inputs.jobtype == "estwrite":
outputs['modified_in_files'] = self.inputs.in_files
if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
if isinstance(self.inputs.in_files[0], list):
first_image = self.inputs.in_files[0][0]
else:
first_image = self.inputs.in_files[0]
if resliced_mean:
outputs['mean_image'] = fname_presuffix(first_image, prefix='mean')
if resliced_all:
outputs['realigned_files'] = []
for idx, imgf in enumerate(filename_to_list(self.inputs.in_files)):
realigned_run = []
if isinstance(imgf, list):
for i, inner_imgf in enumerate(filename_to_list(imgf)):
newfile = fname_presuffix(inner_imgf,
prefix=self.inputs.out_prefix)
if os.path.exists(newfile):
realigned_run.append(newfile)
continue
if (idx == 0) and (i == 0) and \
func_is_3d(inner_imgf):
realigned_run.append(fname_presuffix(inner_imgf,
prefix=''))
else:
realigned_run = fname_presuffix(imgf,
prefix=self.inputs.out_prefix)
if (idx == 0) and func_is_3d(imgf):
realigned_run = fname_presuffix(imgf, prefix='')
outputs['realigned_files'].append(realigned_run)
return outputs
class CoregisterInputSpec(SPMCommandInputSpec):
target = File(exists=True, field='ref', mandatory=True,
desc='reference file to register to', copyfile=False)
source = InputMultiPath(File(exists=True), field='source',
desc='file to register to target', copyfile=True,
mandatory=True)
jobtype = traits.Enum('estwrite', 'estimate', 'write',
desc='one of: estimate, write, estwrite',
usedefault=True)
apply_to_files = InputMultiPath(File(exists=True), field='other',
desc='files to apply transformation to',
copyfile=True)
cost_function = traits.Enum('mi', 'nmi', 'ecc', 'ncc',
field='eoptions.cost_fun',
desc="""cost function, one of: 'mi' - Mutual Information,
'nmi' - Normalised Mutual Information,
'ecc' - Entropy Correlation Coefficient,
'ncc' - Normalised Cross Correlation""")
fwhm = traits.List(traits.Float(), minlen=2, maxlen=2,
field='eoptions.fwhm',
desc='gaussian smoothing kernel width (mm)')
separation = traits.List(traits.Float(), field='eoptions.sep',
desc='sampling separation in mm')
tolerance = traits.List(traits.Float(), field='eoptions.tol',
desc='acceptable tolerance for each of 12 params')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
field='roptions.wrap',
desc='Check if interpolation should wrap in [x,y,z]')
write_mask = traits.Bool(field='roptions.mask',
desc='True/False mask output image')
out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
desc='coregistered output prefix')
class CoregisterOutputSpec(TraitedSpec):
coregistered_source = OutputMultiPath(File(exists=True),
desc='Coregistered source files')
coregistered_files = OutputMultiPath(File(exists=True),
desc='Coregistered other files')
class Coregister(SPMCommand):
"""Use spm_coreg for estimating cross-modality rigid body alignment
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=39
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> coreg = spm.Coregister()
>>> coreg.inputs.target = 'functional.nii'
>>> coreg.inputs.source = 'structural.nii'
>>> coreg.run() # doctest: +SKIP
"""
input_spec = CoregisterInputSpec
output_spec = CoregisterOutputSpec
_jobtype = 'spatial'
_jobname = 'coreg'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'target' or (opt == 'source' and self.inputs.jobtype != "write"):
return scans_for_fnames(filename_to_list(val),
keep4d=True)
if opt == 'apply_to_files':
return np.array(filename_to_list(val), dtype=object)
if opt == 'source' and self.inputs.jobtype == "write":
if isdefined(self.inputs.apply_to_files):
return scans_for_fnames(val+self.inputs.apply_to_files)
else:
return scans_for_fnames(val)
return super(Coregister, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm coregister options if set to None ignore
"""
if self.inputs.jobtype == "write":
einputs = super(Coregister, self)._parse_inputs(skip=('jobtype', 'apply_to_files'))
else:
einputs = super(Coregister, self)._parse_inputs(skip=('jobtype'))
jobtype = self.inputs.jobtype
return [{'%s' % (jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
if self.inputs.jobtype == "estimate":
if isdefined(self.inputs.apply_to_files):
outputs['coregistered_files'] = self.inputs.apply_to_files
outputs['coregistered_source'] = self.inputs.source
elif self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
if isdefined(self.inputs.apply_to_files):
outputs['coregistered_files'] = []
for imgf in filename_to_list(self.inputs.apply_to_files):
outputs['coregistered_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
outputs['coregistered_source'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['coregistered_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
return outputs
class NormalizeInputSpec(SPMCommandInputSpec):
template = File(exists=True, field='eoptions.template',
desc='template file to normalize to',
mandatory=True, xor=['parameter_file'],
copyfile=False)
source = InputMultiPath(File(exists=True), field='subj.source',
desc='file to normalize to template',
xor=['parameter_file'],
mandatory=True, copyfile=True)
jobtype = traits.Enum('estwrite', 'est', 'write',
desc='one of: est, write, estwrite (opt, estwrite)',
usedefault=True)
apply_to_files = InputMultiPath(traits.Either(File(exists=True),
traits.List(File(exists=True))),
field='subj.resample',
desc='files to apply transformation to (opt)',
copyfile=True)
parameter_file = File(field='subj.matname', mandatory=True,
xor=['source', 'template'],
desc='normalization parameter file*_sn.mat', copyfile=False)
source_weight = File(field='subj.wtsrc',
desc='name of weighting image for source (opt)', copyfile=False)
template_weight = File(field='eoptions.weight',
desc='name of weighting image for template (opt)', copyfile=False)
source_image_smoothing = traits.Float(field='eoptions.smosrc',
desc='source smoothing (opt)')
template_image_smoothing = traits.Float(field='eoptions.smoref',
desc='template smoothing (opt)')
affine_regularization_type = traits.Enum('mni', 'size', 'none', field='eoptions.regype',
desc='mni, size, none (opt)')
DCT_period_cutoff = traits.Float(field='eoptions.cutoff',
desc='Cutoff of for DCT bases (opt)')
nonlinear_iterations = traits.Int(field='eoptions.nits',
desc='Number of iterations of nonlinear warping (opt)')
nonlinear_regularization = traits.Float(field='eoptions.reg',
desc='the amount of the regularization for the nonlinear part of the normalization (opt)')
write_preserve = traits.Bool(field='roptions.preserve',
desc='True/False warped images are modulated (opt,)')
write_bounding_box = traits.List(traits.List(traits.Float(), minlen=3,
maxlen=3),
field='roptions.bb', minlen=2, maxlen=2,
desc='3x2-element list of lists (opt)')
write_voxel_sizes = traits.List(traits.Float(), field='roptions.vox',
minlen=3, maxlen=3,
desc='3-element list (opt)')
write_interp = traits.Range(low=0, high=7, field='roptions.interp',
desc='degree of b-spline used for interpolation')
write_wrap = traits.List(traits.Int(), field='roptions.wrap',
desc=('Check if interpolation should wrap in [x,y,z] '
'- list of bools (opt)'))
out_prefix = traits.String('w', field='roptions.prefix', usedefault=True,
desc='normalized output prefix')
class NormalizeOutputSpec(TraitedSpec):
normalization_parameters = OutputMultiPath(File(exists=True), desc='MAT files containing the normalization parameters')
normalized_source = OutputMultiPath(File(exists=True), desc='Normalized source files')
normalized_files = OutputMultiPath(File(exists=True), desc='Normalized other files')
class Normalize(SPMCommand):
"""use spm_normalise for warping an image to a template
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=51
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> norm = spm.Normalize()
>>> norm.inputs.source = 'functional.nii'
>>> norm.run() # doctest: +SKIP
"""
input_spec = NormalizeInputSpec
output_spec = NormalizeOutputSpec
_jobtype = 'spatial'
_jobname = 'normalise'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'template':
return scans_for_fname(filename_to_list(val))
if opt == 'source':
return scans_for_fname(filename_to_list(val))
if opt == 'apply_to_files':
return scans_for_fnames(filename_to_list(val))
if opt == 'parameter_file':
return np.array([list_to_filename(val)], dtype=object)
if opt in ['write_wrap']:
if len(val) != 3:
raise ValueError('%s must have 3 elements' % opt)
return super(Normalize, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Normalize, self)._parse_inputs(skip=('jobtype',
'apply_to_files'))
if isdefined(self.inputs.apply_to_files):
inputfiles = deepcopy(self.inputs.apply_to_files)
if isdefined(self.inputs.source):
inputfiles.extend(self.inputs.source)
einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles)
jobtype = self.inputs.jobtype
if jobtype in ['estwrite', 'write']:
if not isdefined(self.inputs.apply_to_files):
if isdefined(self.inputs.source):
einputs[0]['subj']['resample'] = scans_for_fname(self.inputs.source)
return [{'%s' % (jobtype): einputs[0]}]
def _list_outputs(self):
outputs = self._outputs().get()
jobtype = self.inputs.jobtype
if jobtype.startswith('est'):
outputs['normalization_parameters'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['normalization_parameters'].append(fname_presuffix(imgf, suffix='_sn.mat', use_ext=False))
outputs['normalization_parameters'] = list_to_filename(outputs['normalization_parameters'])
if self.inputs.jobtype == "estimate":
if isdefined(self.inputs.apply_to_files):
outputs['normalized_files'] = self.inputs.apply_to_files
outputs['normalized_source'] = self.inputs.source
elif 'write' in self.inputs.jobtype:
outputs['normalized_files'] = []
if isdefined(self.inputs.apply_to_files):
filelist = filename_to_list(self.inputs.apply_to_files)
for f in filelist:
if isinstance(f, list):
run = [fname_presuffix(in_f, prefix=self.inputs.out_prefix) for in_f in f]
else:
run = [fname_presuffix(f, prefix=self.inputs.out_prefix)]
outputs['normalized_files'].extend(run)
if isdefined(self.inputs.source):
outputs['normalized_source'] = []
for imgf in filename_to_list(self.inputs.source):
outputs['normalized_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
return outputs
class SegmentInputSpec(SPMCommandInputSpec):
data = InputMultiPath(File(exists=True), field='data', desc='one scan per subject',
copyfile=False, mandatory=True)
gm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.GM',
desc="""Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
wm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.WM',
desc="""Options to produce white matter images: c2*.img, wc2*.img and mwc2*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
csf_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.CSF',
desc="""Options to produce CSF images: c3*.img, wc3*.img and mwc3*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
save_bias_corrected = traits.Bool(field='output.biascor',
desc='True/False produce a bias corrected image')
clean_masks = traits.Enum('no', 'light', 'thorough', field='output.cleanup',
desc="clean using estimated brain mask ('no','light','thorough')")
tissue_prob_maps = traits.List(File(exists=True), field='opts.tpm',
desc='list of gray, white & csf prob. (opt,)')
gaussians_per_class = traits.List(traits.Int(), field='opts.ngaus',
desc='num Gaussians capture intensity distribution')
affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', '', field='opts.regtype',
                                        desc='Possible options: "mni", "eastern", "subj", "none" (no regularisation), "" (no affine registration)')
warping_regularization = traits.Float(field='opts.warpreg',
desc='Controls balance between parameters and data')
warp_frequency_cutoff = traits.Float(field='opts.warpco', desc='Cutoff of DCT bases')
bias_regularization = traits.Enum(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, field='opts.biasreg',
desc='no(0) - extremely heavy (10)')
bias_fwhm = traits.Enum(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130,
'Inf', field='opts.biasfwhm',
desc='FWHM of Gaussian smoothness of bias')
sampling_distance = traits.Float(field='opts.samp',
desc='Sampling distance on data for parameter estimation')
mask_image = File(exists=True, field='opts.msk',
desc='Binary image to restrict parameter estimation ')
class SegmentOutputSpec(TraitedSpec):
native_gm_image = File(desc='native space grey probability map')
normalized_gm_image = File(desc='normalized grey probability map',)
modulated_gm_image = File(desc='modulated, normalized grey probability map')
native_wm_image = File(desc='native space white probability map')
normalized_wm_image = File(desc='normalized white probability map')
modulated_wm_image = File(desc='modulated, normalized white probability map')
native_csf_image = File(desc='native space csf probability map')
normalized_csf_image = File(desc='normalized csf probability map')
modulated_csf_image = File(desc='modulated, normalized csf probability map')
modulated_input_image = File(deprecated='0.10',
new_name='bias_corrected_image',
desc='bias-corrected version of input image')
bias_corrected_image = File(desc='bias-corrected version of input image')
transformation_mat = File(exists=True, desc='Normalization transformation')
inverse_transformation_mat = File(exists=True,
desc='Inverse normalization info')
class Segment(SPMCommand):
"""use spm_segment to separate structural images into different
tissue classes.
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=43
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> seg = spm.Segment()
>>> seg.inputs.data = 'structural.nii'
>>> seg.run() # doctest: +SKIP
"""
_jobtype = 'spatial'
_jobname = 'preproc'
input_spec = SegmentInputSpec
output_spec = SegmentOutputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
clean_masks_dict = {'no': 0, 'light': 1, 'thorough': 2}
if opt in ['data', 'tissue_prob_maps']:
if isinstance(val, list):
return scans_for_fnames(val)
else:
return scans_for_fname(val)
if 'output_type' in opt:
return [int(v) for v in val]
if opt == 'mask_image':
return scans_for_fname(val)
if opt == 'clean_masks':
return clean_masks_dict[val]
return super(Segment, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
f = self.inputs.data[0]
for tidx, tissue in enumerate(['gm', 'wm', 'csf']):
outtype = '%s_output_type' % tissue
if isdefined(getattr(self.inputs, outtype)):
for idx, (image, prefix) in enumerate([('modulated', 'mw'),
('normalized', 'w'),
('native', '')]):
if getattr(self.inputs, outtype)[idx]:
outfield = '%s_%s_image' % (image, tissue)
outputs[outfield] = fname_presuffix(f,
prefix='%sc%d' % (prefix,
tidx+1))
if isdefined(self.inputs.save_bias_corrected) and \
self.inputs.save_bias_corrected:
outputs['bias_corrected_image'] = fname_presuffix(f, prefix='m')
t_mat = fname_presuffix(f, suffix='_seg_sn.mat', use_ext=False)
outputs['transformation_mat'] = t_mat
invt_mat = fname_presuffix(f, suffix='_seg_inv_sn.mat', use_ext=False)
outputs['inverse_transformation_mat'] = invt_mat
return outputs
class NewSegmentInputSpec(SPMCommandInputSpec):
channel_files = InputMultiPath(File(exists=True),
desc="A list of files to be segmented",
field='channel', copyfile=False, mandatory=True)
channel_info = traits.Tuple(traits.Float(), traits.Float(),
traits.Tuple(traits.Bool, traits.Bool),
desc="""A tuple with the following fields:
- bias regularisation (0-10)
- FWHM of Gaussian smoothness of bias
- which maps to save (Corrected, Field) - a tuple of two boolean values""",
field='channel')
tissues = traits.List(traits.Tuple(traits.Tuple(File(exists=True), traits.Int()), traits.Int(),
traits.Tuple(traits.Bool, traits.Bool), traits.Tuple(traits.Bool, traits.Bool)),
desc="""A list of tuples (one per tissue) with the following fields:
- tissue probability map (4D), 1-based index to frame
- number of gaussians
- which maps to save [Native, DARTEL] - a tuple of two boolean values
- which maps to save [Modulated, Unmodulated] - a tuple of two boolean values""",
field='tissue')
affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', field='warp.affreg',
desc='mni, eastern, subj, none ')
warping_regularization = traits.Float(field='warp.reg',
                                          desc='Approximate distance between sampling points.')
sampling_distance = traits.Float(field='warp.samp',
desc='Sampling distance on data for parameter estimation')
write_deformation_fields = traits.List(traits.Bool(), minlen=2, maxlen=2, field='warp.write',
desc="Which deformation fields to write:[Inverse, Forward]")
class NewSegmentOutputSpec(TraitedSpec):
native_class_images = traits.List(traits.List(File(exists=True)), desc='native space probability maps')
dartel_input_images = traits.List(traits.List(File(exists=True)), desc='dartel imported class images')
normalized_class_images = traits.List(traits.List(File(exists=True)), desc='normalized class images')
modulated_class_images = traits.List(traits.List(File(exists=True)), desc='modulated+normalized class images')
transformation_mat = OutputMultiPath(File(exists=True), desc='Normalization transformation')
bias_corrected_images = OutputMultiPath(File(exists=True), desc='bias corrected images')
bias_field_images = OutputMultiPath(File(exists=True), desc='bias field images')
forward_deformation_field = OutputMultiPath(File(exists=True))
inverse_deformation_field = OutputMultiPath(File(exists=True))
class NewSegment(SPMCommand):
"""Use spm_preproc8 (New Segment) to separate structural images into different
tissue classes. Supports multiple modalities.
NOTE: This interface currently supports single channel input only
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=185
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> seg = spm.NewSegment()
>>> seg.inputs.channel_files = 'structural.nii'
>>> seg.inputs.channel_info = (0.0001, 60, (True, True))
>>> seg.run() # doctest: +SKIP
For VBM pre-processing [http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf],
TPM.nii should be replaced by /path/to/spm8/toolbox/Seg/TPM.nii
>>> seg = NewSegment()
>>> seg.inputs.channel_files = 'structural.nii'
>>> tissue1 = (('TPM.nii', 1), 2, (True,True), (False, False))
>>> tissue2 = (('TPM.nii', 2), 2, (True,True), (False, False))
>>> tissue3 = (('TPM.nii', 3), 2, (True,False), (False, False))
>>> tissue4 = (('TPM.nii', 4), 2, (False,False), (False, False))
>>> tissue5 = (('TPM.nii', 5), 2, (False,False), (False, False))
>>> seg.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5]
>>> seg.run() # doctest: +SKIP
"""
input_spec = NewSegmentInputSpec
output_spec = NewSegmentOutputSpec
_jobtype = 'tools'
_jobname = 'preproc8'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['channel_files', 'channel_info']:
# structure have to be recreated, because of some weird traits error
new_channel = {}
new_channel['vols'] = scans_for_fnames(self.inputs.channel_files)
if isdefined(self.inputs.channel_info):
info = self.inputs.channel_info
new_channel['biasreg'] = info[0]
new_channel['biasfwhm'] = info[1]
new_channel['write'] = [int(info[2][0]), int(info[2][1])]
return [new_channel]
elif opt == 'tissues':
new_tissues = []
for tissue in val:
new_tissue = {}
new_tissue['tpm'] = np.array([','.join([tissue[0][0], str(tissue[0][1])])], dtype=object)
new_tissue['ngaus'] = tissue[1]
new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])]
new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])]
new_tissues.append(new_tissue)
return new_tissues
elif opt == 'write_deformation_fields':
return super(NewSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
else:
return super(NewSegment, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['native_class_images'] = []
outputs['dartel_input_images'] = []
outputs['normalized_class_images'] = []
outputs['modulated_class_images'] = []
outputs['transformation_mat'] = []
outputs['bias_corrected_images'] = []
outputs['bias_field_images'] = []
outputs['inverse_deformation_field'] = []
outputs['forward_deformation_field'] = []
n_classes = 5
if isdefined(self.inputs.tissues):
n_classes = len(self.inputs.tissues)
for i in range(n_classes):
outputs['native_class_images'].append([])
outputs['dartel_input_images'].append([])
outputs['normalized_class_images'].append([])
outputs['modulated_class_images'].append([])
for filename in self.inputs.channel_files:
pth, base, ext = split_filename(filename)
if isdefined(self.inputs.tissues):
for i, tissue in enumerate(self.inputs.tissues):
if tissue[2][0]:
outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i+1, base)))
if tissue[2][1]:
outputs['dartel_input_images'][i].append(os.path.join(pth, "rc%d%s.nii" % (i+1, base)))
if tissue[3][0]:
outputs['normalized_class_images'][i].append(os.path.join(pth, "wc%d%s.nii" % (i+1, base)))
if tissue[3][1]:
outputs['modulated_class_images'][i].append(os.path.join(pth, "mwc%d%s.nii" % (i+1, base)))
else:
for i in range(n_classes):
outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i+1, base)))
outputs['transformation_mat'].append(os.path.join(pth, "%s_seg8.mat" % base))
if isdefined(self.inputs.write_deformation_fields):
if self.inputs.write_deformation_fields[0]:
outputs['inverse_deformation_field'].append(os.path.join(pth, "iy_%s.nii" % base))
if self.inputs.write_deformation_fields[1]:
outputs['forward_deformation_field'].append(os.path.join(pth, "y_%s.nii" % base))
if isdefined(self.inputs.channel_info):
if self.inputs.channel_info[2][0]:
outputs['bias_corrected_images'].append(os.path.join(pth, "m%s.nii" % (base)))
if self.inputs.channel_info[2][1]:
outputs['bias_field_images'].append(os.path.join(pth, "BiasField_%s.nii" % (base)))
return outputs
class SmoothInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(File(exists=True), field='data',
desc='list of files to smooth',
mandatory=True, copyfile=False)
fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
traits.Float(), field='fwhm',
desc='3-list of fwhm for each dimension (opt)')
data_type = traits.Int(field='dtype',
desc='Data type of the output images (opt)')
implicit_masking = traits.Bool(field='im',
desc=('A mask implied by a particular '
'voxel value'))
out_prefix = traits.String('s', field='prefix', usedefault=True,
desc='smoothed output prefix')
class SmoothOutputSpec(TraitedSpec):
smoothed_files = OutputMultiPath(File(exists=True), desc='smoothed files')
class Smooth(SPMCommand):
"""Use spm_smooth for 3D Gaussian smoothing of image volumes.
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=57
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> smooth = spm.Smooth()
>>> smooth.inputs.in_files = 'functional.nii'
>>> smooth.inputs.fwhm = [4, 4, 4]
>>> smooth.run() # doctest: +SKIP
"""
input_spec = SmoothInputSpec
output_spec = SmoothOutputSpec
_jobtype = 'spatial'
_jobname = 'smooth'
def _format_arg(self, opt, spec, val):
if opt in ['in_files']:
return scans_for_fnames(filename_to_list(val))
if opt == 'fwhm':
if not isinstance(val, list):
return [val, val, val]
if isinstance(val, list):
if len(val) == 1:
return [val[0], val[0], val[0]]
else:
return val
return super(Smooth, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['smoothed_files'] = []
for imgf in filename_to_list(self.inputs.in_files):
outputs['smoothed_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
return outputs
class DARTELInputSpec(SPMCommandInputSpec):
image_files = traits.List(traits.List(File(exists=True)),
desc="A list of files to be segmented",
field='warp.images', copyfile=False, mandatory=True)
template_prefix = traits.Str('Template', usedefault=True,
field='warp.settings.template',
desc='Prefix for template')
regularization_form = traits.Enum('Linear', 'Membrane', 'Bending',
field='warp.settings.rform',
desc='Form of regularization energy term')
iteration_parameters = traits.List(traits.Tuple(traits.Range(1, 10),
traits.Tuple(traits.Float,
traits.Float,
traits.Float),
traits.Enum(1, 2, 4, 8, 16,
32, 64, 128,
256, 512),
traits.Enum(0, 0.5, 1, 2, 4,
8, 16, 32)),
minlen=3,
maxlen=12,
field='warp.settings.param',
desc="""List of tuples for each iteration
- Inner iterations
- Regularization parameters
- Time points for deformation model
- smoothing parameter
""")
optimization_parameters = traits.Tuple(traits.Float, traits.Range(1, 8),
traits.Range(1, 8),
field='warp.settings.optim',
desc="""Optimization settings a tuple
- LM regularization
- cycles of multigrid solver
- relaxation iterations
""")
class DARTELOutputSpec(TraitedSpec):
final_template_file = File(exists=True, desc='final DARTEL template')
template_files = traits.List(File(exists=True), desc='Templates from different stages of iteration')
dartel_flow_fields = traits.List(File(exists=True), desc='DARTEL flow fields')
class DARTEL(SPMCommand):
"""Use spm DARTEL to create a template and flow fields
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=197
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> dartel = spm.DARTEL()
>>> dartel.inputs.image_files = [['rc1s1.nii','rc1s2.nii'],['rc2s1.nii', 'rc2s2.nii']]
>>> dartel.run() # doctest: +SKIP
"""
input_spec = DARTELInputSpec
output_spec = DARTELOutputSpec
_jobtype = 'tools'
_jobname = 'dartel'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['image_files']:
return scans_for_fnames(val, keep4d=True, separate_sessions=True)
elif opt == 'regularization_form':
mapper = {'Linear': 0, 'Membrane': 1, 'Bending': 2}
return mapper[val]
elif opt == 'iteration_parameters':
params = []
for param in val:
new_param = {}
new_param['its'] = param[0]
new_param['rparam'] = list(param[1])
new_param['K'] = param[2]
new_param['slam'] = param[3]
params.append(new_param)
return params
elif opt == 'optimization_parameters':
new_param = {}
new_param['lmreg'] = val[0]
new_param['cyc'] = val[1]
new_param['its'] = val[2]
return [new_param]
else:
return super(DARTEL, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['template_files'] = []
for i in range(6):
outputs['template_files'].append(os.path.realpath('%s_%d.nii' % (self.inputs.template_prefix, i+1)))
outputs['final_template_file'] = os.path.realpath('%s_6.nii' % self.inputs.template_prefix)
outputs['dartel_flow_fields'] = []
for filename in self.inputs.image_files[0]:
pth, base, ext = split_filename(filename)
outputs['dartel_flow_fields'].append(os.path.realpath('u_%s_%s%s' % (base,
self.inputs.template_prefix,
ext)))
return outputs
class DARTELNorm2MNIInputSpec(SPMCommandInputSpec):
template_file = File(exists=True,
desc="DARTEL template",
field='mni_norm.template', copyfile=False, mandatory=True)
flowfield_files = InputMultiPath(File(exists=True),
desc="DARTEL flow fields u_rc1*",
field='mni_norm.data.subjs.flowfields',
mandatory=True)
apply_to_files = InputMultiPath(File(exists=True),
desc="Files to apply the transform to",
field='mni_norm.data.subjs.images',
mandatory=True, copyfile=False)
voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
desc="Voxel sizes for output file",
field='mni_norm.vox')
bounding_box = traits.Tuple(traits.Float, traits.Float, traits.Float,
traits.Float, traits.Float, traits.Float,
desc="Voxel sizes for output file",
field='mni_norm.bb')
modulate = traits.Bool(field='mni_norm.preserve',
desc="Modulate out images - no modulation preserves concentrations")
fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
traits.Float(), field='mni_norm.fwhm',
desc='3-list of fwhm for each dimension')
class DARTELNorm2MNIOutputSpec(TraitedSpec):
normalized_files = OutputMultiPath(File(exists=True), desc='Normalized files in MNI space')
normalization_parameter_file = File(exists=True, desc='Transform parameters to MNI space')
class DARTELNorm2MNI(SPMCommand):
"""Use spm DARTEL to normalize data to MNI space
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=200
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> nm = spm.DARTELNorm2MNI()
>>> nm.inputs.template_file = 'Template_6.nii'
>>> nm.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s3_Template.nii']
>>> nm.inputs.apply_to_files = ['c1s1.nii', 'c1s3.nii']
>>> nm.inputs.modulate = True
>>> nm.run() # doctest: +SKIP
"""
input_spec = DARTELNorm2MNIInputSpec
output_spec = DARTELNorm2MNIOutputSpec
_jobtype = 'tools'
_jobname = 'dartel'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['template_file']:
return np.array([val], dtype=object)
elif opt in ['flowfield_files']:
return scans_for_fnames(val, keep4d=True)
elif opt in ['apply_to_files']:
return scans_for_fnames(val, keep4d=True, separate_sessions=True)
elif opt == 'voxel_size':
return list(val)
elif opt == 'bounding_box':
return list(val)
elif opt == 'fwhm':
if isinstance(val, list):
return val
else:
return [val, val, val]
else:
return super(DARTELNorm2MNI, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
pth, base, ext = split_filename(self.inputs.template_file)
outputs['normalization_parameter_file'] = os.path.realpath(base+'_2mni.mat')
outputs['normalized_files'] = []
prefix = "w"
if isdefined(self.inputs.modulate) and self.inputs.modulate:
prefix = 'm' + prefix
if not isdefined(self.inputs.fwhm) or self.inputs.fwhm > 0:
prefix = 's' + prefix
for filename in self.inputs.apply_to_files:
pth, base, ext = split_filename(filename)
outputs['normalized_files'].append(os.path.realpath('%s%s%s' % (prefix,
base,
ext)))
return outputs
class CreateWarpedInputSpec(SPMCommandInputSpec):
image_files = InputMultiPath(File(exists=True),
desc="A list of files to be warped",
field='crt_warped.images', copyfile=False,
mandatory=True)
flowfield_files = InputMultiPath(File(exists=True),
desc="DARTEL flow fields u_rc1*",
field='crt_warped.flowfields',
copyfile=False,
mandatory=True)
iterations = traits.Range(low=0, high=9,
desc=("The number of iterations: log2(number of "
"time steps)"),
field='crt_warped.K')
interp = traits.Range(low=0, high=7, field='crt_warped.interp',
desc='degree of b-spline used for interpolation')
class CreateWarpedOutputSpec(TraitedSpec):
warped_files = traits.List(File(exists=True, desc='final warped files'))
class CreateWarped(SPMCommand):
"""Apply a flow field estimated by DARTEL to create warped images
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=202
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> create_warped = spm.CreateWarped()
>>> create_warped.inputs.image_files = ['rc1s1.nii', 'rc1s2.nii']
>>> create_warped.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s2_Template.nii']
>>> create_warped.run() # doctest: +SKIP
"""
input_spec = CreateWarpedInputSpec
output_spec = CreateWarpedOutputSpec
_jobtype = 'tools'
_jobname = 'dartel'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['image_files']:
return scans_for_fnames(val, keep4d=True,
separate_sessions=True)
if opt in ['flowfield_files']:
return scans_for_fnames(val, keep4d=True)
else:
return super(CreateWarped, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['warped_files'] = []
for filename in self.inputs.image_files:
pth, base, ext = split_filename(filename)
outputs['warped_files'].append(os.path.realpath('w%s%s' % (base,
ext)))
return outputs
class ApplyDeformationFieldInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True, field='fnames')
deformation_field = File(exists=True, mandatory=True, field='comp{1}.def')
reference_volume = File(exists=True, mandatory=True,
field='comp{2}.id.space')
interp = traits.Range(low=0, high=7, field='interp',
desc='degree of b-spline used for interpolation')
class ApplyDeformationFieldOutputSpec(TraitedSpec):
out_files = OutputMultiPath(File(exists=True))
class ApplyDeformations(SPMCommand):
input_spec = ApplyDeformationFieldInputSpec
output_spec = ApplyDeformationFieldOutputSpec
_jobtype = 'util'
_jobname = 'defs'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['deformation_field', 'reference_volume']:
val = [val]
if opt in ['deformation_field']:
return scans_for_fnames(val, keep4d=True, separate_sessions=False)
if opt in ['in_files', 'reference_volume']:
return scans_for_fnames(val, keep4d=False, separate_sessions=False)
else:
return super(ApplyDeformations, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_files'] = []
for filename in self.inputs.in_files:
_, fname = os.path.split(filename)
outputs['out_files'].append(os.path.realpath('w%s' % fname))
return outputs
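# Hedged usage sketch (added for illustration, not part of the original file):
# unlike the interfaces above, ApplyDeformations ships without a docstring
# example. Assuming it is exported from nipype.interfaces.spm like its
# siblings, and with purely hypothetical filenames, a call might look like:
# >>> import nipype.interfaces.spm as spm
# >>> defs = spm.ApplyDeformations()
# >>> defs.inputs.in_files = 'functional.nii'
# >>> defs.inputs.deformation_field = 'y_anat.nii'
# >>> defs.inputs.reference_volume = 'T1.nii'
# >>> defs.run()  # doctest: +SKIP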
class VBMSegmentInputSpec(SPMCommandInputSpec):
in_files = InputMultiPath(
File(exists=True),
desc="A list of files to be segmented",
field='estwrite.data', copyfile=False, mandatory=True)
tissues = File(
exists=True, field='estwrite.tpm',
desc='tissue probability map')
gaussians_per_class = traits.Tuple(
(2, 2, 2, 3, 4, 2), *([traits.Int()]*6),
usedefault=True,
desc='number of gaussians for each tissue class')
bias_regularization = traits.Enum(
0.0001,
(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10),
field='estwrite.opts.biasreg', usedefault=True,
desc='no(0) - extremely heavy (10)')
bias_fwhm = traits.Enum(
60,
(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf'),
field='estwrite.opts.biasfwhm',
usedefault=True,
desc='FWHM of Gaussian smoothness of bias')
sampling_distance = traits.Float(
3, usedefault=True, field='estwrite.opts.samp',
desc='Sampling distance on data for parameter estimation')
warping_regularization = traits.Float(
4, usedefault=True, field='estwrite.opts.warpreg',
desc='Controls balance between parameters and data')
spatial_normalization = traits.Enum(
'high', 'low', usedefault=True,)
dartel_template = File(
exists=True,
field='estwrite.extopts.dartelwarp.normhigh.darteltpm')
use_sanlm_denoising_filter = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.extopts.sanlm',
desc="0=No denoising, 1=denoising,2=denoising multi-threaded")
mrf_weighting = traits.Float(
0.15, usedefault=True, field='estwrite.extopts.mrf')
cleanup_partitions = traits.Int(
1, usedefault=True, field='estwrite.extopts.cleanup',
desc="0=None,1=light,2=thorough")
display_results = traits.Bool(
True, usedefault=True, field='estwrite.extopts.print')
gm_native = traits.Bool(
False, usedefault=True, field='estwrite.output.GM.native',)
gm_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.GM.warped',)
gm_modulated_normalized = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.output.GM.modulated',
desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
gm_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.GM.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
wm_native = traits.Bool(
False, usedefault=True, field='estwrite.output.WM.native',)
wm_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.WM.warped',)
wm_modulated_normalized = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.output.WM.modulated',
desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
wm_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.WM.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
csf_native = traits.Bool(
False, usedefault=True, field='estwrite.output.CSF.native',)
csf_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.CSF.warped',)
csf_modulated_normalized = traits.Range(
0, 2, 2, usedefault=True, field='estwrite.output.CSF.modulated',
desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
csf_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.CSF.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
bias_corrected_native = traits.Bool(
False, usedefault=True, field='estwrite.output.bias.native',)
bias_corrected_normalized = traits.Bool(
True, usedefault=True, field='estwrite.output.bias.warped',)
bias_corrected_affine = traits.Bool(
False, usedefault=True, field='estwrite.output.bias.affine',)
pve_label_native = traits.Bool(
False, usedefault=True, field='estwrite.output.label.native')
pve_label_normalized = traits.Bool(
False, usedefault=True, field='estwrite.output.label.warped')
pve_label_dartel = traits.Range(
0, 2, 0, usedefault=True, field='estwrite.output.label.dartel',
desc="0=None,1=rigid(SPM8 default),2=affine")
jacobian_determinant = traits.Bool(
False, usedefault=True, field='estwrite.jacobian.warped')
deformation_field = traits.Tuple(
(0, 0), traits.Bool, traits.Bool, usedefault=True,
field='estwrite.output.warps',
desc='forward and inverse field')
class VBMSegmentOuputSpec(TraitedSpec):
native_class_images = traits.List(traits.List(File(exists=True)),
desc='native space probability maps')
dartel_input_images = traits.List(traits.List(File(exists=True)),
desc='dartel imported class images')
normalized_class_images = traits.List(traits.List(File(exists=True)),
desc='normalized class images')
modulated_class_images = traits.List(traits.List(File(exists=True)),
desc='modulated+normalized class images')
transformation_mat = OutputMultiPath(File(exists=True),
desc='Normalization transformation')
bias_corrected_images = OutputMultiPath(
File(exists=True),
desc='bias corrected images')
normalized_bias_corrected_images = OutputMultiPath(
File(exists=True),
desc='bias corrected images')
pve_label_native_images = OutputMultiPath(File(exists=True))
pve_label_normalized_images = OutputMultiPath(File(exists=True))
pve_label_registered_images = OutputMultiPath(File(exists=True))
forward_deformation_field = OutputMultiPath(File(exists=True))
inverse_deformation_field = OutputMultiPath(File(exists=True))
jacobian_determinant_images = OutputMultiPath(File(exists=True))
class VBMSegment(SPMCommand):
"""Use VBM8 toolbox to separate structural images into different
tissue classes.
Example
-------
>>> import nipype.interfaces.spm as spm
>>> seg = spm.VBMSegment()
>>> seg.inputs.tissues = 'TPM.nii'
>>> seg.inputs.dartel_template = 'Template_1_IXI550_MNI152.nii'
>>> seg.inputs.bias_corrected_native = True
>>> seg.inputs.gm_native = True
>>> seg.inputs.wm_native = True
>>> seg.inputs.csf_native = True
>>> seg.inputs.pve_label_native = True
>>> seg.inputs.deformation_field = (True, False)
>>> seg.run() # doctest: +SKIP
"""
input_spec = VBMSegmentInputSpec
output_spec = VBMSegmentOuputSpec
_jobtype = 'tools'
_jobname = 'vbm8'
def _list_outputs(self):
outputs = self._outputs().get()
do_dartel = self.inputs.spatial_normalization
dartel_px = ''
if do_dartel:
dartel_px = 'r'
outputs['native_class_images'] = [[], [], []]
outputs['dartel_input_images'] = [[], [], []]
outputs['normalized_class_images'] = [[], [], []]
outputs['modulated_class_images'] = [[], [], []]
outputs['transformation_mat'] = []
outputs['bias_corrected_images'] = []
outputs['normalized_bias_corrected_images'] = []
outputs['inverse_deformation_field'] = []
outputs['forward_deformation_field'] = []
outputs['jacobian_determinant_images'] = []
outputs['pve_label_native_images'] = []
outputs['pve_label_normalized_images'] = []
outputs['pve_label_registered_images'] = []
for filename in self.inputs.in_files:
pth, base, ext = split_filename(filename)
outputs['transformation_mat'].append(
os.path.join(pth, "%s_seg8.mat" % base))
for i, tis in enumerate(['gm', 'wm', 'csf']):
# native space
if getattr(self.inputs, '%s_native' % tis):
outputs['native_class_images'][i].append(
os.path.join(pth, "p%d%s.nii" % (i+1, base)))
if getattr(self.inputs, '%s_dartel' % tis) == 1:
outputs['dartel_input_images'][i].append(
os.path.join(pth, "rp%d%s.nii" % (i+1, base)))
elif getattr(self.inputs, '%s_dartel' % tis) == 2:
outputs['dartel_input_images'][i].append(
os.path.join(pth, "rp%d%s_affine.nii" % (i+1, base)))
# normalized space
if getattr(self.inputs, '%s_normalized' % tis):
outputs['normalized_class_images'][i].append(
os.path.join(pth, "w%sp%d%s.nii" % (dartel_px, i+1, base)))
if getattr(self.inputs, '%s_modulated_normalized' % tis) == 1:
outputs['modulated_class_images'][i].append(os.path.join(
pth, "mw%sp%d%s.nii" % (dartel_px, i+1, base)))
elif getattr(self.inputs, '%s_modulated_normalized' % tis) == 2:
outputs['normalized_class_images'][i].append(os.path.join(
pth, "m0w%sp%d%s.nii" % (dartel_px, i+1, base)))
if self.inputs.pve_label_native:
outputs['pve_label_native_images'].append(
os.path.join(pth, "p0%s.nii" % (base)))
if self.inputs.pve_label_normalized:
outputs['pve_label_normalized_images'].append(
os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base)))
if self.inputs.pve_label_dartel == 1:
outputs['pve_label_registered_images'].append(
os.path.join(pth, "rp0%s.nii" % (base)))
elif self.inputs.pve_label_dartel == 2:
outputs['pve_label_registered_images'].append(
os.path.join(pth, "rp0%s_affine.nii" % (base)))
if self.inputs.bias_corrected_native:
outputs['bias_corrected_images'].append(
os.path.join(pth, "m%s.nii" % (base)))
if self.inputs.bias_corrected_normalized:
outputs['normalized_bias_corrected_images'].append(
os.path.join(pth, "wm%s%s.nii" % (dartel_px, base)))
if self.inputs.deformation_field[0]:
outputs['forward_deformation_field'].append(
os.path.join(pth, "y_%s%s.nii" % (dartel_px, base)))
if self.inputs.deformation_field[1]:
outputs['inverse_deformation_field'].append(
os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base)))
if self.inputs.jacobian_determinant and do_dartel:
outputs['jacobian_determinant_images'].append(
os.path.join(pth, "jac_wrp1%s.nii" % (base)))
return outputs
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return scans_for_fnames(val, keep4d=True)
elif opt in ['spatial_normalization']:
if val == 'low':
return {'normlow': []}
elif opt in ['dartel_template']:
return np.array([val], dtype=object)
elif opt in ['deformation_field']:
return super(VBMSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
else:
return super(VBMSegment, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
if self.inputs.spatial_normalization == 'low':
einputs = super(VBMSegment, self)._parse_inputs(
skip=('spatial_normalization', 'dartel_template'))
einputs[0]['estwrite']['extopts']['dartelwarp'] = {'normlow': 1}
return einputs
else:
return super(VBMSegment, self)._parse_inputs(skip=('spatial_normalization'))
|
mick-d/nipype_source
|
nipype/interfaces/spm/preprocess.py
|
Python
|
bsd-3-clause
| 67,777
|
[
"Gaussian"
] |
e41e99b50addd1a43999021c81d07336b2fa7679670f5a717f181e28301b8757
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Reset to factory settings of Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_factory
author: "Dave Kasberg (@dkasberg)"
short_description: Reset the switch's startup configuration to default (factory) on devices running Lenovo CNOS
description:
- This module allows you to reset a switch's startup configuration. The method provides a way to reset the
startup configuration to its factory settings. This is helpful when you want to move the switch to another
topology as a new network device.
This module uses SSH to manage network device configuration.
      The results of the operation can be viewed in the results directory.
      For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_factory.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_factory. These are written in the main.yml file of the tasks directory.
---
- name: Test Reset to factory
cnos_factory:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_factory_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Switch Startup Config is Reset to factory settings"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "save erase \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# cnos.debugOutput(cliCommand)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand, "[n]", 2, remote_conn)
output = output + cnos.waitForDeviceResponse("y" + "\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Switch Startup Config is Reset to factory settings ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
tsdmgz/ansible
|
lib/ansible/modules/network/cnos/cnos_factory.py
|
Python
|
gpl-3.0
| 5,254
|
[
"VisIt"
] |
504d6efd103082ce4989f7e45c1016bf34df8fa09f7def9bd64e354d80f23190
|
#!/usr/bin/env python
#coding: utf-8
class Solution:
def _bfs(self, board, cr, cc, row, col, visit, live):
if cr < 0 or cr >= row or cc < 0 or cc >= col: return
if visit[cr][cc]: return
visit[cr][cc], live[cr][cc] = True, True
queue = [(cr,cc)]
while queue:
cr, cc = queue[0]
tneighbor = [(cr-1,cc), (cr+1,cc), (cr,cc-1), (cr,cc+1)]
neighbor = [(i,j) for i, j in tneighbor
if (i>=0 and j>=0 and i <row and j <col
and not visit[i][j]
and board[i][j] == 'O')]
for i, j in neighbor:
visit[i][j] = True
live[i][j] = True
queue = queue[1:] + neighbor
    # @param board, a 2D array
    # Capture all regions by modifying the input board in-place.
    # Do not return any value.
    # Strategy: BFS from every 'O' on the border marks the cells that can
    # never be surrounded ('live'); every remaining 'O' is flipped to 'X'.
def solve(self, board):
if not board: return
lboard = [list(b) for b in board]
r, c = len(lboard), len(lboard[0])
visit = [[False] * c for i in range(r)]
live = [[False] * c for i in range(r)]
for cr in [0, r-1]:
for cc in range(c):
if lboard[cr][cc] == 'O':
self._bfs(lboard, cr, cc, r, c, visit, live)
for cc in [0, c-1]:
for cr in range(r):
if lboard[cr][cc] == 'O':
self._bfs(lboard, cr, cc, r, c, visit, live)
for i in range(r):
for j in range(c):
if lboard[i][j] == 'O' and not live[i][j]:
lboard[i][j] = 'X'
for i in range(r):
board[i] = ''.join(lboard[i])
if __name__ == '__main__':
board = [
'XXXX',
'XOOX',
'XXOX',
'XOXX'
]
s = Solution()
board = ["XOOOOOOOOOOOOOOOOOOO","OXOOOOXOOOOOOOOOOOXX","OOOOOOOOXOOOOOOOOOOX","OOXOOOOOOOOOOOOOOOXO","OOOOOXOOOOXOOOOOXOOX","XOOOXOOOOOXOXOXOXOXO","OOOOXOOXOOOOOXOOXOOO","XOOOXXXOXOOOOXXOXOOO","OOOOOXXXXOOOOXOOXOOO","XOOOOXOOOOOOXXOOXOOX","OOOOOOOOOOXOOXOOOXOX","OOOOXOXOOXXOOOOOXOOO","XXOOOOOXOOOOOOOOOOOO","OXOXOOOXOXOOOXOXOXOO","OOXOOOOOOOXOOOOOXOXO","XXOOOOOOOOXOXXOOOXOO","OOXOOOOOOOXOOXOXOXOO","OOOXOOOOOXXXOOXOOOXO","OOOOOOOOOOOOOOOOOOOO","XOOOOXOOOXXOOXOXOXOO"]
s.solve(board)
print(board)
|
wh-acmer/minixalpha-acm
|
LeetCode/Python/surrounded_regions.py
|
Python
|
mit
| 2,345
|
[
"VisIt"
] |
a084c3a67ce71ea928ecac595582314f8b63d9adf24d86be31ad277dc455f1ae
|
"""
WSGI config for ase project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
sys.path.append('/home/alzo/www-dev/ase')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "common.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
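# Hedged sketch (added for illustration, not part of the original file): the
# docstring above notes that WSGI middleware could be introduced here. A
# minimal, assumed wrapper around the Django application would look like the
# commented-out code below; the middleware name and the logging it does are
# hypothetical.
# class LoggingMiddleware(object):
#     def __init__(self, wrapped):
#         self.wrapped = wrapped
#     def __call__(self, environ, start_response):
#         print(environ.get('PATH_INFO'))  # log the requested path
#         return self.wrapped(environ, start_response)
# application = LoggingMiddleware(application)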
|
epcoullery/ase
|
repart/wsgi.py
|
Python
|
agpl-3.0
| 1,049
|
[
"ASE"
] |
a65de21ea4fab815362a29a76247023372e5dd8ba882fcb2eee8fef2e04f7fb6
|
import os
import numpy as np
import deepchem as dc
def load_gaussian_cdf_data():
"""Load example with numbers sampled from Gaussian normal distribution.
Each feature and task is a column of values that is sampled
from a normal distribution of mean 0, stdev 1."""
current_dir = os.path.dirname(os.path.abspath(__file__))
features = ["feat0", "feat1"]
featurizer = dc.feat.UserDefinedFeaturizer(features)
tasks = ["task0", "task1"]
input_file = os.path.join(current_dir, "assets/gaussian_cdf_example.csv")
loader = dc.data.UserCSVLoader(
tasks=tasks, featurizer=featurizer, id_field="id")
return loader.create_dataset(input_file)
def test_cdf_X_transformer():
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_X=True, dataset=gaussian_dataset, bins=bins)
_, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
sorted = np.sort(X_t, axis=0)
np.testing.assert_allclose(sorted, target)
def test_cdf_1d_y_transformer():
"""Test on a synthetic dataset we sample with 1d y."""
N = 10
n_feat = 5
n_bins = 100
X = np.random.normal(size=(N, n_feat))
y = np.random.normal(size=(N,))
dataset = dc.data.NumpyDataset(X, y)
cdftrans = dc.trans.CDFTransformer(
transform_y=True, dataset=dataset, bins=n_bins)
dataset = cdftrans.transform(dataset)
def test_cdf_y_transformer():
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_y=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is an y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is an y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
sorted = np.sort(y_t, axis=0)
np.testing.assert_allclose(sorted, target)
# Check that untransform does the right thing.
y_restored = cdf_transformer.untransform(y_t)
assert np.max(y_restored - y) < 1e-5
|
deepchem/deepchem
|
deepchem/trans/tests/test_cdf_transform.py
|
Python
|
mit
| 3,446
|
[
"Gaussian"
] |
92fa8dae05a44c02b7bfcf812b1cd9bc2d6d8d2668546b98346f590cc2136f65
|
# ******************************** #
# Double pole balancing experiment #
# ******************************** #
import math
import random
import cPickle as pickle
from neat import config, population, chromosome, genome, visualize
from cart_pole import CartPole
def evaluate_population(population):
simulation = CartPole(population, markov = False)
# comment this line to print the status
simulation.print_status = False
simulation.run()
if __name__ == "__main__":
config.load('cpExp_config')
# change the number of inputs accordingly to the type
# of experiment: markov (6) or non-markov (3)
# you can also set the configs in dpole_config as long
# as you have two config files for each type of experiment
config.Config.input_nodes = 3
# neuron model type
chromosome.node_gene_type = genome.NodeGene
#chromosome.node_gene_type = genome.CTNodeGene
population.Population.evaluate = evaluate_population
pop = population.Population()
pop.epoch(500, report=1, save_best=0)
winner = pop.stats[0][-1]
# visualize the best topology
#visualize.draw_net(winner) # best chromosome
# Plots the evolution of the best/average fitness
#visualize.plot_stats(pop.stats)
# Visualizes speciation
#visualize.plot_species(pop.species_log)
print 'Number of evaluations: %d' %winner.id
print 'Winner score: %d' %winner.score
from time import strftime
date = strftime("%Y_%m_%d_%Hh%Mm%Ss")
# saves the winner
file = open('winner_'+date, 'w')
pickle.dump(winner, file)
file.close()
#print winner
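    # Hedged sketch (added for illustration, not part of the original file):
    # the winner pickled above could later be reloaded for inspection, e.g.
    # with open('winner_' + date) as handle:   # filename as written above
    #     best_chromosome = pickle.load(handle)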
|
anuragpapineni/Hearthbreaker-evolved-agent
|
neat-python-read-only/examples/pole_balancing/double_pole/cpExp.py
|
Python
|
mit
| 1,609
|
[
"NEURON"
] |
b69831dcedb1c191a1e571f0e28d234941cde7555688bcce0d9d4656415fa2e3
|
#!/usr/bin/env python3
import os
import pathlib
from pathlib import Path
import cloudpickle
from natsort import natsorted
import numpy as np
from pysisyphus.helpers import geom_from_library, geom_from_xyz_file, geoms_from_trj
from pysisyphus.calculators.Turbomole import Turbomole
from pysisyphus.optimizers.ConjugateGradient import ConjugateGradient
from pysisyphus.optimizers.SteepestDescent import SteepestDescent
THIS_DIR = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
np.set_printoptions(suppress=True, precision=4)
def check():
import re
import matplotlib.pyplot as plt
from natsort import natsorted
import numpy as np
p = Path(".")
all_ens = list()
for log in natsorted(p.glob("calculator*.out")):
with open(log) as handle:
text = handle.read()
ens = [float(e) for e in
re.findall("Total energy:\s*([\d\.\-]+)", text)]
all_ens.append(ens)
arr = np.array(all_ens, dtype=float)
fig, ax = plt.subplots()
for i, row in enumerate(all_ens):
xs = np.full_like(row, i)
ax.plot(xs, row, "+")
plt.show()
def test_butadiene_track_opt():
in_path = THIS_DIR / "butadiene"
geom = geom_from_xyz_file(in_path / "butadiene_hf_sto3g.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
#fn = "/scratch/programme/pysisyphus/tests/test_turbo_butadien_td_opt/wfo_backup.out"
#with open(fn) as handle:
# stdout = handle.read()
#wfo = turbo.wfow
#a = wfo.parse_wfoverlap_out(stdout)
#print(a)
#print(a.reshape(-1, 6))
opt_kwargs = {
"max_cycles": 10,
"dump": True,
}
opt = ConjugateGradient(geom, **opt_kwargs)
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
def test_butadiene_twist_track_opt():
in_path = THIS_DIR / "butadiene_twist"
geom = geom_from_xyz_file(in_path / "02_buta_twist.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 3,
"dump": True,
}
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
def test_butadiene_twist_track_cc2_opt():
in_path = THIS_DIR / "butadiene_twist_cc2"
geom = geom_from_xyz_file(in_path / "02_buta_twist.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 3,
"dump": True,
}
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
def test_wfo_ref():
in_path = THIS_DIR / "butadiene_twist"
geom = geom_from_xyz_file(in_path / "02_buta_twist.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 2,
"dump": True,
}
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
#wfo_ref = "wfo.ref"
#with open(wfo_ref) as handle:
# text = handle.read()
#wfow = turbo.wfow
#for key in wfow.matrix_types:
# mat = wfow.parse_wfoverlap_out(text, key)
# print(mat)
def test_wfo_compare():
in_path = THIS_DIR / "butadiene_twist"
geom = geom_from_xyz_file(in_path / "02_buta_twist.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 1,
"dump": True,
}
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
wfow = turbo.wfow
wfow.compare(wfow)
def test_wfo_compare_butadiene_cc2_sto3g():
in_path = THIS_DIR / "butadiene_cc2_sto3g"
geom = geom_from_xyz_file(in_path / "buta.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="sto3g")
geom.set_calculator(turbo)
# opt_kwargs = {
# "max_cycles": 1,
# "dump": True,
# }
# opt = SteepestDescent(geom, **opt_kwargs)
# opt.run()
turbo.run_calculation(geom.atoms, geom.coords)
wfow = turbo.wfow
wfow.compare(wfow)
def test_wfo_compare_butadiene_cc2():
in_path = THIS_DIR / "butadiene_cc2"
geom = geom_from_xyz_file(in_path / "buta.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 1,
"dump": True,
}
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
wfow = turbo.wfow
wfow.compare(wfow)
def test_wfo_compare_neon():
in_path = THIS_DIR / "neon"
geom = geom_from_xyz_file(in_path / "neon.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 1,
"dump": True,
}
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
wfow = turbo.wfow
wfow.compare(wfow)
def test_wfo_compare_neon_dimer():
in_path = THIS_DIR / "neon_dimer"
geom = geom_from_xyz_file(in_path / "neon_dimer.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="def2-svp")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 5,
"dump": True,
"track": True,
# "convergence": {
# "max_force_thresh": 2.5e-8,
# }
}
opt = SteepestDescent(geom, **opt_kwargs)
#import pdb; pdb.set_trace()
opt.run()
# wfow = turbo.wfow
# wfow.compare(wfow)
def test_wfo_compare_sto3g():
in_path = THIS_DIR / "butadiene_twist_sto3g"
geom = geom_from_xyz_file(in_path / "02_buta_twist.xyz")
turbo = Turbomole(in_path, track=True, wfo_basis="sto-3g")
geom.set_calculator(turbo)
opt_kwargs = {
"max_cycles": 1,
"dump": True,
}
opt = SteepestDescent(geom, **opt_kwargs)
opt.run()
wfow = turbo.wfow
wfow.compare(wfow)
def test_diabatize():
geoms = geoms_from_trj(THIS_DIR / "ma_proton_transfer/interpolated.trj")
in_path = THIS_DIR / "ma_turbo"
calc_kwargs = {
"track": True,
"wfo_basis": "sto-3g",
}
turbos = list()
wfos = list()
for i, geom in enumerate(geoms):
pickle_fn = f"wfo_pickle_{i}"
turbo = Turbomole(in_path, calc_number=i, **calc_kwargs)
geom.set_calculator(turbo)
forces = geom.forces
print(f"cycle {i}")
turbos.append(turbo)
wfos.append(turbo.wfow)
with open(pickle_fn, "wb") as handle:
cloudpickle.dump(turbo.wfow, handle)
def test_diabatize():
geoms = geoms_from_trj(THIS_DIR / "ma_proton_transfer/interpolated.trj")[:4]
#in_path = THIS_DIR / "ma_turbo"
in_path = THIS_DIR / "ma_turbo_no_exopt"
calc_kwargs = {
"track": True,
"wfo_basis": "sto-3g",
}
#geoms = geoms_from_trj(THIS_DIR / "biaryl_trj/biaryl_first_13.trj")[:4]
#in_path = THIS_DIR / "biaryl"
#calc_kwargs = {
# "track": True,
# "wfo_basis": "def2-svp",
#}
turbos = list()
wfos = list()
mos = None
np.set_printoptions(precision=2, suppress=True)
# for i, geom in enumerate(geoms):
# pickle_fn = f"wfo_pickle_{i}"
# turbo = Turbomole(in_path, calc_number=i, **calc_kwargs)
# if i > 0:
# turbo.mos = mos
# none_ = turbo.get_tddft(geom.atoms, geom.coords)
# mos = turbo.mos
# print(f"cycle {i}")
# turbos.append(turbo)
# wfos.append(turbo.wfow)
# with open(pickle_fn, "wb") as handle:
# cloudpickle.dump(turbo.wfow, handle)
# if i > 0:
# wfo1 = wfos[i-1]
# inds = wfo1.compare(turbo.wfow)
# print(inds)
mos = natsorted(THIS_DIR.glob("*.mos"))
td_vecs = natsorted(THIS_DIR.glob("*.ciss_a"))
for i, (geom, mos, td_vec) in enumerate(zip(geoms, mos, td_vecs)):
print("cycle", i, geom, mos, td_vec)
turbo = Turbomole(in_path, calc_number=i, **calc_kwargs)
turbo.mos = mos
turbo.td_vec_fn = td_vec
turbo.check_for_root_flip(geom.atoms, geom.coords)
wfos.append(turbo.wfow)
if i > 0:
wfo1 = wfos[i-1]
inds = wfo1.compare(turbo.wfow)
def diabatize_pickled():
wfos = list()
for fn in natsorted(THIS_DIR.glob("wfo_pickle_*")):
print(fn)
with open(fn, "rb") as handle:
wfo = cloudpickle.load(handle)
print(wfo)
wfos.append(wfo)
print("Found {len(wfos)} WFOWrapper pickles.")
diabats = list()
for i, wfo1 in enumerate(wfos[:-1]):
wfo2 = wfos[i+1]
max_ovlp_inds = wfo1.compare(wfo2)
diabats.append(max_ovlp_inds)
"""
dia_arr: shape (no. of images-1, no. of states)
The N-th row contains the roots with maximum overlap between the
N-th and (N+1)-th WFOWrapper objects.
The index i of item j in the k-th row gives the root in WFO1 while
j gives the root with maximum overlap in WFO2.
Transposing the whole array makes this easier to understand. Now
every rows hold the overlaps for one adiabatic state.
"""
dia_arr = np.array(diabats)
print(dia_arr)
print()
print(dia_arr.T)
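    # Hedged illustration (added; the numbers below are made up, not taken
    # from any pickle): for three images and two states a possible dia_arr is
    #   [[0, 1],
    #    [1, 0]]
    # i.e. the roots keep their order between images 0 and 1 and swap between
    # images 1 and 2; after the transpose printed above, each row follows one
    # adiabatic state, so a root flip shows up as a change along that row.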
# def diabatize_pickled():
# fn = "wfo_pickle_0"
# with open(fn, "rb") as handle:
# wfo = cloudpickle.load(handle)
# print(wfo)
# inds = wfo.compare(wfo)
# print(inds)
# fn = "wfo_0.001.out"
# with open(fn) as handle:
# text = handle.read()
# wfo.parse_wfoverlap_out(text)
if __name__ == "__main__":
#test_butadiene_track_opt()
#test_butadiene_twist_track_opt()
#test_butadiene_twist_track_cc2_opt()
#check()
#test_wfo_ref()
#test_wfo_compare()
#print()
# test_wfo_compare_neon()
#test_wfo_compare_neon_dimer()
#test_wfo_compare_butadiene_cc2()
test_wfo_compare_butadiene_cc2_sto3g()
#test_wfo_compare_sto3g()
#test_diabatize()
#diabatize_pickled()
|
eljost/pysisyphus
|
tests_staging/test_turbo_butadien_td_opt/test_turbo_butadien_td_opt.py
|
Python
|
gpl-3.0
| 9,847
|
[
"TURBOMOLE"
] |
57ce17634c223d22db791fd23f3598174f9dc04dfff490b008415a68065bfd79
|
'''
This is the main program loop. It generates the front end and all URLs for the user to visit.
'''
# TODO: 2 Generate all food URLs once the database is done
#
import web_scraper
from flask import *
#from flask_bootstrap import Bootstrap
DC_menu=web_scraper.Webget()
DC_menu=DC_menu.format_to_HTML()
app = Flask(__name__)
@app.route("/Haverford/")
def hello_monkey():
return render_template('index.html', title='Home', menu=DC_menu)
@app.route('/<letter>')
def test(letter):
    return render_template('index.html', title=letter, menu=DC_menu)
'''This is how I will implement a new URL for each food. For all foods that are available today, generate a URL for them.
Maybe I'll generate URLs for all foods, period; I don't know yet. Also, the URL should be /Haverford/<food> because each
food will be unique to each location.'''
for c in 'abcdef':
with app.test_request_context():
url_for('test', letter=c)
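# Hedged sketch (added for illustration, not part of the original file): the
# per-food pages described in the docstring above could also be served by a
# single dynamic route instead of pre-generating URLs; the handler name below
# is an assumption.
# @app.route('/Haverford/<food>')
# def food_page(food):
#     return render_template('index.html', title=food, menu=DC_menu)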
if __name__ == "__main__":
app.run(host='127.0.0.1', port=80, debug=True)
|
SalehHindi/DC-Killer
|
web_frontend.py
|
Python
|
gpl-2.0
| 1,006
|
[
"VisIt"
] |
8d4c58c6fca95d8f64a4871c75d2f3f8f20e611dff9a20fbcb6d2c5b933da66a
|
"""Module created to test the library on MNIST digits.
Contains:
* ANN/ DBN network training.
* RBM training and feature exposure.
* CNN network training.
"""
__author__ = "Mihaela Rosca"
__contact__ = "[email protected]"
import argparse
import cPickle as pickle
import matplotlib
import numpy as np
import os
import PCA
import warnings
from sklearn import cross_validation
from sklearn.decomposition import PCA as SklearnPCA  # sklearn's PCA class for pcaSklearn(); the local PCA module above is used by pcaOnMnist()
from lib import convNet
from lib import deepbelief as db
from lib import restrictedBoltzmannMachine as rbm
from lib.common import *
from lib.cnnLayers import *
from lib.activationfunctions import *
from lib.trainingoptions import *
from read import readmnist
import matplotlib
import os
havedisplay = "DISPLAY" in os.environ
if not havedisplay:
exitval = os.system('python -c "import matplotlib.pyplot as plt; plt.figure()"')
havedisplay = (exitval == 0)
if havedisplay:
import matplotlib.pyplot as plt
else:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='digit recognition')
parser.add_argument('--save',dest='save',action='store_true', default=False,
help="if true, the network is serialized and saved")
parser.add_argument('--train',dest='train',action='store_true', default=False,
help=("if true, the network is trained from scratch from the"
"training data"))
parser.add_argument('--sparsity', dest='sparsity',action='store_true', default=False,
help=("if true, the the networks are trained with sparsity constraints"))
parser.add_argument('--pca', dest='pca',action='store_true', default=False,
help=("if true, the code for running PCA on the data is run"))
parser.add_argument('--rbm', dest='rbm',action='store_true', default=False,
help=("if true, the code for training an rbm on the data is run"))
parser.add_argument('--rbmGauss', dest='rbmGauss',action='store_true', default=False,
help=("if true, the code for training an rbm on the data is run"))
parser.add_argument('--db', dest='db',action='store_true', default=False,
help=("if true, the code for training a deepbelief net on the"
"data is run"))
parser.add_argument('--save_best_weights', dest='save_best_weights',action='store_true', default=False,
help=("if true, the best weights are used and saved during training."))
parser.add_argument('--dbgauss', dest='dbgauss',action='store_true', default=False,
help=(("if true, a dbn is trained with gaussian visible units for rbms"
"and relu for hidden units")))
parser.add_argument('--nesterov', dest='nesterov',action='store_true', default=False,
help=("if true, the deep belief net is trained using nesterov momentum"))
parser.add_argument('--rbmnesterov', dest='rbmnesterov',action='store_true', default=False,
help=("if true, rbms are trained using nesterov momentum"))
parser.add_argument('--adversarial_training', dest='adversarial_training',action='store_true', default=False,
help=("if true, we use adversarial training"))
parser.add_argument('--rmsprop', dest='rmsprop',action='store_true', default=False,
help=("if true, rmsprop is used when training the deep belief net."))
parser.add_argument('--rbmrmsprop', dest='rbmrmsprop',action='store_true', default=False,
help=("if true, rmsprop is used when training the rbms."))
parser.add_argument('--cvgauss', dest='cvgauss',action='store_true', default=False,
help=("if true, performs cv on the MNIST data with gaussian units"))
parser.add_argument('--cvadv', dest='cvadv',action='store_true', default=False,
help=("if true, performs cv on the MNIST with adversarial training on"))
parser.add_argument('--conv', dest='conv',action='store_true', default=False,
help=("if true, trains a conv neural net on MNIST"))
parser.add_argument('--cv', dest='cv',action='store_true', default=False,
help=("if true, performs cv on the MNIST data"))
parser.add_argument('--display_main', dest='display_main',action='store_true', default=False,
help=("if true saves images of the net weights and samples from the net"))
parser.add_argument('--relu', dest='relu',action='store_true', default=False,
help=("if true, trains the RBM or DBN with a rectified linear unit"))
parser.add_argument('--trainSize', type=int, default=10000,
help='the number of tranining cases to be considered')
parser.add_argument('--testSize', type=int, default=1000,
help='the number of testing cases to be considered')
parser.add_argument('--preTrainEpochs', type=int, default=1,
help='the number of pretraining epochs')
parser.add_argument('--maxEpochs', type=int, default=100,
help='the maximum number of supervised epochs')
parser.add_argument('--miniBatchSize', type=int, default=10,
help='the number of training points in a mini batch')
parser.add_argument('netFile',
help="file where the serialized network should be saved")
parser.add_argument('--validation', dest='validation', action='store_true', default=False,
help="if true, the network is trained using a validation set")
parser.add_argument('--display', dest='display', action='store_true', default=False,
help="if true, figures will be displayed with matplotlib when available"
"Set to false when running the code via ssh, otherwise matplotlib"
"might crash.")
parser.add_argument('--path', dest='path', type=str, default="MNIST",
help="the path to the MNIST files")
# DEBUG mode?
parser.add_argument('--debug', dest='debug',action='store_true', default=False,
help=("if true, the deep belief net is ran in DEBUG mode"))
# Get the arguments of the program
args = parser.parse_args()
# Set the debug mode in the deep belief net
db.DEBUG = args.debug
def rbmMain(reconstructRandom=False):
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testingVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None, bTrain=False, path=args.path)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testingVectors / 255.0
# TODO: the reconstruction for relu still looks weird
if args.relu:
activationFunction = RectifiedNoisy()
learningRate = 5e-05
binary=False
else:
learningRate = 0.3
binary=True
activationFunction = Sigmoid()
# Train the network
if args.train:
# The number of hidden units is taken from a deep learning tutorial
    # The pixel values of the images have to be normalized before being
    # presented to the network
nrVisible = len(trainingScaledVectors[0])
nrHidden = 500
# use 1 dropout to test the rbm for now
net = rbm.RBM(nrVisible, nrHidden, learningRate, 1, 1,
visibleActivationFunction=activationFunction,
hiddenActivationFunction=activationFunction,
rmsprop=args.rbmrmsprop, nesterov=args.rbmnesterov,
sparsityConstraint=args.sparsity,
sparsityRegularization=0.01,
sparsityTraget=0.01)
net.train(trainingScaledVectors)
t = visualizeWeights(net.weights.T, (28,28), (10,10))
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
t = pickle.load(f)
net = pickle.load(f)
f.close()
# Reconstruct an image and see that it actually looks like a digit
test = testingScaledVectors[0,:]
# get a random image and see it looks like
if reconstructRandom:
test = np.random.random_sample(test.shape)
if args.display:
# Show the initial image first
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(test, (28,28)), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('initial7.png', transparent=True)
plt.show()
hidden = net.hiddenRepresentation(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(hidden, (25,20)), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('hiddenfeatures7.png', transparent=True)
# Show the reconstruction
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(recon, (28,28)), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('reconstruct7withall.png', transparent=True)
plt.show()
# Show the weights and their form in a tile fashion
# Plot the weights
plt.imshow(t, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weights2srmsprop.png', transparent=True)
plt.show()
print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(t, f)
pickle.dump(net, f)
def makeMNISTpic():
f = lambda x: readmnist.read(0, 1000, digits=[x], path=args.path)[0][0].reshape((28,28))
img = np.hstack(map(f, xrange(10)))
print img.shape
plt.imshow(img, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('MNISTdigits.png', transparent=True)
# plt.show()
def getMissclassifiedDigits():
# First let's display the nets layer weights:
with open(args.netFile, "rb") as f:
dbnNet = pickle.load(f)
testVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None, bTrain=False, path=args.path)
testVectors = testVectors / 255.0
_, predictedLabels = dbnNet.classify(testVectors)
missclassified = []
actualLabels = []
wrongPredictedLabels = []
count = 0
i = 0
while count < 10 and i < args.testSize:
if not predictedLabels[i] == testLabels[i]:
missclassified += [testVectors[i].reshape((28, 28))]
actualLabels += [testLabels[i]]
wrongPredictedLabels += [predictedLabels[i]]
count += 1
    i += 1
  print "worked on " + str(i) + " examples before finding 10 wrong"
misspreditctedimg = np.hstack(missclassified)
plt.imshow(misspreditctedimg, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('misspredictedMNISTdigits.png', transparent=True)
print "predicted"
print wrongPredictedLabels
print "actual"
print actualLabels
def displayWeightsAndDbSample():
# First let's display the nets layer weights:
with open(args.netFile, "rb") as f:
dbnNet = pickle.load(f)
for i in xrange(dbnNet.nrLayers - 1):
w = dbnNet.weights[i]
if i == 0:
t = visualizeWeights(w.T, (28,28), (10,10))
else:
t = visualizeWeights(w.T, (40, 25), (10, 10))
plt.imshow(t, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weightslayer' + str(i) +'.png', transparent=True)
# then sample from the net
samples = dbnNet.sample(10)
reshaped = map(lambda x: x.reshape(28, 28), samples)
reshaped = np.hstack(reshaped)
plt.imshow(reshaped, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('samples.png', transparent=True)
testVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None, bTrain=False, path=args.path)
testVectors = testVectors / 255.0
activationList = dbnNet.getHiddenActivations(testVectors)
activations = activationList[-1]
nice = []
for activation in activationList:
reshaped = map(lambda x: x.reshape(25, 40), activation)
reshaped = np.hstack(reshaped)
nice += [reshaped]
nice = np.vstack(nice)
plt.imshow(activations[0].reshape(25,40), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('activationsingle.png', transparent=True)
plt.imshow(nice, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('allactivations.png', transparent=True)
def rbmMainGauss(reconstructRandom=False):
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None, bTrain=False, path=args.path)
trainVectors = np.array(trainVectors, dtype='float')
trainingScaledVectors = scale(trainVectors)
testVectors = np.array(testVectors, dtype='float')
testingScaledVectors = scale(testVectors)
learningRate = 0.0005
# Train the network
if args.train:
# The number of hidden units is taken from a deep learning tutorial
    # The pixel values of the images have to be normalized before being
    # presented to the network
nrVisible = len(trainingScaledVectors[0])
nrHidden = 500
# use 1 dropout to test the rbm for now
net = rbm.RBM(nrVisible, nrHidden, learningRate, 1, 1,
visibleActivationFunction=Identity(),
hiddenActivationFunction=RectifiedNoisy(),
rmsprop=args.rbmrmsprop, nesterov=args.rbmnesterov)
net.train(trainingScaledVectors)
t = visualizeWeights(net.weights.T, (28,28), (10,10))
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
t = pickle.load(f)
net = pickle.load(f)
f.close()
# Reconstruct an image and see that it actually looks like a digit
test = testingScaledVectors[0,:]
# get a random image and see it looks like
if reconstructRandom:
test = np.random.random_sample(test.shape)
# Show the initial image first
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(test, (28,28)), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('initial7relu.png', transparent=True)
# plt.show()
# Show the reconstruction
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(recon, (28,28)), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('reconstruct7withallrelu.png', transparent=True)
# plt.show()
# Show the weights and their form in a tile fashion
# Plot the weights
plt.imshow(t, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weights2srmsproprelu.png', transparent=True)
# plt.show()
print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(t, f)
pickle.dump(net, f)
def makeNicePlots():
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=[2], bTrain=True, path=args.path)
testingVectors, testLabels =\
readmnist.read(0, args.testSize, digits=[2], bTrain=False, path=args.path)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testingVectors / 255.0
# TODO: the reconstruction for relu still looks weird
learningRate = 0.1
binary = True
activationFunction = Sigmoid()
# Train the network
if args.train:
# The number of hidden units is taken from a deep learning tutorial
    # The pixel values of the images have to be normalized before being
    # presented to the network
nrVisible = len(trainingScaledVectors[0])
nrHidden = 500
# use 1 dropout to test the rbm for now
net = rbm.RBM(nrVisible, nrHidden, learningRate, 1, 1,
visibleActivationFunction=activationFunction,
hiddenActivationFunction=activationFunction,
rmsprop=args.rbmrmsprop, nesterov=args.rbmnesterov)
net.train(trainingScaledVectors)
t = visualizeWeights(net.weights.T, (28,28), (10,10))
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
t = pickle.load(f)
net = pickle.load(f)
f.close()
incomingWeightVector = net.weights.T[0]
print " testingVectors[0]"
print testingVectors[0]
testVec = testingScaledVectors[0]
# reshape this vector to be 28, 28
reshapedWeightVector = incomingWeightVector.reshape((28, 28))
print "reshapedWeightVector"
print reshapedWeightVector
reshapedTestVec = testVec.reshape((28, 28))
print "reshapedTestVec"
print reshapedTestVec
overpose = reshapedWeightVector * reshapedTestVec
print "overpose"
print overpose
plt.imshow(np.absolute(reshapedWeightVector), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weightvectorreshaped.png', transparent=True)
plt.imshow(np.absolute(reshapedTestVec), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('reshapedTestVec.png', transparent=True)
plt.imshow(np.absolute(overpose), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('overpose.png', transparent=True)
def pcaSklearn(training, dimension=700):
pca = PCA(n_components=dimension)
pca.fit(training)
low = pca.transform(training)
same = pca.inverse_transform(low)
print "low[0].shape"
print low[0].shape
image2DInitial = vectorToImage(training[0], (28,28))
print same[0].shape
image2D = vectorToImage(same[0], (28,28))
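# Note: the (20, 20) reshape of the reduced vector below assumes
# dimension == 400 (as passed in by pcaMain); the default of 700 components
# would not fit a 20x20 image.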
image2DLow = vectorToImage(low[0], (20,20))
plt.imshow(image2DLow, cmap=plt.cm.gray)
plt.show()
plt.imshow(image2DInitial, cmap=plt.cm.gray)
plt.show()
plt.imshow(image2D, cmap=plt.cm.gray)
plt.show()
print "done"
return low
def pcaOnMnist(training, dimension=700):
mean, principalComponents = PCA.pca(training, dimension)
low, same = PCA.reduce(principalComponents, training, mean, noSame=False)
print "low[0].shape"
print low[0].shape
image2DInitial = vectorToImage(training[0], (28,28))
print same[0].shape
image2D = vectorToImage(same[0], (28,28))
image2DLow = vectorToImage(low[0], (20,20))
plt.imshow(image2DLow, cmap=plt.cm.gray)
plt.show()
plt.imshow(image2DInitial, cmap=plt.cm.gray)
plt.show()
plt.imshow(image2D, cmap=plt.cm.gray)
plt.show()
print "done"
return low
def cvMNIST():
assert not args.relu, "do not run this function for rectified linear units"
training = args.trainSize
data, labels =\
readmnist.read(0, training, bTrain=True, path=args.path)
data, labels = shuffle(data, labels)
scaledData = data / 255.0
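# labelsToVectors one-hot encodes the digit labels, e.g. with 10 classes the
# label 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]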
vectorLabels = labelsToVectors(labels, 10)
activationFunction = Sigmoid()
bestFold = -1
bestError = np.inf
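# Note: the assert at the top of this function rules out args.relu, so the
# branch below is effectively dead and only the sigmoid parameter grid is used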
if args.relu:
# params =[(0.01, 0.01) , (0.01, 0.05), (0.05, 0.1), (0.05, 0.05)]
# params =[(0.0001, 0.01), (0.00001, 0.001), (0.00001, 0.0001), (0.0001, 0.1)]
params =[(1e-05, 0.001, 0.9), (5e-06, 0.001, 0.9), (5e-05, 0.001, 0.9),
(1e-05, 0.001, 0.95), (5e-06, 0.001, 0.95), (5e-05, 0.001, 0.95),
(1e-05, 0.001, 0.99), (5e-06, 0.001, 0.99), (5e-05, 0.001, 0.99)]
else:
# params =[(0.1, 0.1) , (0.1, 0.05), (0.05, 0.1), (0.05, 0.05)]
params =[(0.05, 0.05) , (0.05, 0.075), (0.075, 0.05), (0.075, 0.075)]
# params =[(0.05, 0.075, 0.1), (0.05, 0.1, 0.1), (0.01, 0.05, 0.1),
# (0.05, 0.075, 0.01), (0.05, 0.1, 0.01), (0.01, 0.05, 0.01),
# (0.05, 0.075, 0.001), (0.05, 0.1, 0.001), (0.01, 0.05, 0.001)]
nrFolds = len(params)
kf = cross_validation.KFold(n=training, n_folds=nrFolds)
i = 0
for training, testing in kf:
# Train the net
# Try 1200, 1200, 1200
trainData = scaledData[training]
trainLabels = vectorLabels[training]
# net = db.DBN(5, [784, 1000, 1000, 1000, 10],
net = db.DBN(5, [784, 500, 500, 2000, 10],
binary=1-args.relu,
unsupervisedLearningRate=params[i][0],
supervisedLearningRate=params[i][1],
momentumMax=0.95,
nesterovMomentum=args.nesterov,
rbmNesterovMomentum=args.rbmnesterov,
activationFunction=activationFunction,
rbmActivationFunctionVisible=activationFunction,
rbmActivationFunctionHidden=activationFunction,
rmsprop=args.rmsprop,
save_best_weights=args.save_best_weights,
visibleDropout=0.8,
hiddenDropout=0.5,
weightDecayL1=0,
weightDecayL2=0,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
miniBatchSize=args.miniBatchSize,
preTrainEpochs=args.preTrainEpochs,
sparsityTragetRbm=0.01,
sparsityConstraintRbm=False,
sparsityRegularizationRbm=None)
net.train(trainData, trainLabels, maxEpochs=args.maxEpochs,
validation=args.validation)
probabilities, predicted = net.classify(scaledData[testing])
testLabels = labels[testing]
# Test it with the testing data and measure the misclassification error
error = getClassificationError(predicted, testLabels)
print "error for " + str(params[i])
print error
if error < bestError:
bestError = error
bestFold = i
i += 1
print "best fold was " + str(bestFold)
print "bestParameter " + str(params[bestFold])
print "bestError" + str(bestError)
# NOT for relu: use GaussianMNIST for that
def deepbeliefMNIST():
assert not args.relu, "do not run this method for rectified linear units"
training = args.trainSize
testing = args.testSize
trainVectors, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print trainVectors[0].shape
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
activationFunction = Sigmoid()
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testVectors / 255.0
vectorLabels = labelsToVectors(trainLabels, 10)
unsupervisedLearningRate = 0.01
supervisedLearningRate = 0.05
momentumMax = 0.95
if args.train:
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
binary=False,
unsupervisedLearningRate=unsupervisedLearningRate,
supervisedLearningRate=supervisedLearningRate,
momentumMax=momentumMax,
activationFunction=activationFunction,
rbmActivationFunctionVisible=activationFunction,
rbmActivationFunctionHidden=activationFunction,
nesterovMomentum=args.nesterov,
rbmNesterovMomentum=args.rbmnesterov,
rmsprop=args.rmsprop,
hiddenDropout=0.5,
visibleDropout=0.8,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
save_best_weights=args.save_best_weights,
adversarial_training=args.adversarial_training,
adversarial_coefficient=0.5,
adversarial_epsilon=1.0 / 255,
weightDecayL1=0,
weightDecayL2=0,
preTrainEpochs=args.preTrainEpochs)
net.train(trainingScaledVectors, vectorLabels,
maxEpochs=args.maxEpochs, validation=args.validation)
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
net = pickle.load(f)
f.close()
probs, predicted = net.classify(testingScaledVectors)
correct = 0
errorCases = []
for i in xrange(testing):
print "predicted"
print "probs"
print probs[i]
print predicted[i]
print "actual"
actual = testLabels[i]
print actual
if predicted[i] == actual:
correct += 1
else:
errorCases.append(i)
# Mistakes for digits
# You just need to display some for the report
# trueDigits = testLabels[errorCases]
# predictedDigits = predicted[errorCases]
print "correct"
print correct
# for w in net.weights:
# print w
# for b in net.biases:
# print b
# t = visualizeWeights(net.weights[0].T, trainImages[0].(28, 28), (10,10))
# plt.imshow(t, cmap=plt.cm.gray)
# plt.show()
# print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(net, f)
f.close()
def deepbeliefMNISTGaussian():
training = args.trainSize
testing = args.testSize
trainVectors, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print trainVectors[0].shape
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
trainVectors = np.array(trainVectors, dtype='float')
trainingScaledVectors = scale(trainVectors)
testVectors = np.array(testVectors, dtype='float')
testingScaledVectors = scale(testVectors)
vectorLabels = labelsToVectors(trainLabels, 10)
unsupervisedLearningRate = 0.005
supervisedLearningRate = 0.005
momentumMax = 0.97
sparsityTragetRbm = 0.01
sparsityConstraintRbm = False
sparsityRegularizationRbm = 0.005
if args.train:
net = db.DBN(5, [784, 1200, 1200, 1200, 10],
binary=False,
unsupervisedLearningRate=unsupervisedLearningRate,
supervisedLearningRate=supervisedLearningRate,
momentumMax=momentumMax,
activationFunction=Rectified(),
rbmActivationFunctionVisible=Identity(),
rbmActivationFunctionHidden=RectifiedNoisy(),
nesterovMomentum=args.nesterov,
rbmNesterovMomentum=args.rbmnesterov,
save_best_weights=args.save_best_weights,
rmsprop=args.rmsprop,
hiddenDropout=0.5,
visibleDropout=0.8,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
weightDecayL1=0,
weightDecayL2=0,
sparsityTragetRbm=sparsityTragetRbm,
sparsityConstraintRbm=sparsityConstraintRbm,
sparsityRegularizationRbm=sparsityRegularizationRbm,
preTrainEpochs=args.preTrainEpochs)
net.train(trainingScaledVectors, vectorLabels,
maxEpochs=args.maxEpochs, validation=args.validation)
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
net = pickle.load(f)
f.close()
probs, predicted = net.classify(testingScaledVectors)
print type(predicted)
correct = 0
errorCases = []
for i in xrange(testing):
print "predicted"
print "probs"
print probs[i]
print predicted[i]
print "actual"
actual = testLabels[i]
print actual
if predicted[i] == actual:
correct += 1
else:
errorCases.append(i)
print "correct"
print correct
if args.save:
f = open(args.netFile, "wb")
pickle.dump(net, f)
f.close()
def cvMNISTGaussian():
training = args.trainSize
trainVectors, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
trainVectors = np.array(trainVectors, dtype='float')
# Ensure the data has zero mean and 1 variance
trainingScaledVectors = scale(trainVectors)
vectorLabels = labelsToVectors(trainLabels, 10)
bestFold = -1
bestError = np.inf
params = [(5e-03, 1e-02), (1e-02, 5e-02), (5e-03, 5e-02), (1e-02, 5e-03), (5e-03, 5e-03), (1e-02, 1e-02) ]
correctness = []
nrFolds = len(params)
kf = cross_validation.KFold(n=training, n_folds=nrFolds)
i = 0
for train, test in kf:
# Train the net
# Try 1200, 1200, 1200
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
binary=False,
unsupervisedLearningRate=params[i][0],
supervisedLearningRate=params[i][1],
momentumMax=0.95,
nesterovMomentum=args.nesterov,
rbmNesterovMomentum=args.rbmnesterov,
activationFunction=Rectified(),
rbmActivationFunctionVisible=Identity(),
rbmActivationFunctionHidden=RectifiedNoisy(),
rmsprop=args.rmsprop,
save_best_weights=args.save_best_weights,
visibleDropout=0.8,
hiddenDropout=0.5,
weightDecayL1=0,
weightDecayL2=0,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
miniBatchSize=args.miniBatchSize,
preTrainEpochs=args.preTrainEpochs,
sparsityConstraintRbm=False,
sparsityTragetRbm=0.01,
sparsityRegularizationRbm=None)
net.train(trainingScaledVectors[train], vectorLabels[train],
maxEpochs=args.maxEpochs,
validation=args.validation)
probabilities, predicted = net.classify(trainingScaledVectors[test])
# Test it with the testing data and measure the misclassification error
error = getClassificationError(predicted, trainLabels[test])
print "error for " + str(params[i])
print error
correct = 1.0 - error
if error < bestError:
bestError = error
bestFold = i
i += 1
correctness += [correct]
print "best fold was " + str(bestFold)
print "bestParameter " + str(params[bestFold])
print "bestError " + str(bestError)
for i in xrange(len(params)):
print "parameter tuple " + str(params[i]) + " achieved correctness of " + str(correctness[i])
def cvadversarialMNIST():
training = args.trainSize
testing = args.testSize
trainVectors, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print trainVectors[0].shape
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
activationFunction = Sigmoid()
trainingScaledVectors = trainVectors / 255.0
vectorLabels = labelsToVectors(trainLabels, 10)
bestFold = -1
bestError = np.inf
params = [(5e-02, 1e-02), (1e-02, 5e-02), (5e-02, 5e-02), (1e-02, 5e-03), (5e-02, 5e-03) ]
correctness = []
nrFolds = len(params)
kf = cross_validation.KFold(n=training, n_folds=nrFolds)
i = 0
for train, test in kf:
print "cv fold", i
print "params", params[i]
net = db.DBN(5, [784, 1500, 1500, 1500, 10],
binary=False,
unsupervisedLearningRate=params[i][0],
supervisedLearningRate=params[i][1],
momentumMax=0.95,
activationFunction=activationFunction,
rbmActivationFunctionVisible=activationFunction,
rbmActivationFunctionHidden=activationFunction,
nesterovMomentum=args.nesterov,
rbmNesterovMomentum=args.rbmnesterov,
rmsprop=args.rmsprop,
save_best_weights=args.save_best_weights,
hiddenDropout=0.5,
visibleDropout=0.8,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
adversarial_training=args.adversarial_training,
adversarial_coefficient=0.5,
adversarial_epsilon=1.0 / 255,
weightDecayL1=0,
weightDecayL2=0,
preTrainEpochs=args.preTrainEpochs)
net.train(trainingScaledVectors[train], vectorLabels[train],
maxEpochs=args.maxEpochs,
validation=args.validation)
probabilities, predicted = net.classify(trainingScaledVectors[test])
# Test it with the testing data and measure the misclassification error
error = getClassificationError(predicted, trainLabels[test])
print "error for " + str(params[i])
print error
correct = 1.0 - error
if error < bestError:
bestError = error
bestFold = i
i += 1
correctness += [correct]
print "best fold was " + str(bestFold)
print "bestParameter " + str(params[bestFold])
print "bestError " + str(bestError)
for i in xrange(len(params)):
print "parameter tuple " + str(params[i]) + " achieved correctness of " + str(correctness[i])
def adversarialMNIST():
assert not args.relu, "do not run this method for rectified linear units"
training = args.trainSize
testing = args.testSize
trainVectors, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print trainVectors[0].shape
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
activationFunction = Sigmoid()
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testVectors / 255.0
vectorLabels = labelsToVectors(trainLabels, 10)
unsupervisedLearningRate = 0.01
supervisedLearningRate = 0.05
momentumMax = 0.95
if args.train:
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
binary=False,
unsupervisedLearningRate=unsupervisedLearningRate,
supervisedLearningRate=supervisedLearningRate,
momentumMax=momentumMax,
activationFunction=activationFunction,
rbmActivationFunctionVisible=activationFunction,
rbmActivationFunctionHidden=activationFunction,
nesterovMomentum=args.nesterov,
rbmNesterovMomentum=args.rbmnesterov,
rmsprop=args.rmsprop,
save_best_weights=args.save_best_weights,
hiddenDropout=0.5,
visibleDropout=0.8,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
adversarial_training=args.adversarial_training,
adversarial_coefficient=0.5,
adversarial_epsilon=1.0 / 255,
weightDecayL1=0,
weightDecayL2=0,
preTrainEpochs=args.preTrainEpochs)
net.train(trainingScaledVectors, vectorLabels,
maxEpochs=args.maxEpochs, validation=args.validation)
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
net = pickle.load(f)
f.close()
probs, predicted = net.classify(testingScaledVectors)
correct = 0
errorCases = []
for i in xrange(testing):
print "predicted"
print "probs"
print probs[i]
print predicted[i]
print "actual"
actual = testLabels[i]
print actual
if predicted[i] == actual:
correct += 1
else:
errorCases.append(i)
print "correct"
print correct
# TODO: fix this (look at the ML coursework for it)
# Even better, use LDA
# think of normalizing them to 0.1 for pca as well
def pcaMain():
training = args.trainSize
testing = args.testSize
train, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print train[0].shape
pcaSklearn(train, dimension=400)
def convolutionalNNMnist():
training = args.trainSize
testing = args.testSize
# building the layers
layer1 = ConvolutionalLayer(50, (5, 5) , Sigmoid())
layer2 = PoolingLayer((2, 2))
layer3 = ConvolutionalLayer(20, (5, 5), Sigmoid())
layer4 = PoolingLayer((2, 2))
# TODO: add fully connected layer
layer5 = SoftmaxLayer(10)
layers = [layer1, layer2, layer3, layer4, layer5]
net = convNet.ConvolutionalNN(layers, TrainingOptions(10, 1.0, momentumMax=0.9))
trainData, trainLabels =\
readmnist.read(0, training, digits=None, bTrain=True, path="MNIST", returnImages=True)
# transform the labels into vector (one hot encoding)
trainLabels = labelsToVectors(trainLabels, 10)
net.train(trainData, trainLabels, epochs=args.maxEpochs)
testData, testLabels =\
readmnist.read(0, testing, digits=None, bTrain=False, path="MNIST", returnImages=True)
outputData, labels = net.test(testData)
print " "
print "accuracy"
print sum(labels == testLabels) * 1.0 / testing
def main():
import random
print "FIXING RANDOMNESS"
random.seed(6)
np.random.seed(6)
if args.db + args.pca + args.rbm + args.cv + \
args.cvgauss + args.rbmGauss + args.dbgauss + args.display_main + args.conv + args.cvadv != 1:
raise Exception("You have to decide on one main method to run")
# makeNicePlots()
# makeMNISTpic()
if args.db:
deepbeliefMNIST()
if args.pca:
pcaMain()
if args.rbm:
rbmMain()
if args.cv:
cvMNIST()
if args.cvgauss:
cvMNISTGaussian()
if args.rbmGauss:
rbmMainGauss()
if args.dbgauss:
deepbeliefMNISTGaussian()
if args.display_main:
displayWeightsAndDbSample()
if args.conv:
convolutionalNNMnist()
if args.cvadv:
cvadversarialMNIST()
if __name__ == '__main__':
main()
|
mihaelacr/pydeeplearn
|
code/MNISTdigits.py
|
Python
|
bsd-3-clause
| 36,365
|
[
"Gaussian"
] |
4ab9c62d6b03dfdf66d631d94ed792be116d10395a2de2dc69f8c5b74dff4f08
|
""" Definition for WMSHistory Monitoring type.
Drop-in replacement for the Accounting/WMSHistory accounting type.
Filled by the agent "WorkloadManagement/StatesAccountingAgent"
"""
from DIRAC.MonitoringSystem.Client.Types.BaseType import BaseType
__RCSID__ = "$Id$"
class WMSHistory(BaseType):
"""
.. class:: WMSHistory
"""
def __init__(self):
"""c'tor
:param self: self reference
"""
super(WMSHistory, self).__init__()
self.keyFields = [
"Status",
"Site",
"User",
"UserGroup",
"JobGroup",
"MinorStatus",
"ApplicationStatus",
"JobSplitType",
]
self.monitoringFields = ["Jobs", "Reschedules"]
self.index = "wmshistory_index"
self.addMapping(
{
"Status": {"type": "keyword"},
"Site": {"type": "keyword"},
"JobSplitType": {"type": "keyword"},
"ApplicationStatus": {"type": "keyword"},
"MinorStatus": {"type": "keyword"},
"User": {"type": "keyword"},
"JobGroup": {"type": "keyword"},
"UserGroup": {"type": "keyword"},
}
)
# {'timestamp': {'type': 'date'}} will be added for all monitoring types
self.dataToKeep = 86400 * 30
self.checkType()
|
ic-hep/DIRAC
|
src/DIRAC/MonitoringSystem/Client/Types/WMSHistory.py
|
Python
|
gpl-3.0
| 1,432
|
[
"DIRAC"
] |
1b491c082acaa5f0fa7cbb8696177b5be4332fef75f20fc59fd085ce037bf4eb
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from concurrent import futures
from functools import partial, reduce
import json
import numpy as np
import os
import re
import operator
import urllib.parse
import pyarrow as pa
import pyarrow.lib as lib
import pyarrow._parquet as _parquet
from pyarrow._parquet import (ParquetReader, Statistics, # noqa
FileMetaData, RowGroupMetaData,
ColumnChunkMetaData,
ParquetSchema, ColumnSchema)
from pyarrow.fs import (LocalFileSystem, FileSystem,
_resolve_filesystem_and_path, _ensure_filesystem)
from pyarrow import filesystem as legacyfs
from pyarrow.util import guid, _is_path_like, _stringify_path
_URI_STRIP_SCHEMES = ('hdfs',)
def _parse_uri(path):
path = _stringify_path(path)
parsed_uri = urllib.parse.urlparse(path)
if parsed_uri.scheme in _URI_STRIP_SCHEMES:
return parsed_uri.path
else:
# ARROW-4073: On Windows returning the path with the scheme
# stripped removes the drive letter, if any
return path
def _get_filesystem_and_path(passed_filesystem, path):
if passed_filesystem is None:
return legacyfs.resolve_filesystem_and_path(path, passed_filesystem)
else:
passed_filesystem = legacyfs._ensure_filesystem(passed_filesystem)
parsed_path = _parse_uri(path)
return passed_filesystem, parsed_path
def _check_contains_null(val):
if isinstance(val, bytes):
for byte in val:
if isinstance(byte, bytes):
compare_to = chr(0)
else:
compare_to = 0
if byte == compare_to:
return True
elif isinstance(val, str):
return '\x00' in val
return False
def _check_filters(filters, check_null_strings=True):
"""
Check if filters are well-formed.
"""
if filters is not None:
if len(filters) == 0 or any(len(f) == 0 for f in filters):
raise ValueError("Malformed filters")
if isinstance(filters[0][0], str):
# We have encountered the situation where we have one nesting level
# too few:
# We have [(,,), ..] instead of [[(,,), ..]]
filters = [filters]
if check_null_strings:
for conjunction in filters:
for col, op, val in conjunction:
if (
isinstance(val, list) and
all(_check_contains_null(v) for v in val) or
_check_contains_null(val)
):
raise NotImplementedError(
"Null-terminated binary strings are not supported "
"as filter values."
)
return filters
_DNF_filter_doc = """Predicates are expressed in disjunctive normal form (DNF), like
``[[('x', '=', 0), ...], ...]``. DNF allows arbitrary boolean logical
combinations of single column predicates. The innermost tuples each
describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective and
multiple column predicate. Finally, the most outer list combines these
filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation."""
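# A hedged illustration of the DNF form described above (the column names
# 'year' and 'status' are invented for the example): the outer list is an OR
# of AND-groups, so
#
#   filters = [
#       [('year', '=', 2020), ('status', 'in', {'ok', 'done'})],
#       [('year', '>', 2020)],
#   ]
#
# keeps rows where (year == 2020 AND status in {'ok', 'done'}) OR year > 2020.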
def _filters_to_expression(filters):
"""
Check if filters are well-formed.
See _DNF_filter_doc above for more details.
"""
import pyarrow.dataset as ds
if isinstance(filters, ds.Expression):
return filters
filters = _check_filters(filters, check_null_strings=False)
def convert_single_predicate(col, op, val):
field = ds.field(col)
if op == "=" or op == "==":
return field == val
elif op == "!=":
return field != val
elif op == '<':
return field < val
elif op == '>':
return field > val
elif op == '<=':
return field <= val
elif op == '>=':
return field >= val
elif op == 'in':
return field.isin(val)
elif op == 'not in':
return ~field.isin(val)
else:
raise ValueError(
'"{0}" is not a valid operator in predicates.'.format(
(col, op, val)))
disjunction_members = []
for conjunction in filters:
conjunction_members = [
convert_single_predicate(col, op, val)
for col, op, val in conjunction
]
disjunction_members.append(reduce(operator.and_, conjunction_members))
return reduce(operator.or_, disjunction_members)
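# Sketch of what the conversion above produces, with an invented column 'x':
#   _filters_to_expression([[('x', '>', 0), ('x', '<', 10)]])
# is equivalent to the dataset expression
#   (ds.field('x') > 0) & (ds.field('x') < 10)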
# ----------------------------------------------------------------------
# Reading a single Parquet file
class ParquetFile:
"""
Reader interface for a single Parquet file.
Parameters
----------
source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
Readable source. For passing bytes or buffer-like file containing a
Parquet file, use pyarrow.BufferReader.
metadata : FileMetaData, default None
Use existing metadata object, rather than reading from file.
common_metadata : FileMetaData, default None
Will be used in reads for pandas schema metadata if not found in the
main file's metadata, no other uses at the moment.
memory_map : bool, default False
If the source is a file path, use a memory map to read file, which can
improve performance in some environments.
buffer_size : int, default 0
If positive, perform read buffering when deserializing individual
column chunks. Otherwise IO calls are unbuffered.
"""
def __init__(self, source, metadata=None, common_metadata=None,
read_dictionary=None, memory_map=False, buffer_size=0):
self.reader = ParquetReader()
self.reader.open(source, use_memory_map=memory_map,
buffer_size=buffer_size,
read_dictionary=read_dictionary, metadata=metadata)
self.common_metadata = common_metadata
self._nested_paths_by_prefix = self._build_nested_paths()
def _build_nested_paths(self):
paths = self.reader.column_paths
result = defaultdict(list)
for i, path in enumerate(paths):
key = path[0]
rest = path[1:]
while True:
result[key].append(i)
if not rest:
break
key = '.'.join((key, rest[0]))
rest = rest[1:]
return result
@property
def metadata(self):
return self.reader.metadata
@property
def schema(self):
"""
Return the Parquet schema, unconverted to Arrow types
"""
return self.metadata.schema
@property
def schema_arrow(self):
"""
Return the inferred Arrow schema, converted from the whole Parquet
file's schema
"""
return self.reader.schema_arrow
@property
def num_row_groups(self):
return self.reader.num_row_groups
def read_row_group(self, i, columns=None, use_threads=True,
use_pandas_metadata=False):
"""
Read a single row group from a Parquet file.
Parameters
----------
columns: list
If not None, only these columns will be read from the row group. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the row group as a table (of columns)
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_group(i, column_indices=column_indices,
use_threads=use_threads)
def read_row_groups(self, row_groups, columns=None, use_threads=True,
use_pandas_metadata=False):
"""
Read multiple row groups from a Parquet file.
Parameters
----------
row_groups: list
Only these row groups will be read from the file.
columns: list
If not None, only these columns will be read from the row group. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the row groups as a table (of columns).
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_groups(row_groups,
column_indices=column_indices,
use_threads=use_threads)
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read a Table from Parquet format.
Parameters
----------
columns: list
If not None, only these columns will be read from the file. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the file as a table (of columns).
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_all(column_indices=column_indices,
use_threads=use_threads)
def scan_contents(self, columns=None, batch_size=65536):
"""
Read contents of file for the given columns and batch size.
Notes
-----
This function's primary purpose is benchmarking.
The scan is executed on a single thread.
Parameters
----------
columns : list of integers, default None
Select columns to read, if None scan all columns.
batch_size : int, default 64K
Number of rows to read at a time internally.
Returns
-------
num_rows : number of rows in file
"""
column_indices = self._get_column_indices(columns)
return self.reader.scan_contents(column_indices,
batch_size=batch_size)
def _get_column_indices(self, column_names, use_pandas_metadata=False):
if column_names is None:
return None
indices = []
for name in column_names:
if name in self._nested_paths_by_prefix:
indices.extend(self._nested_paths_by_prefix[name])
if use_pandas_metadata:
file_keyvalues = self.metadata.metadata
common_keyvalues = (self.common_metadata.metadata
if self.common_metadata is not None
else None)
if file_keyvalues and b'pandas' in file_keyvalues:
index_columns = _get_pandas_index_columns(file_keyvalues)
elif common_keyvalues and b'pandas' in common_keyvalues:
index_columns = _get_pandas_index_columns(common_keyvalues)
else:
index_columns = []
if indices is not None and index_columns:
indices += [self.reader.column_name_idx(descr)
for descr in index_columns
if not isinstance(descr, dict)]
return indices
_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
def _sanitized_spark_field_name(name):
return _SPARK_DISALLOWED_CHARS.sub('_', name)
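# Illustration: a field named 'my col,1' is rewritten to 'my_col_1', since
# spaces, commas, semicolons, braces, parentheses, '=' and tab/newline
# characters are all replaced by underscores.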
def _sanitize_schema(schema, flavor):
if 'spark' in flavor:
sanitized_fields = []
schema_changed = False
for field in schema:
name = field.name
sanitized_name = _sanitized_spark_field_name(name)
if sanitized_name != name:
schema_changed = True
sanitized_field = pa.field(sanitized_name, field.type,
field.nullable, field.metadata)
sanitized_fields.append(sanitized_field)
else:
sanitized_fields.append(field)
new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
return new_schema, schema_changed
else:
return schema, False
def _sanitize_table(table, new_schema, flavor):
# TODO: This will not handle prohibited characters in nested field names
if 'spark' in flavor:
column_data = [table[i] for i in range(table.num_columns)]
return pa.Table.from_arrays(column_data, schema=new_schema)
else:
return table
_parquet_writer_arg_docs = """version : {"1.0", "2.0"}, default "1.0"
Determine which Parquet logical types are available for use, whether the
reduced set from the Parquet 1.x.x format or the expanded logical types
added in format version 2.0.0 and after. Note that files written with
version='2.0' may not be readable in all Parquet implementations, so
version='1.0' is likely the choice that maximizes file compatibility. Some
features, such as lossless storage of nanosecond timestamps as INT64
physical storage, are only available with version='2.0'. The Parquet 2.0.0
format version also introduced a new serialized data page format; this can
be enabled separately using the data_page_version option.
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
use_deprecated_int96_timestamps : bool, default None
Write timestamps to INT96 Parquet format. Defaults to False unless enabled
by the flavor argument. This takes priority over the coerce_timestamps option.
coerce_timestamps : str, default None
Cast timestamps to a particular resolution. The default depends on `version`.
For ``version='1.0'`` (the default), nanoseconds will be cast to
microseconds ('us'), and seconds to milliseconds ('ms') by default. For
``version='2.0'``, the original resolution is preserved and no casting
is done by default. The casting might result in loss of data, in which
case ``allow_truncated_timestamps=True`` can be used to suppress the
raised exception.
Valid values: {None, 'ms', 'us'}
data_page_size : int, default None
Set a target threshold for the approximate encoded size of data
pages within a column chunk (in bytes). If None, use the default data page
size of 1MByte.
allow_truncated_timestamps : bool, default False
Allow loss of data when coercing timestamps to a particular
resolution. E.g. if microsecond or nanosecond data is lost when coercing to
'ms', do not raise an exception.
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
write_statistics : bool or list
Specify if we should write statistics in general (default is True) or only
for some columns.
flavor : {'spark'}, default None
Sanitize schema or set other compatibility options to work with
various target systems.
filesystem : FileSystem, default None
If nothing passed, will be inferred from `where` if path-like, else
`where` is already a file-like object so no filesystem is needed.
compression_level: int or dict, default None
Specify the compression level for a codec, either on a general basis or
per-column. If None is passed, arrow selects the compression level for
the compression codec in use. The compression level has a different
meaning for each codec, so you have to read the documentation of the
codec you are using.
An exception is thrown if the compression codec does not allow specifying
a compression level.
use_byte_stream_split: bool or list, default False
Specify if the byte_stream_split encoding should be used in general or
only for some columns. If both dictionary and byte_stream_split are
enabled, then dictionary is preferred.
The byte_stream_split encoding is valid only for floating-point data types
and should be combined with a compression codec.
data_page_version : {"1.0", "2.0"}, default "1.0"
The serialized Parquet data page format version to write, defaults to
1.0. This does not impact the file schema logical types and Arrow to
Parquet type casting behavior; for that use the "version" option.
"""
class ParquetWriter:
__doc__ = """
Class for incrementally building a Parquet file for Arrow tables.
Parameters
----------
where : path or file-like object
schema : arrow Schema
{}
**options : dict
If options contains a key `metadata_collector` then the
corresponding value is assumed to be a list (or any object with
`.append` method) that will be filled with the file metadata instance
of the written file.
""".format(_parquet_writer_arg_docs)
def __init__(self, where, schema, filesystem=None,
flavor=None,
version='1.0',
use_dictionary=True,
compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None,
compression_level=None,
use_byte_stream_split=False,
writer_engine_version=None,
data_page_version='1.0',
**options):
if use_deprecated_int96_timestamps is None:
# Use int96 timestamps for Spark
if flavor is not None and 'spark' in flavor:
use_deprecated_int96_timestamps = True
else:
use_deprecated_int96_timestamps = False
self.flavor = flavor
if flavor is not None:
schema, self.schema_changed = _sanitize_schema(schema, flavor)
else:
self.schema_changed = False
self.schema = schema
self.where = where
# If we open a file using a filesystem, store file handle so we can be
# sure to close it when `self.close` is called.
self.file_handle = None
filesystem, path = _resolve_filesystem_and_path(
where, filesystem, allow_legacy_filesystem=True
)
if filesystem is not None:
if isinstance(filesystem, legacyfs.FileSystem):
# legacy filesystem (eg custom subclass)
# TODO deprecate
sink = self.file_handle = filesystem.open(path, 'wb')
else:
# ARROW-10480: do not auto-detect compression. While
# a filename like foo.parquet.gz is nonconforming, it
# shouldn't implicitly apply compression.
sink = self.file_handle = filesystem.open_output_stream(
path, compression=None)
else:
sink = where
self._metadata_collector = options.pop('metadata_collector', None)
engine_version = 'V2'
self.writer = _parquet.ParquetWriter(
sink, schema,
version=version,
compression=compression,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
compression_level=compression_level,
use_byte_stream_split=use_byte_stream_split,
writer_engine_version=engine_version,
data_page_version=data_page_version,
**options)
self.is_open = True
def __del__(self):
if getattr(self, 'is_open', False):
self.close()
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
# return false since we want to propagate exceptions
return False
def write_table(self, table, row_group_size=None):
if self.schema_changed:
table = _sanitize_table(table, self.schema, self.flavor)
assert self.is_open
if not table.schema.equals(self.schema, check_metadata=False):
msg = ('Table schema does not match schema used to create file: '
'\ntable:\n{!s} vs. \nfile:\n{!s}'
.format(table.schema, self.schema))
raise ValueError(msg)
self.writer.write_table(table, row_group_size=row_group_size)
def close(self):
if self.is_open:
self.writer.close()
self.is_open = False
if self._metadata_collector is not None:
self._metadata_collector.append(self.writer.metadata)
if self.file_handle is not None:
self.file_handle.close()
def _get_pandas_index_columns(keyvalues):
return (json.loads(keyvalues[b'pandas'].decode('utf8'))
['index_columns'])
# ----------------------------------------------------------------------
# Metadata container providing instructions about reading a single Parquet
# file, possibly part of a partitioned dataset
class ParquetDatasetPiece:
"""
A single chunk of a potentially larger Parquet dataset to read.
The arguments will indicate to read either a single row group or all row
groups, and whether to add partition keys to the resulting pyarrow.Table.
Parameters
----------
path : str or pathlib.Path
Path to file in the file system where this piece is located.
open_file_func : callable
Function to use for obtaining file handle to dataset piece.
partition_keys : list of tuples
Two-element tuples of ``(column name, ordinal index)``.
row_group : int, default None
Row group to load. By default, reads all row groups.
"""
def __init__(self, path, open_file_func=partial(open, mode='rb'),
file_options=None, row_group=None, partition_keys=None):
self.path = _stringify_path(path)
self.open_file_func = open_file_func
self.row_group = row_group
self.partition_keys = partition_keys or []
self.file_options = file_options or {}
def __eq__(self, other):
if not isinstance(other, ParquetDatasetPiece):
return False
return (self.path == other.path and
self.row_group == other.row_group and
self.partition_keys == other.partition_keys)
def __repr__(self):
return ('{}({!r}, row_group={!r}, partition_keys={!r})'
.format(type(self).__name__, self.path,
self.row_group,
self.partition_keys))
def __str__(self):
result = ''
if len(self.partition_keys) > 0:
partition_str = ', '.join('{}={}'.format(name, index)
for name, index in self.partition_keys)
result += 'partition[{}] '.format(partition_str)
result += self.path
if self.row_group is not None:
result += ' | row_group={}'.format(self.row_group)
return result
def get_metadata(self):
"""
Return the file's metadata.
Returns
-------
metadata : FileMetaData
"""
f = self.open()
return f.metadata
def open(self):
"""
Return instance of ParquetFile.
"""
reader = self.open_file_func(self.path)
if not isinstance(reader, ParquetFile):
reader = ParquetFile(reader, **self.file_options)
return reader
def read(self, columns=None, use_threads=True, partitions=None,
file=None, use_pandas_metadata=False):
"""
Read this piece as a pyarrow.Table.
Parameters
----------
columns : list of column names, default None
use_threads : bool, default True
Perform multi-threaded column reads.
partitions : ParquetPartitions, default None
file : file-like object
Passed to ParquetFile.
Returns
-------
table : pyarrow.Table
"""
if self.open_file_func is not None:
reader = self.open()
elif file is not None:
reader = ParquetFile(file, **self.file_options)
else:
# try to read the local path
reader = ParquetFile(self.path, **self.file_options)
options = dict(columns=columns,
use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
if self.row_group is not None:
table = reader.read_row_group(self.row_group, **options)
else:
table = reader.read(**options)
if len(self.partition_keys) > 0:
if partitions is None:
raise ValueError('Must pass partition sets')
# Here, the index is the categorical code of the partition where
# this piece is located. Suppose we had
#
# /foo=a/0.parq
# /foo=b/0.parq
# /foo=c/0.parq
#
# Then we assign a=0, b=1, c=2. And the resulting Table pieces will
# have a DictionaryArray column named foo having the constant index
# value as indicated. The distinct categories of the partition have
# been computed in the ParquetManifest
for i, (name, index) in enumerate(self.partition_keys):
# The partition code is the same for all values in this piece
indices = np.full(len(table), index, dtype='i4')
# This is set of all partition values, computed as part of the
# manifest, so ['a', 'b', 'c'] as in our example above.
dictionary = partitions.levels[i].dictionary
arr = pa.DictionaryArray.from_arrays(indices, dictionary)
table = table.append_column(name, arr)
return table
class PartitionSet:
"""
A data structure for cataloguing the observed Parquet partitions at a
particular level. So if we have
/foo=a/bar=0
/foo=a/bar=1
/foo=a/bar=2
/foo=b/bar=0
/foo=b/bar=1
/foo=b/bar=2
Then we have two partition sets, one for foo, another for bar. As we visit
levels of the partition hierarchy, a PartitionSet tracks the distinct
values and assigns categorical codes to use when reading the pieces
"""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or []
self.key_indices = {k: i for i, k in enumerate(self.keys)}
self._dictionary = None
def get_index(self, key):
"""
Get the index of the partition value if it is known, otherwise assign
one
"""
if key in self.key_indices:
return self.key_indices[key]
else:
index = len(self.key_indices)
self.keys.append(key)
self.key_indices[key] = index
return index
@property
def dictionary(self):
if self._dictionary is not None:
return self._dictionary
if len(self.keys) == 0:
raise ValueError('No known partition keys')
# Only integer and string partition types are supported right now
try:
integer_keys = [int(x) for x in self.keys]
dictionary = lib.array(integer_keys)
except ValueError:
dictionary = lib.array(self.keys)
self._dictionary = dictionary
return dictionary
@property
def is_sorted(self):
return list(self.keys) == sorted(self.keys)
class ParquetPartitions:
def __init__(self):
self.levels = []
self.partition_names = set()
def __len__(self):
return len(self.levels)
def __getitem__(self, i):
return self.levels[i]
def equals(self, other):
if not isinstance(other, ParquetPartitions):
raise TypeError('`other` must be an instance of ParquetPartitions')
return (self.levels == other.levels and
self.partition_names == other.partition_names)
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def get_index(self, level, name, key):
"""
Record a partition value at a particular level, returning the distinct
code for that value at that level.
Example:
partitions.get_index(1, 'foo', 'a') returns 0
partitions.get_index(1, 'foo', 'b') returns 1
partitions.get_index(1, 'foo', 'c') returns 2
partitions.get_index(1, 'foo', 'a') returns 0
Parameters
----------
level : int
The nesting level of the partition we are observing
name : str
The partition name
key : str or int
The partition value
"""
if level == len(self.levels):
if name in self.partition_names:
raise ValueError('{} was the name of the partition in '
'another level'.format(name))
part_set = PartitionSet(name)
self.levels.append(part_set)
self.partition_names.add(name)
return self.levels[level].get_index(key)
def filter_accepts_partition(self, part_key, filter, level):
p_column, p_value_index = part_key
f_column, op, f_value = filter
if p_column != f_column:
return True
f_type = type(f_value)
if isinstance(f_value, set):
if not f_value:
raise ValueError("Cannot use empty set as filter value")
if op not in {'in', 'not in'}:
raise ValueError("Op '%s' not supported with set value",
op)
if len({type(item) for item in f_value}) != 1:
raise ValueError("All elements of set '%s' must be of"
" same type", f_value)
f_type = type(next(iter(f_value)))
p_value = f_type(self.levels[level]
.dictionary[p_value_index].as_py())
if op == "=" or op == "==":
return p_value == f_value
elif op == "!=":
return p_value != f_value
elif op == '<':
return p_value < f_value
elif op == '>':
return p_value > f_value
elif op == '<=':
return p_value <= f_value
elif op == '>=':
return p_value >= f_value
elif op == 'in':
return p_value in f_value
elif op == 'not in':
return p_value not in f_value
else:
raise ValueError("'%s' is not a valid operator in predicates.",
filter[1])
class ParquetManifest:
def __init__(self, dirpath, open_file_func=None, filesystem=None,
pathsep='/', partition_scheme='hive', metadata_nthreads=1):
filesystem, dirpath = _get_filesystem_and_path(filesystem, dirpath)
self.filesystem = filesystem
self.open_file_func = open_file_func
self.pathsep = pathsep
self.dirpath = _stringify_path(dirpath)
self.partition_scheme = partition_scheme
self.partitions = ParquetPartitions()
self.pieces = []
self._metadata_nthreads = metadata_nthreads
self._thread_pool = futures.ThreadPoolExecutor(
max_workers=metadata_nthreads)
self.common_metadata_path = None
self.metadata_path = None
self._visit_level(0, self.dirpath, [])
# Due to concurrency, pieces will potentially be out of order if the
# dataset is partitioned, so we sort them to yield stable results
self.pieces.sort(key=lambda piece: piece.path)
if self.common_metadata_path is None:
# _common_metadata is a subset of _metadata
self.common_metadata_path = self.metadata_path
self._thread_pool.shutdown()
def _visit_level(self, level, base_path, part_keys):
fs = self.filesystem
_, directories, files = next(fs.walk(base_path))
filtered_files = []
for path in files:
full_path = self.pathsep.join((base_path, path))
if path.endswith('_common_metadata'):
self.common_metadata_path = full_path
elif path.endswith('_metadata'):
self.metadata_path = full_path
elif self._should_silently_exclude(path):
continue
else:
filtered_files.append(full_path)
# ARROW-1079: Filter out "private" directories starting with underscore
filtered_directories = [self.pathsep.join((base_path, x))
for x in directories
if not _is_private_directory(x)]
filtered_files.sort()
filtered_directories.sort()
if len(filtered_files) > 0 and len(filtered_directories) > 0:
raise ValueError('Found files in an intermediate '
'directory: {}'.format(base_path))
elif len(filtered_directories) > 0:
self._visit_directories(level, filtered_directories, part_keys)
else:
self._push_pieces(filtered_files, part_keys)
def _should_silently_exclude(self, file_name):
return (file_name.endswith('.crc') or # Checksums
file_name.endswith('_$folder$') or # HDFS directories in S3
file_name.startswith('.') or # Hidden files starting with .
file_name.startswith('_') or # Hidden files starting with _
file_name in EXCLUDED_PARQUET_PATHS)
def _visit_directories(self, level, directories, part_keys):
futures_list = []
for path in directories:
head, tail = _path_split(path, self.pathsep)
name, key = _parse_hive_partition(tail)
index = self.partitions.get_index(level, name, key)
dir_part_keys = part_keys + [(name, index)]
# If you have fewer threads than levels, the wait call will block
# indefinitely due to multiple waits within a thread.
if level < self._metadata_nthreads:
future = self._thread_pool.submit(self._visit_level,
level + 1,
path,
dir_part_keys)
futures_list.append(future)
else:
self._visit_level(level + 1, path, dir_part_keys)
if futures_list:
futures.wait(futures_list)
def _parse_partition(self, dirname):
if self.partition_scheme == 'hive':
return _parse_hive_partition(dirname)
else:
raise NotImplementedError('partition schema: {}'
.format(self.partition_scheme))
def _push_pieces(self, files, part_keys):
self.pieces.extend([
ParquetDatasetPiece(path, partition_keys=part_keys,
open_file_func=self.open_file_func)
for path in files
])
def _parse_hive_partition(value):
if '=' not in value:
raise ValueError('Directory name did not appear to be a '
'partition: {}'.format(value))
return value.split('=', 1)
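# Illustration: _parse_hive_partition('year=2009') returns ['year', '2009'];
# the value stays a string here and is interpreted later by PartitionSet.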
def _is_private_directory(x):
_, tail = os.path.split(x)
return (tail.startswith('_') or tail.startswith('.')) and '=' not in tail
def _path_split(path, sep):
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return head, tail
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
class _ParquetDatasetMetadata:
__slots__ = ('fs', 'memory_map', 'read_dictionary', 'common_metadata',
'buffer_size')
def _open_dataset_file(dataset, path, meta=None):
if (dataset.fs is not None and
not isinstance(dataset.fs, legacyfs.LocalFileSystem)):
path = dataset.fs.open(path, mode='rb')
return ParquetFile(
path,
metadata=meta,
memory_map=dataset.memory_map,
read_dictionary=dataset.read_dictionary,
common_metadata=dataset.common_metadata,
buffer_size=dataset.buffer_size
)
_read_docstring_common = """\
read_dictionary : list, default None
List of names or column paths (for nested types) to read directly
as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
a flat column as dictionary-encoded pass the column name. For
nested types, you must pass the full column "path", which could be
something like level1.level2.list.item. Refer to the Parquet
file's schema to obtain the paths.
memory_map : bool, default False
If the source is a file path, use a memory map to read file, which can
improve performance in some environments.
buffer_size : int, default 0
If positive, perform read buffering when deserializing individual
column chunks. Otherwise IO calls are unbuffered.
partitioning : Partitioning or str or list of str, default "hive"
The partitioning scheme for a partitioned dataset. The default of "hive"
assumes directory names with key=value pairs like "/year=2009/month=11".
In addition, a scheme like "/2009/11" is also supported, in which case
you need to specify the field names or a full schema. See the
``pyarrow.dataset.partitioning()`` function for more details."""
class ParquetDataset:
__doc__ = """
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories.
Parameters
----------
path_or_paths : str or List[str]
A directory name, single file name, or list of file names.
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem.
metadata : pyarrow.parquet.FileMetaData
Use metadata obtained elsewhere to validate file schemas.
schema : pyarrow.parquet.Schema
Use schema obtained elsewhere to validate file schemas. Alternative to
metadata parameter.
split_row_groups : bool, default False
Divide files into pieces for each row group in the file.
validate_schema : bool, default True
Check that individual file schemas are all the same / compatible.
filters : List[Tuple] or List[List[Tuple]] or None (default)
Rows which do not match the filter predicate will be removed from scanned
data. Partition keys embedded in a nested directory structure will be
exploited to avoid loading files at all if they contain no matching rows.
If `use_legacy_dataset` is True, filters can only reference partition
keys and only a hive-style directory structure is supported. When
`use_legacy_dataset` is set to False, within-file filtering and
different partitioning schemes are also supported.
{1}
metadata_nthreads: int, default 1
How many threads to use in the thread pool that reads the dataset
metadata. Increasing this can speed up reading partitioned
datasets.
{0}
use_legacy_dataset : bool, default True
Set to False to enable the new code path (experimental, using the
new Arrow Dataset API). Among other things, this allows passing
`filters` for all columns and not only the partition keys, and enables
different partitioning schemes, etc.
""".format(_read_docstring_common, _DNF_filter_doc)
def __new__(cls, path_or_paths=None, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1, read_dictionary=None,
memory_map=False, buffer_size=0, partitioning="hive",
use_legacy_dataset=None):
if use_legacy_dataset is None:
# if a new filesystem is passed -> default to new implementation
if isinstance(filesystem, FileSystem):
use_legacy_dataset = False
# otherwise the default is still True
else:
use_legacy_dataset = True
if not use_legacy_dataset:
return _ParquetDatasetV2(path_or_paths, filesystem=filesystem,
filters=filters,
partitioning=partitioning,
read_dictionary=read_dictionary,
memory_map=memory_map,
buffer_size=buffer_size,
# unsupported keywords
schema=schema, metadata=metadata,
split_row_groups=split_row_groups,
validate_schema=validate_schema,
metadata_nthreads=metadata_nthreads)
self = object.__new__(cls)
return self
def __init__(self, path_or_paths, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1, read_dictionary=None,
memory_map=False, buffer_size=0, partitioning="hive",
use_legacy_dataset=True):
if partitioning != "hive":
raise ValueError(
'Only "hive" for hive-like partitioning is supported when '
'using use_legacy_dataset=True')
self._metadata = _ParquetDatasetMetadata()
a_path = path_or_paths
if isinstance(a_path, list):
a_path = a_path[0]
self._metadata.fs, _ = _get_filesystem_and_path(filesystem, a_path)
if isinstance(path_or_paths, list):
self.paths = [_parse_uri(path) for path in path_or_paths]
else:
self.paths = _parse_uri(path_or_paths)
self._metadata.read_dictionary = read_dictionary
self._metadata.memory_map = memory_map
self._metadata.buffer_size = buffer_size
(self.pieces,
self.partitions,
self.common_metadata_path,
self.metadata_path) = _make_manifest(
path_or_paths, self.fs, metadata_nthreads=metadata_nthreads,
open_file_func=partial(_open_dataset_file, self._metadata)
)
if self.common_metadata_path is not None:
with self.fs.open(self.common_metadata_path) as f:
self._metadata.common_metadata = read_metadata(
f,
memory_map=memory_map
)
else:
self._metadata.common_metadata = None
if metadata is None and self.metadata_path is not None:
with self.fs.open(self.metadata_path) as f:
self.metadata = read_metadata(f, memory_map=memory_map)
else:
self.metadata = metadata
self.schema = schema
self.split_row_groups = split_row_groups
if split_row_groups:
raise NotImplementedError("split_row_groups not yet implemented")
if filters is not None:
filters = _check_filters(filters)
self._filter(filters)
if validate_schema:
self.validate_schemas()
def equals(self, other):
if not isinstance(other, ParquetDataset):
raise TypeError('`other` must be an instance of ParquetDataset')
if self.fs.__class__ != other.fs.__class__:
return False
for prop in ('paths', 'memory_map', 'pieces', 'partitions',
'common_metadata_path', 'metadata_path',
'common_metadata', 'metadata', 'schema',
'buffer_size', 'split_row_groups'):
if getattr(self, prop) != getattr(other, prop):
return False
return True
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def validate_schemas(self):
if self.metadata is None and self.schema is None:
if self.common_metadata is not None:
self.schema = self.common_metadata.schema
else:
self.schema = self.pieces[0].get_metadata().schema
elif self.schema is None:
self.schema = self.metadata.schema
# Verify schemas are all compatible
dataset_schema = self.schema.to_arrow_schema()
# Exclude the partition columns from the schema, they are provided
# by the path, not the DatasetPiece
if self.partitions is not None:
for partition_name in self.partitions.partition_names:
if dataset_schema.get_field_index(partition_name) != -1:
field_idx = dataset_schema.get_field_index(partition_name)
dataset_schema = dataset_schema.remove(field_idx)
for piece in self.pieces:
file_metadata = piece.get_metadata()
file_schema = file_metadata.schema.to_arrow_schema()
if not dataset_schema.equals(file_schema, check_metadata=False):
raise ValueError('Schema in {!s} was different. \n'
'{!s}\n\nvs\n\n{!s}'
.format(piece, file_schema,
dataset_schema))
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read multiple Parquet files as a single pyarrow.Table.
Parameters
----------
columns : List[str]
Names of columns to read from the file.
use_threads : bool, default True
Perform multi-threaded column reads
use_pandas_metadata : bool, default False
Passed through to each dataset piece.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
tables = []
for piece in self.pieces:
table = piece.read(columns=columns, use_threads=use_threads,
partitions=self.partitions,
use_pandas_metadata=use_pandas_metadata)
tables.append(table)
all_data = lib.concat_tables(tables)
if use_pandas_metadata:
# We need to ensure that this metadata is set in the Table's schema
# so that Table.to_pandas will construct pandas.DataFrame with the
# right index
common_metadata = self._get_common_pandas_metadata()
current_metadata = all_data.schema.metadata or {}
if common_metadata and b'pandas' not in current_metadata:
all_data = all_data.replace_schema_metadata({
b'pandas': common_metadata})
return all_data
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
return self.read(use_pandas_metadata=True, **kwargs)
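    # Usage sketch for the legacy ParquetDataset (path and column names are
    # hypothetical):
    #   dataset = ParquetDataset('my_dataset_root/',
    #                            filters=[('year', '=', 2020)])
    #   table = dataset.read(columns=['a', 'b'], use_pandas_metadata=True)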
def _get_common_pandas_metadata(self):
if self.common_metadata is None:
return None
keyvalues = self.common_metadata.metadata
return keyvalues.get(b'pandas', None)
def _filter(self, filters):
accepts_filter = self.partitions.filter_accepts_partition
def one_filter_accepts(piece, filter):
return all(accepts_filter(part_key, filter, level)
for level, part_key in enumerate(piece.partition_keys))
def all_filters_accept(piece):
return any(all(one_filter_accepts(piece, f) for f in conjunction)
for conjunction in filters)
self.pieces = [p for p in self.pieces if all_filters_accept(p)]
fs = property(operator.attrgetter('_metadata.fs'))
memory_map = property(operator.attrgetter('_metadata.memory_map'))
read_dictionary = property(
operator.attrgetter('_metadata.read_dictionary')
)
common_metadata = property(
operator.attrgetter('_metadata.common_metadata')
)
buffer_size = property(operator.attrgetter('_metadata.buffer_size'))
def _make_manifest(path_or_paths, fs, pathsep='/', metadata_nthreads=1,
open_file_func=None):
partitions = None
common_metadata_path = None
metadata_path = None
if isinstance(path_or_paths, list) and len(path_or_paths) == 1:
# Dask passes a directory as a list of length 1
path_or_paths = path_or_paths[0]
if _is_path_like(path_or_paths) and fs.isdir(path_or_paths):
manifest = ParquetManifest(path_or_paths, filesystem=fs,
open_file_func=open_file_func,
pathsep=getattr(fs, "pathsep", "/"),
metadata_nthreads=metadata_nthreads)
common_metadata_path = manifest.common_metadata_path
metadata_path = manifest.metadata_path
pieces = manifest.pieces
partitions = manifest.partitions
else:
if not isinstance(path_or_paths, list):
path_or_paths = [path_or_paths]
# List of paths
if len(path_or_paths) == 0:
raise ValueError('Must pass at least one file path')
pieces = []
for path in path_or_paths:
if not fs.isfile(path):
raise OSError('Passed non-file path: {}'
.format(path))
piece = ParquetDatasetPiece(path, open_file_func=open_file_func)
pieces.append(piece)
return pieces, partitions, common_metadata_path, metadata_path
class _ParquetDatasetV2:
"""
ParquetDataset shim using the Dataset API under the hood.
"""
def __init__(self, path_or_paths, filesystem=None, filters=None,
partitioning="hive", read_dictionary=None, buffer_size=None,
memory_map=False, ignore_prefixes=None, **kwargs):
import pyarrow.dataset as ds
# Raise error for not supported keywords
for keyword, default in [
("schema", None), ("metadata", None),
("split_row_groups", False), ("validate_schema", True),
("metadata_nthreads", 1)]:
if keyword in kwargs and kwargs[keyword] is not default:
raise ValueError(
"Keyword '{0}' is not yet supported with the new "
"Dataset API".format(keyword))
# map format arguments
read_options = {}
if buffer_size:
read_options.update(use_buffered_stream=True,
buffer_size=buffer_size)
if read_dictionary is not None:
read_options.update(dictionary_columns=read_dictionary)
# map filters to Expressions
self._filters = filters
self._filter_expression = filters and _filters_to_expression(filters)
# map old filesystems to new one
if filesystem is not None:
filesystem = _ensure_filesystem(
filesystem, use_mmap=memory_map)
elif filesystem is None and memory_map:
# if memory_map is specified, assume local file system (string
# path can in principle be URI for any filesystem)
filesystem = LocalFileSystem(use_mmap=memory_map)
# check for single fragment dataset
single_file = None
if isinstance(path_or_paths, list):
if len(path_or_paths) == 1:
single_file = path_or_paths[0]
else:
if _is_path_like(path_or_paths):
path = str(path_or_paths)
if filesystem is None:
# path might be a URI describing the FileSystem as well
try:
filesystem, path = FileSystem.from_uri(path)
except ValueError:
filesystem = LocalFileSystem(use_mmap=memory_map)
if filesystem.get_file_info(path).is_file:
single_file = path
else:
single_file = path_or_paths
if single_file is not None:
self._enable_parallel_column_conversion = True
read_options.update(enable_parallel_column_conversion=True)
parquet_format = ds.ParquetFileFormat(read_options=read_options)
fragment = parquet_format.make_fragment(single_file, filesystem)
self._dataset = ds.FileSystemDataset(
[fragment], schema=fragment.physical_schema,
format=parquet_format,
filesystem=fragment.filesystem
)
return
else:
self._enable_parallel_column_conversion = False
parquet_format = ds.ParquetFileFormat(read_options=read_options)
# check partitioning to enable dictionary encoding
if partitioning == "hive":
partitioning = ds.HivePartitioning.discover(
infer_dictionary=True)
self._dataset = ds.dataset(path_or_paths, filesystem=filesystem,
format=parquet_format,
partitioning=partitioning,
ignore_prefixes=ignore_prefixes)
@property
def schema(self):
return self._dataset.schema
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read (multiple) Parquet files as a single pyarrow.Table.
Parameters
----------
columns : List[str]
Names of columns to read from the dataset. The partition fields
are not automatically included (in contrast to when setting
``use_legacy_dataset=True``).
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
# if use_pandas_metadata, we need to include index columns in the
# column selection, to be able to restore those in the pandas DataFrame
metadata = self.schema.metadata
if columns is not None and use_pandas_metadata:
if metadata and b'pandas' in metadata:
# RangeIndex can be represented as dict instead of column name
index_columns = [
col for col in _get_pandas_index_columns(metadata)
if not isinstance(col, dict)
]
columns = columns + list(set(index_columns) - set(columns))
if self._enable_parallel_column_conversion:
if use_threads:
# Allow per-column parallelism; would otherwise cause
# contention in the presence of per-file parallelism.
use_threads = False
table = self._dataset.to_table(
columns=columns, filter=self._filter_expression,
use_threads=use_threads
)
# if use_pandas_metadata, restore the pandas metadata (which gets
# lost if doing a specific `columns` selection in to_table)
if use_pandas_metadata:
if metadata and b"pandas" in metadata:
new_metadata = table.schema.metadata or {}
new_metadata.update({b"pandas": metadata[b"pandas"]})
table = table.replace_schema_metadata(new_metadata)
return table
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details.
"""
return self.read(use_pandas_metadata=True, **kwargs)
@property
def pieces(self):
# TODO raise deprecation warning
return list(self._dataset.get_fragments())
_read_table_docstring = """
{0}
Parameters
----------
source: str, pyarrow.NativeFile, or file-like object
    If a string is passed, it can be a single file name or directory name. For
file-like objects, only read a single file. Use pyarrow.BufferReader to
read a file contained in a bytes or buffer-like object.
columns: list
If not None, only these columns will be read from the file. A column
name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
metadata : FileMetaData
If separately computed
{1}
use_legacy_dataset : bool, default False
By default, `read_table` uses the new Arrow Datasets API since
    pyarrow 1.0.0. Among other things, this allows passing `filters`
    for all columns and not only the partition keys, and it enables
different partitioning schemes, etc.
Set to True to use the legacy behaviour.
ignore_prefixes : list, optional
Files matching any of these prefixes will be ignored by the
discovery process if use_legacy_dataset=False.
This is matched to the basename of a path.
By default this is ['.', '_'].
Note that discovery happens only if a directory is passed as source.
filesystem : FileSystem, default None
    If nothing is passed, paths are assumed to be found in the local on-disk
filesystem.
filters : List[Tuple] or List[List[Tuple]] or None (default)
Rows which do not match the filter predicate will be removed from scanned
data. Partition keys embedded in a nested directory structure will be
exploited to avoid loading files at all if they contain no matching rows.
If `use_legacy_dataset` is True, filters can only reference partition
keys and only a hive-style directory structure is supported. When
setting `use_legacy_dataset` to False, also within-file level filtering
and different partitioning schemes are supported.
{3}
Returns
-------
{2}
"""
def read_table(source, columns=None, use_threads=True, metadata=None,
use_pandas_metadata=False, memory_map=False,
read_dictionary=None, filesystem=None, filters=None,
buffer_size=0, partitioning="hive", use_legacy_dataset=False,
ignore_prefixes=None):
if not use_legacy_dataset:
if metadata is not None:
raise ValueError(
"The 'metadata' keyword is no longer supported with the new "
"datasets-based implementation. Specify "
"'use_legacy_dataset=True' to temporarily recover the old "
"behaviour."
)
try:
dataset = _ParquetDatasetV2(
source,
filesystem=filesystem,
partitioning=partitioning,
memory_map=memory_map,
read_dictionary=read_dictionary,
buffer_size=buffer_size,
filters=filters,
ignore_prefixes=ignore_prefixes,
)
except ImportError:
# fall back on ParquetFile for simple cases when pyarrow.dataset
# module is not available
if filters is not None:
raise ValueError(
"the 'filters' keyword is not supported when the "
"pyarrow.dataset module is not available"
)
if partitioning != "hive":
raise ValueError(
"the 'partitioning' keyword is not supported when the "
"pyarrow.dataset module is not available"
)
filesystem, path = _resolve_filesystem_and_path(source, filesystem)
if filesystem is not None:
source = filesystem.open_input_file(path)
# TODO test that source is not a directory or a list
dataset = ParquetFile(
source, metadata=metadata, read_dictionary=read_dictionary,
memory_map=memory_map, buffer_size=buffer_size)
return dataset.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
if ignore_prefixes is not None:
raise ValueError(
"The 'ignore_prefixes' keyword is only supported when "
"use_legacy_dataset=False")
if _is_path_like(source):
pf = ParquetDataset(source, metadata=metadata, memory_map=memory_map,
read_dictionary=read_dictionary,
buffer_size=buffer_size,
filesystem=filesystem, filters=filters,
partitioning=partitioning)
else:
pf = ParquetFile(source, metadata=metadata,
read_dictionary=read_dictionary,
memory_map=memory_map,
buffer_size=buffer_size)
return pf.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
read_table.__doc__ = _read_table_docstring.format(
"""Read a Table from Parquet format
Note: starting with pyarrow 1.0, the default for `use_legacy_dataset` is
switched to False.""",
"\n".join((_read_docstring_common,
"""use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded""")),
"""pyarrow.Table
Content of the file as a table (of columns)""",
_DNF_filter_doc)
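# Usage sketch for read_table (the file name and column names are
# hypothetical):
#   import pyarrow.parquet as pq
#   table = pq.read_table('example.parquet', columns=['a', 'b'])
#   df = table.to_pandas()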
def read_pandas(source, columns=None, use_threads=True, memory_map=False,
metadata=None, filters=None, buffer_size=0,
use_legacy_dataset=True, ignore_prefixes=None):
return read_table(
source,
columns=columns,
use_threads=use_threads,
metadata=metadata,
filters=filters,
memory_map=memory_map,
buffer_size=buffer_size,
use_pandas_metadata=True,
use_legacy_dataset=use_legacy_dataset,
ignore_prefixes=ignore_prefixes
)
read_pandas.__doc__ = _read_table_docstring.format(
'Read a Table from Parquet format, also reading DataFrame\n'
'index values if known in the file metadata',
_read_docstring_common,
"""pyarrow.Table
Content of the file as a Table of Columns, including DataFrame
indexes as columns""",
_DNF_filter_doc)
def write_table(table, where, row_group_size=None, version='1.0',
use_dictionary=True, compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None,
coerce_timestamps=None,
allow_truncated_timestamps=False,
data_page_size=None, flavor=None,
filesystem=None,
compression_level=None,
use_byte_stream_split=False,
data_page_version='1.0',
**kwargs):
row_group_size = kwargs.pop('chunk_size', row_group_size)
use_int96 = use_deprecated_int96_timestamps
try:
with ParquetWriter(
where, table.schema,
filesystem=filesystem,
version=version,
flavor=flavor,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
coerce_timestamps=coerce_timestamps,
data_page_size=data_page_size,
allow_truncated_timestamps=allow_truncated_timestamps,
compression=compression,
use_deprecated_int96_timestamps=use_int96,
compression_level=compression_level,
use_byte_stream_split=use_byte_stream_split,
data_page_version=data_page_version,
**kwargs) as writer:
writer.write_table(table, row_group_size=row_group_size)
except Exception:
if _is_path_like(where):
try:
os.remove(_stringify_path(where))
except os.error:
pass
raise
write_table.__doc__ = """
Write a Table to Parquet format.
Parameters
----------
table : pyarrow.Table
where: string or pyarrow.NativeFile
row_group_size: int
The number of rows per rowgroup
{}
""".format(_parquet_writer_arg_docs)
def _mkdir_if_not_exists(fs, path):
if fs._isfilestore() and not fs.exists(path):
try:
fs.mkdir(path)
except OSError:
assert fs.exists(path)
def write_to_dataset(table, root_path, partition_cols=None,
partition_filename_cb=None, filesystem=None,
use_legacy_dataset=True, **kwargs):
"""Wrapper around parquet.write_table for writing a Table to
Parquet format by partitions.
For each combination of partition columns and values,
    subdirectories are created in the following
manner:
root_dir/
group1=value1
group2=value1
<uuid>.parquet
group2=value2
<uuid>.parquet
group1=valueN
group2=value1
<uuid>.parquet
group2=valueN
<uuid>.parquet
Parameters
----------
table : pyarrow.Table
root_path : str, pathlib.Path
The root directory of the dataset
filesystem : FileSystem, default None
        If nothing is passed, paths are assumed to be found in the local on-disk
filesystem
partition_cols : list,
Column names by which to partition the dataset
Columns are partitioned in the order they are given
partition_filename_cb : callable,
A callback function that takes the partition key(s) as an argument
        and allows you to override the partition filename. If nothing is
passed, the filename will consist of a uuid.
use_legacy_dataset : bool, default True
Set to False to enable the new code path (experimental, using the
new Arrow Dataset API). This is more efficient when using partition
columns, but does not (yet) support `partition_filename_cb` and
`metadata_collector` keywords.
**kwargs : dict,
Additional kwargs for write_table function. See docstring for
`write_table` or `ParquetWriter` for more information.
Using `metadata_collector` in kwargs allows one to collect the
file metadata instances of dataset pieces. The file paths in the
ColumnChunkMetaData will be set relative to `root_path`.
"""
if not use_legacy_dataset:
import pyarrow.dataset as ds
# extract non-file format options
schema = kwargs.pop("schema", None)
use_threads = kwargs.pop("use_threads", True)
# raise for unsupported keywords
msg = (
"The '{}' argument is not supported with the new dataset "
"implementation."
)
metadata_collector = kwargs.pop('metadata_collector', None)
if metadata_collector is not None:
raise ValueError(msg.format("metadata_collector"))
if partition_filename_cb is not None:
raise ValueError(msg.format("partition_filename_cb"))
# map format arguments
parquet_format = ds.ParquetFileFormat()
write_options = parquet_format.make_write_options(**kwargs)
# map old filesystems to new one
if filesystem is not None:
filesystem = _ensure_filesystem(filesystem)
partitioning = None
if partition_cols:
part_schema = table.select(partition_cols).schema
partitioning = ds.partitioning(part_schema, flavor="hive")
ds.write_dataset(
table, root_path, filesystem=filesystem,
format=parquet_format, file_options=write_options, schema=schema,
partitioning=partitioning, use_threads=use_threads)
return
fs, root_path = legacyfs.resolve_filesystem_and_path(root_path, filesystem)
_mkdir_if_not_exists(fs, root_path)
metadata_collector = kwargs.pop('metadata_collector', None)
if partition_cols is not None and len(partition_cols) > 0:
df = table.to_pandas()
partition_keys = [df[col] for col in partition_cols]
data_df = df.drop(partition_cols, axis='columns')
data_cols = df.columns.drop(partition_cols)
if len(data_cols) == 0:
raise ValueError('No data left to save outside partition columns')
subschema = table.schema
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
for col in table.schema.names:
if col in partition_cols:
subschema = subschema.remove(subschema.get_field_index(col))
for keys, subgroup in data_df.groupby(partition_keys):
if not isinstance(keys, tuple):
keys = (keys,)
subdir = '/'.join(
['{colname}={value}'.format(colname=name, value=val)
for name, val in zip(partition_cols, keys)])
subtable = pa.Table.from_pandas(subgroup, schema=subschema,
safe=False)
_mkdir_if_not_exists(fs, '/'.join([root_path, subdir]))
if partition_filename_cb:
outfile = partition_filename_cb(keys)
else:
outfile = guid() + '.parquet'
relative_path = '/'.join([subdir, outfile])
full_path = '/'.join([root_path, relative_path])
with fs.open(full_path, 'wb') as f:
write_table(subtable, f, metadata_collector=metadata_collector,
**kwargs)
if metadata_collector is not None:
metadata_collector[-1].set_file_path(relative_path)
else:
if partition_filename_cb:
outfile = partition_filename_cb(None)
else:
outfile = guid() + '.parquet'
full_path = '/'.join([root_path, outfile])
with fs.open(full_path, 'wb') as f:
write_table(table, f, metadata_collector=metadata_collector,
**kwargs)
if metadata_collector is not None:
metadata_collector[-1].set_file_path(outfile)
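# Usage sketch for write_to_dataset (column names and root path are
# hypothetical); this produces a hive-style layout such as
# dataset_root/year=2020/month=1/<uuid>.parquet:
#   write_to_dataset(table, root_path='dataset_root',
#                    partition_cols=['year', 'month'])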
def write_metadata(schema, where, metadata_collector=None, **kwargs):
"""
Write metadata-only Parquet file from schema. This can be used with
`write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar
files.
Parameters
----------
schema : pyarrow.Schema
where: string or pyarrow.NativeFile
    metadata_collector : list, optional
        A list of FileMetaData objects (as filled, for example, by
        `write_to_dataset`); their row groups are appended to the metadata
        written for `schema`.
**kwargs : dict,
Additional kwargs for ParquetWriter class. See docstring for
`ParquetWriter` for more information.
Examples
--------
Write a dataset and collect metadata information.
>>> metadata_collector = []
>>> write_to_dataset(
... table, root_path,
... metadata_collector=metadata_collector, **writer_kwargs)
Write the `_common_metadata` parquet file without row groups statistics.
>>> write_metadata(
... table.schema, root_path / '_common_metadata', **writer_kwargs)
Write the `_metadata` parquet file with row groups statistics.
>>> write_metadata(
... table.schema, root_path / '_metadata',
... metadata_collector=metadata_collector, **writer_kwargs)
"""
writer = ParquetWriter(where, schema, **kwargs)
writer.close()
if metadata_collector is not None:
# ParquetWriter doesn't expose the metadata until it's written. Write
# it and read it again.
metadata = read_metadata(where)
for m in metadata_collector:
metadata.append_row_groups(m)
metadata.write_metadata_file(where)
def read_metadata(where, memory_map=False):
"""
Read FileMetadata from footer of a single Parquet file.
Parameters
----------
where : str (filepath) or file-like object
memory_map : bool, default False
Create memory map when the source is a file path.
Returns
-------
metadata : FileMetadata
"""
return ParquetFile(where, memory_map=memory_map).metadata
def read_schema(where, memory_map=False):
"""
Read effective Arrow schema from Parquet file metadata.
Parameters
----------
where : str (filepath) or file-like object
memory_map : bool, default False
Create memory map when the source is a file path.
Returns
-------
schema : pyarrow.Schema
"""
return ParquetFile(where, memory_map=memory_map).schema.to_arrow_schema()
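# Usage sketch for the metadata helpers (the file name is hypothetical):
#   meta = read_metadata('example.parquet')
#   print(meta.num_rows, meta.num_row_groups)
#   arrow_schema = read_schema('example.parquet')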
|
xhochy/arrow
|
python/pyarrow/parquet.py
|
Python
|
apache-2.0
| 74,846
|
[
"VisIt"
] |
29f4abe0b9fb56b3c26f866b1e07ce7782565d189797b64a018f57bd9fd3c102
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Dirac Hartree-Fock
'''
import time
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.lib import logger
from pyscf.scf import hf
from pyscf.scf import _vhf
from pyscf.scf import chkfile
from pyscf.data import nist
from pyscf import __config__
def kernel(mf, conv_tol=1e-9, conv_tol_grad=None,
dump_chk=True, dm0=None, callback=None, conv_check=True):
    '''The modified SCF kernel for Dirac-Hartree-Fock. In this kernel, the
    SCF is carried out in three steps. First, the two-electron part is
    approximated by large-component integrals (LL|LL); next, the (SS|LL)
    interaction between large and small components is added; finally, the
    SCF is converged with the small-component contributions (SS|SS).
'''
if conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(conv_tol)
logger.info(mf, 'Set gradient conv threshold to %g', conv_tol_grad)
if dm0 is None:
dm = mf.get_init_guess(mf.mol, mf.init_guess)
else:
dm = dm0
mf._coulomb_now = 'LLLL'
if dm0 is None and mf._coulomb_now.upper() == 'LLLL':
scf_conv, e_tot, mo_energy, mo_coeff, mo_occ \
= hf.kernel(mf, 1e-2, 1e-1,
dump_chk, dm0=dm, callback=callback,
conv_check=False)
dm = mf.make_rdm1(mo_coeff, mo_occ)
mf._coulomb_now = 'SSLL'
if dm0 is None and (mf._coulomb_now.upper() == 'SSLL' or
mf._coulomb_now.upper() == 'LLSS'):
scf_conv, e_tot, mo_energy, mo_coeff, mo_occ \
= hf.kernel(mf, 1e-3, 1e-1,
dump_chk, dm0=dm, callback=callback,
conv_check=False)
dm = mf.make_rdm1(mo_coeff, mo_occ)
mf._coulomb_now = 'SSSS'
if mf.with_ssss:
mf._coulomb_now = 'SSSS'
else:
mf._coulomb_now = 'SSLL'
return hf.kernel(mf, conv_tol, conv_tol_grad, dump_chk, dm0=dm,
callback=callback, conv_check=conv_check)
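# Usage sketch (assumes a prebuilt gto.Mole object `mol`): constructing a
# DHF/UHF object and calling scf() runs the LLLL -> SSLL -> SSSS cascade above.
#   mf = DHF(mol)
#   e_tot = mf.scf()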
def get_jk_coulomb(mol, dm, hermi=1, coulomb_allow='SSSS',
opt_llll=None, opt_ssll=None, opt_ssss=None, omega=None, verbose=None):
log = logger.new_logger(mol, verbose)
with mol.with_range_coulomb(omega):
if coulomb_allow.upper() == 'LLLL':
log.debug('Coulomb integral: (LL|LL)')
j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
n2c = j1.shape[1]
vj = numpy.zeros_like(dm)
vk = numpy.zeros_like(dm)
vj[...,:n2c,:n2c] = j1
vk[...,:n2c,:n2c] = k1
elif coulomb_allow.upper() == 'SSLL' \
or coulomb_allow.upper() == 'LLSS':
log.debug('Coulomb integral: (LL|LL) + (SS|LL)')
vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
n2c = j1.shape[1]
vj[...,:n2c,:n2c] += j1
vk[...,:n2c,:n2c] += k1
else: # coulomb_allow == 'SSSS'
log.debug('Coulomb integral: (LL|LL) + (SS|LL) + (SS|SS)')
vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
n2c = j1.shape[1]
vj[...,:n2c,:n2c] += j1
vk[...,:n2c,:n2c] += k1
j1, k1 = _call_veff_ssss(mol, dm, hermi, opt_ssss)
vj[...,n2c:,n2c:] += j1
vk[...,n2c:,n2c:] += k1
return vj, vk
get_jk = get_jk_coulomb
def get_hcore(mol):
n2c = mol.nao_2c()
n4c = n2c * 2
c = lib.param.LIGHT_SPEED
t = mol.intor_symmetric('int1e_spsp_spinor') * .5
vn = mol.intor_symmetric('int1e_nuc_spinor')
wn = mol.intor_symmetric('int1e_spnucsp_spinor')
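    # The 4-component one-electron matrix is assembled in 2-spinor blocks:
    #   h1e = [[ V,  T            ],
    #          [ T,  W/(4c^2) - T ]]
    # with T = <sigma.p sigma.p>/2 (kinetic), V the nuclear attraction and
    # W = <sigma.p V sigma.p> the small-component nuclear term.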
    h1e = numpy.empty((n4c, n4c), numpy.complex128)
h1e[:n2c,:n2c] = vn
h1e[n2c:,:n2c] = t
h1e[:n2c,n2c:] = t
h1e[n2c:,n2c:] = wn * (.25/c**2) - t
return h1e
def get_ovlp(mol):
n2c = mol.nao_2c()
n4c = n2c * 2
c = lib.param.LIGHT_SPEED
s = mol.intor_symmetric('int1e_ovlp_spinor')
t = mol.intor_symmetric('int1e_spsp_spinor')
    s1e = numpy.zeros((n4c, n4c), numpy.complex128)
s1e[:n2c,:n2c] = s
s1e[n2c:,n2c:] = t * (.5/c)**2
return s1e
make_rdm1 = hf.make_rdm1
def init_guess_by_minao(mol):
'''Initial guess in terms of the overlap to minimal basis.'''
dm = hf.init_guess_by_minao(mol)
return _proj_dmll(mol, dm, mol)
def init_guess_by_1e(mol):
'''Initial guess from one electron system.'''
return UHF(mol).init_guess_by_1e(mol)
def init_guess_by_atom(mol):
'''Initial guess from atom calculation.'''
dm = hf.init_guess_by_atom(mol)
return _proj_dmll(mol, dm, mol)
def init_guess_by_huckel(mol):
'''Initial guess from on-the-fly Huckel, doi:10.1021/acs.jctc.8b01089.'''
dm = hf.init_guess_by_huckel(mol)
return _proj_dmll(mol, dm, mol)
def init_guess_by_chkfile(mol, chkfile_name, project=None):
'''Read SCF chkfile and make the density matrix for 4C-DHF initial guess.
Kwargs:
project : None or bool
            Whether to project chkfile's orbitals to the new basis. Note that
            when the geometries of the chkfile and the given molecule are very
            different, this projection can produce a very poor initial guess.
            In PES scanning, it is recommended to switch off project.
            If project is set to None, the projection is applied only when the
            basis sets of the chkfile's molecule differ from the basis sets of
            the given molecule (regardless of whether the geometries of the
            two molecules differ). Note that the basis sets are considered
            different if the two molecules are derived from the same molecule
            with a different ordering of atoms.
'''
from pyscf.scf import addons
chk_mol, scf_rec = chkfile.load_scf(chkfile_name)
if project is None:
project = not gto.same_basis_set(chk_mol, mol)
# Check whether the two molecules are similar
if abs(mol.inertia_moment() - chk_mol.inertia_moment()).sum() > 0.5:
logger.warn(mol, "Large deviations found between the input "
"molecule and the molecule from chkfile\n"
"Initial guess density matrix may have large error.")
if project:
s = get_ovlp(mol)
def fproj(mo):
#TODO: check if mo is GHF orbital
if project:
mo = addons.project_mo_r2r(chk_mol, mo, mol)
norm = numpy.einsum('pi,pi->i', mo.conj(), s.dot(mo))
mo /= numpy.sqrt(norm)
return mo
mo = scf_rec['mo_coeff']
mo_occ = scf_rec['mo_occ']
if numpy.iscomplexobj(mo[0]): # DHF
dm = make_rdm1(fproj(mo), mo_occ)
else:
if mo[0].ndim == 1: # nr-RHF
dm = reduce(numpy.dot, (mo*mo_occ, mo.T))
else: # nr-UHF
dm = reduce(numpy.dot, (mo[0]*mo_occ[0], mo[0].T)) \
+ reduce(numpy.dot, (mo[1]*mo_occ[1], mo[1].T))
dm = _proj_dmll(chk_mol, dm, mol)
return dm
def get_init_guess(mol, key='minao'):
'''Generate density matrix for initial guess
Kwargs:
key : str
One of 'minao', 'atom', 'huckel', 'hcore', '1e', 'chkfile'.
'''
return UHF(mol).get_init_guess(mol, key)
def time_reversal_matrix(mol, mat):
''' T(A_ij) = A[T(i),T(j)]^*
'''
n2c = mol.nao_2c()
tao = numpy.asarray(mol.time_reversal_map())
# tao(i) = -j means T(f_i) = -f_j
# tao(i) = j means T(f_i) = f_j
idx = abs(tao)-1 # -1 for C indexing convention
#:signL = [(1 if x>0 else -1) for x in tao]
#:sign = numpy.hstack((signL, signL))
#:tmat = numpy.empty_like(mat)
#:for j in range(mat.__len__()):
#: for i in range(mat.__len__()):
#: tmat[idx[i],idx[j]] = mat[i,j] * sign[i]*sign[j]
#:return tmat.conjugate()
sign_mask = tao<0
if mat.shape[0] == n2c*2:
idx = numpy.hstack((idx, idx+n2c))
sign_mask = numpy.hstack((sign_mask, sign_mask))
tmat = mat.take(idx,axis=0).take(idx,axis=1)
tmat[sign_mask,:] *= -1
tmat[:,sign_mask] *= -1
return tmat.T
def analyze(mf, verbose=logger.DEBUG, **kwargs):
from pyscf.tools import dump_mat
log = logger.new_logger(mf, verbose)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
mo_coeff = mf.mo_coeff
mf.dump_scf_summary(log)
log.info('**** MO energy ****')
for i in range(len(mo_energy)):
if mo_occ[i] > 0:
log.info('occupied MO #%d energy= %.15g occ= %g', \
i+1, mo_energy[i], mo_occ[i])
else:
log.info('virtual MO #%d energy= %.15g occ= %g', \
i+1, mo_energy[i], mo_occ[i])
mol = mf.mol
if mf.verbose >= logger.DEBUG1:
        log.debug(' ** MO coefficients of large component of positive states (real part) **')
label = mol.spinor_labels()
n2c = mo_coeff.shape[0] // 2
dump_mat.dump_rec(mf.stdout, mo_coeff[n2c:,:n2c].real, label, start=1)
dm = mf.make_rdm1(mo_coeff, mo_occ)
pop_chg = mf.mulliken_pop(mol, dm, mf.get_ovlp(), log)
dip = mf.dip_moment(mol, dm, verbose=log)
return pop_chg, dip
def mulliken_pop(mol, dm, s=None, verbose=logger.DEBUG):
r'''Mulliken population analysis
.. math:: M_{ij} = D_{ij} S_{ji}
Mulliken charges
.. math:: \delta_i = \sum_j M_{ij}
'''
if s is None: s = get_ovlp(mol)
log = logger.new_logger(mol, verbose)
pop = numpy.einsum('ij,ji->i', dm, s).real
log.info(' ** Mulliken pop **')
for i, s in enumerate(mol.spinor_labels()):
log.info('pop of %s %10.5f', s, pop[i])
log.note(' ** Mulliken atomic charges **')
chg = numpy.zeros(mol.natm)
for i, s in enumerate(mol.spinor_labels(fmt=None)):
chg[s[0]] += pop[i]
chg = mol.atom_charges() - chg
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
log.note('charge of %d%s = %10.5f', ia, symb, chg[ia])
return pop, chg
def dip_moment(mol, dm, unit='Debye', verbose=logger.NOTE, **kwargs):
r''' Dipole moment calculation
.. math::
\mu_x = -\sum_{\mu}\sum_{\nu} P_{\mu\nu}(\nu|x|\mu) + \sum_A Q_A X_A\\
\mu_y = -\sum_{\mu}\sum_{\nu} P_{\mu\nu}(\nu|y|\mu) + \sum_A Q_A Y_A\\
\mu_z = -\sum_{\mu}\sum_{\nu} P_{\mu\nu}(\nu|z|\mu) + \sum_A Q_A Z_A
where :math:`\mu_x, \mu_y, \mu_z` are the x, y and z components of dipole
moment
Args:
mol: an instance of :class:`Mole`
dm : a 2D ndarrays density matrices
Return:
A list: the dipole moment on x, y and z component
'''
log = logger.new_logger(mol, verbose)
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords)
with mol.with_common_orig(charge_center):
ll_dip = mol.intor_symmetric('int1e_r_spinor', comp=3)
ss_dip = mol.intor_symmetric('int1e_sprsp_spinor', comp=3)
n2c = mol.nao_2c()
c = lib.param.LIGHT_SPEED
dip = numpy.einsum('xij,ji->x', ll_dip, dm[:n2c,:n2c]).real
dip+= numpy.einsum('xij,ji->x', ss_dip, dm[n2c:,n2c:]).real * (.5/c**2)
if unit.upper() == 'DEBYE':
dip *= nist.AU2DEBYE
log.note('Dipole moment(X, Y, Z, Debye): %8.5f, %8.5f, %8.5f', *dip)
else:
log.note('Dipole moment(X, Y, Z, A.U.): %8.5f, %8.5f, %8.5f', *dip)
return dip
def get_grad(mo_coeff, mo_occ, fock_ao):
'''DHF Gradients'''
occidx = mo_occ > 0
viridx = ~occidx
g = reduce(numpy.dot, (mo_coeff[:,viridx].T.conj(), fock_ao,
mo_coeff[:,occidx]))
return g.ravel()
class UHF(hf.SCF):
__doc__ = hf.SCF.__doc__ + '''
Attributes for Dirac-Hartree-Fock
with_ssss : bool, for Dirac-Hartree-Fock only
If False, ignore small component integrals (SS|SS). Default is True.
with_gaunt : bool, for Dirac-Hartree-Fock only
Default is False.
with_breit : bool, for Dirac-Hartree-Fock only
Gaunt + gauge term. Default is False.
Examples:
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.RHF(mol)
>>> e0 = mf.scf()
>>> mf = scf.DHF(mol)
>>> e1 = mf.scf()
>>> print('Relativistic effects = %.12f' % (e1-e0))
Relativistic effects = -0.000008854205
'''
conv_tol = getattr(__config__, 'scf_dhf_SCF_conv_tol', 1e-8)
with_ssss = getattr(__config__, 'scf_dhf_SCF_with_ssss', True)
with_gaunt = getattr(__config__, 'scf_dhf_SCF_with_gaunt', False)
with_breit = getattr(__config__, 'scf_dhf_SCF_with_breit', False)
def __init__(self, mol):
hf.SCF.__init__(self, mol)
self._coulomb_now = 'SSSS' # 'SSSS' ~ LLLL+LLSS+SSSS
self.opt = None # (opt_llll, opt_ssll, opt_ssss, opt_gaunt)
self._keys.update(('conv_tol', 'with_ssss', 'with_gaunt',
'with_breit', 'opt'))
def dump_flags(self, verbose=None):
hf.SCF.dump_flags(self, verbose)
log = logger.new_logger(self, verbose)
log.info('with_ssss %s, with_gaunt %s, with_breit %s',
self.with_ssss, self.with_gaunt, self.with_breit)
log.info('light speed = %s', lib.param.LIGHT_SPEED)
return self
@lib.with_doc(get_hcore.__doc__)
def get_hcore(self, mol=None):
if mol is None:
mol = self.mol
return get_hcore(mol)
@lib.with_doc(get_ovlp.__doc__)
def get_ovlp(self, mol=None):
if mol is None:
mol = self.mol
return get_ovlp(mol)
def get_grad(self, mo_coeff, mo_occ, fock=None):
if fock is None:
dm1 = self.make_rdm1(mo_coeff, mo_occ)
fock = self.get_hcore(self.mol) + self.get_veff(self.mol, dm1)
return get_grad(mo_coeff, mo_occ, fock)
def init_guess_by_minao(self, mol=None):
'''Initial guess in terms of the overlap to minimal basis.'''
if mol is None: mol = self.mol
return init_guess_by_minao(mol)
def init_guess_by_atom(self, mol=None):
if mol is None: mol = self.mol
return init_guess_by_atom(mol)
@lib.with_doc(hf.SCF.init_guess_by_huckel.__doc__)
def init_guess_by_huckel(self, mol=None):
if mol is None: mol = self.mol
logger.info(self, 'Initial guess from on-the-fly Huckel, doi:10.1021/acs.jctc.8b01089.')
return init_guess_by_huckel(mol)
def init_guess_by_chkfile(self, chkfile=None, project=None):
if chkfile is None: chkfile = self.chkfile
return init_guess_by_chkfile(self.mol, chkfile, project=project)
def build(self, mol=None):
if self.verbose >= logger.WARN:
self.check_sanity()
self.opt = None
def get_occ(self, mo_energy=None, mo_coeff=None):
if mo_energy is None: mo_energy = self.mo_energy
mol = self.mol
c = lib.param.LIGHT_SPEED
n4c = len(mo_energy)
n2c = n4c // 2
mo_occ = numpy.zeros(n2c * 2)
if mo_energy[n2c] > -1.999 * c**2:
mo_occ[n2c:n2c+mol.nelectron] = 1
else:
lumo = mo_energy[mo_energy > -1.999 * c**2][mol.nelectron]
mo_occ[mo_energy > -1.999 * c**2] = 1
mo_occ[mo_energy >= lumo] = 0
if self.verbose >= logger.INFO:
logger.info(self, 'HOMO %d = %.12g LUMO %d = %.12g',
n2c+mol.nelectron, mo_energy[n2c+mol.nelectron-1],
n2c+mol.nelectron+1, mo_energy[n2c+mol.nelectron])
logger.debug1(self, 'NES mo_energy = %s', mo_energy[:n2c])
logger.debug(self, 'PES mo_energy = %s', mo_energy[n2c:])
return mo_occ
# full density matrix for UHF
def make_rdm1(self, mo_coeff=None, mo_occ=None, **kwargs):
if mo_coeff is None: mo_coeff = self.mo_coeff
if mo_occ is None: mo_occ = self.mo_occ
return make_rdm1(mo_coeff, mo_occ, **kwargs)
def init_direct_scf(self, mol=None):
if mol is None: mol = self.mol
def set_vkscreen(opt, name):
opt._this.contents.r_vkscreen = _vhf._fpointer(name)
opt_llll = _vhf.VHFOpt(mol, 'int2e_spinor', 'CVHFrkbllll_prescreen',
'CVHFrkbllll_direct_scf',
'CVHFrkbllll_direct_scf_dm')
opt_llll.direct_scf_tol = self.direct_scf_tol
set_vkscreen(opt_llll, 'CVHFrkbllll_vkscreen')
opt_ssss = _vhf.VHFOpt(mol, 'int2e_spsp1spsp2_spinor',
'CVHFrkbllll_prescreen',
'CVHFrkbssss_direct_scf',
'CVHFrkbssss_direct_scf_dm')
opt_ssss.direct_scf_tol = self.direct_scf_tol
set_vkscreen(opt_ssss, 'CVHFrkbllll_vkscreen')
opt_ssll = _vhf.VHFOpt(mol, 'int2e_spsp1_spinor',
'CVHFrkbssll_prescreen',
'CVHFrkbssll_direct_scf',
'CVHFrkbssll_direct_scf_dm')
opt_ssll.direct_scf_tol = self.direct_scf_tol
set_vkscreen(opt_ssll, 'CVHFrkbssll_vkscreen')
#TODO: prescreen for gaunt
opt_gaunt = None
return opt_llll, opt_ssll, opt_ssss, opt_gaunt
def get_jk(self, mol=None, dm=None, hermi=1, with_j=True, with_k=True,
omega=None):
if mol is None: mol = self.mol
if dm is None: dm = self.make_rdm1()
t0 = (time.clock(), time.time())
log = logger.new_logger(self)
if self.direct_scf and self.opt is None:
self.opt = self.init_direct_scf(mol)
opt_llll, opt_ssll, opt_ssss, opt_gaunt = self.opt
vj, vk = get_jk_coulomb(mol, dm, hermi, self._coulomb_now,
opt_llll, opt_ssll, opt_ssss, omega, log)
if self.with_breit:
if 'SSSS' in self._coulomb_now.upper():
vj1, vk1 = _call_veff_gaunt_breit(mol, dm, hermi, opt_gaunt, True)
log.debug('Add Breit term')
vj += vj1
vk += vk1
elif self.with_gaunt and 'SS' in self._coulomb_now.upper():
log.debug('Add Gaunt term')
vj1, vk1 = _call_veff_gaunt_breit(mol, dm, hermi, opt_gaunt, False)
vj += vj1
vk += vk1
log.timer('vj and vk', *t0)
return vj, vk
def get_veff(self, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
'''Dirac-Coulomb'''
if mol is None: mol = self.mol
if dm is None: dm = self.make_rdm1()
if self.direct_scf:
ddm = numpy.array(dm, copy=False) - numpy.array(dm_last, copy=False)
vj, vk = self.get_jk(mol, ddm, hermi=hermi)
return numpy.array(vhf_last, copy=False) + vj - vk
else:
vj, vk = self.get_jk(mol, dm, hermi=hermi)
return vj - vk
def scf(self, dm0=None):
cput0 = (time.clock(), time.time())
self.build()
self.dump_flags()
self.converged, self.e_tot, \
self.mo_energy, self.mo_coeff, self.mo_occ \
= kernel(self, self.conv_tol, self.conv_tol_grad,
dm0=dm0, callback=self.callback,
conv_check=self.conv_check)
logger.timer(self, 'SCF', *cput0)
self._finalize()
return self.e_tot
def analyze(self, verbose=None):
if verbose is None: verbose = self.verbose
return analyze(self, verbose)
@lib.with_doc(mulliken_pop.__doc__)
def mulliken_pop(self, mol=None, dm=None, s=None, verbose=logger.DEBUG):
if mol is None: mol = self.mol
if dm is None: dm = self.make_rdm1()
if s is None: s = self.get_ovlp(mol)
return mulliken_pop(mol, dm, s=s, verbose=verbose)
@lib.with_doc(dip_moment.__doc__)
def dip_moment(self, mol=None, dm=None, unit='Debye', verbose=logger.NOTE,
**kwargs):
if mol is None: mol = self.mol
if dm is None: dm =self.make_rdm1()
return dip_moment(mol, dm, unit, verbose=verbose, **kwargs)
def sfx2c1e(self):
raise NotImplementedError
def x2c1e(self):
from pyscf.x2c import x2c
x2chf = x2c.UHF(self.mol)
x2c_keys = x2chf._keys
x2chf.__dict__.update(self.__dict__)
x2chf._keys = self._keys.union(x2c_keys)
return x2chf
x2c = x2c1e
def nuc_grad_method(self):
from pyscf.grad import dhf
return dhf.Gradients(self)
def reset(self, mol=None):
'''Reset mol and clean up relevant attributes for scanner mode'''
if mol is not None:
self.mol = mol
self._coulomb_now = 'SSSS' # 'SSSS' ~ LLLL+LLSS+SSSS
self.opt = None # (opt_llll, opt_ssll, opt_ssss, opt_gaunt)
return self
DHF = UHF
class HF1e(UHF):
def scf(self, *args):
logger.info(self, '\n')
logger.info(self, '******** 1 electron system ********')
self.converged = True
h1e = self.get_hcore(self.mol)
s1e = self.get_ovlp(self.mol)
self.mo_energy, self.mo_coeff = self.eig(h1e, s1e)
self.mo_occ = self.get_occ(self.mo_energy, self.mo_coeff)
self.e_tot = (self.mo_energy[self.mo_occ>0][0] +
self.mol.energy_nuc()).real
self._finalize()
return self.e_tot
class RHF(UHF):
'''Dirac-RHF'''
def __init__(self, mol):
if mol.nelectron.__mod__(2) != 0:
raise ValueError('Invalid electron number %i.' % mol.nelectron)
UHF.__init__(self, mol)
# full density matrix for RHF
def make_rdm1(self, mo_coeff=None, mo_occ=None, **kwargs):
r'''D/2 = \psi_i^\dag\psi_i = \psi_{Ti}^\dag\psi_{Ti}
D(UHF) = \psi_i^\dag\psi_i + \psi_{Ti}^\dag\psi_{Ti}
        RHF averages the density of spin up and spin down:
D(RHF) = (D(UHF) + T[D(UHF)])/2
'''
if mo_coeff is None: mo_coeff = self.mo_coeff
if mo_occ is None: mo_occ = self.mo_occ
dm = make_rdm1(mo_coeff, mo_occ, **kwargs)
return (dm + time_reversal_matrix(self.mol, dm)) * .5
def _jk_triu_(vj, vk, hermi):
if hermi == 0:
if vj.ndim == 2:
vj = lib.hermi_triu(vj, 1)
else:
for i in range(vj.shape[0]):
vj[i] = lib.hermi_triu(vj[i], 1)
else:
if vj.ndim == 2:
vj = lib.hermi_triu(vj, hermi)
vk = lib.hermi_triu(vk, hermi)
else:
for i in range(vj.shape[0]):
vj[i] = lib.hermi_triu(vj[i], hermi)
vk[i] = lib.hermi_triu(vk[i], hermi)
return vj, vk
def _call_veff_llll(mol, dm, hermi=1, mf_opt=None):
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n2c = dm.shape[0] // 2
dms = dm[:n2c,:n2c].copy()
else:
n2c = dm[0].shape[0] // 2
dms = []
for dmi in dm:
dms.append(dmi[:n2c,:n2c].copy())
vj, vk = _vhf.rdirect_mapdm('int2e_spinor', 's8',
('ji->s2kl', 'jk->s1il'), dms, 1,
mol._atm, mol._bas, mol._env, mf_opt)
return _jk_triu_(vj, vk, hermi)
def _call_veff_ssll(mol, dm, hermi=1, mf_opt=None):
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n_dm = 1
n2c = dm.shape[0] // 2
dmll = dm[:n2c,:n2c].copy()
dmsl = dm[n2c:,:n2c].copy()
dmss = dm[n2c:,n2c:].copy()
dms = (dmll, dmss, dmsl)
else:
n_dm = len(dm)
n2c = dm[0].shape[0] // 2
dms = [dmi[:n2c,:n2c].copy() for dmi in dm] \
+ [dmi[n2c:,n2c:].copy() for dmi in dm] \
+ [dmi[n2c:,:n2c].copy() for dmi in dm]
jks = ('lk->s2ij',) * n_dm \
+ ('ji->s2kl',) * n_dm \
+ ('jk->s1il',) * n_dm
c1 = .5 / lib.param.LIGHT_SPEED
vx = _vhf.rdirect_bindm('int2e_spsp1_spinor', 's4', jks, dms, 1,
mol._atm, mol._bas, mol._env, mf_opt) * c1**2
    vj = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex128)
    vk = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex128)
vj[:,n2c:,n2c:] = vx[ :n_dm ,:,:]
vj[:,:n2c,:n2c] = vx[n_dm :n_dm*2,:,:]
vk[:,n2c:,:n2c] = vx[n_dm*2: ,:,:]
if n_dm == 1:
vj = vj.reshape(vj.shape[1:])
vk = vk.reshape(vk.shape[1:])
return _jk_triu_(vj, vk, hermi)
def _call_veff_ssss(mol, dm, hermi=1, mf_opt=None):
c1 = .5 / lib.param.LIGHT_SPEED
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n2c = dm.shape[0] // 2
dms = dm[n2c:,n2c:].copy()
else:
n2c = dm[0].shape[0] // 2
dms = []
for dmi in dm:
dms.append(dmi[n2c:,n2c:].copy())
vj, vk = _vhf.rdirect_mapdm('int2e_spsp1spsp2_spinor', 's8',
('ji->s2kl', 'jk->s1il'), dms, 1,
mol._atm, mol._bas, mol._env, mf_opt) * c1**4
return _jk_triu_(vj, vk, hermi)
def _call_veff_gaunt_breit(mol, dm, hermi=1, mf_opt=None, with_breit=False):
if with_breit:
intor_prefix = 'int2e_breit_'
else:
intor_prefix = 'int2e_'
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n_dm = 1
n2c = dm.shape[0] // 2
dmls = dm[:n2c,n2c:].copy()
dmsl = dm[n2c:,:n2c].copy()
dmll = dm[:n2c,:n2c].copy()
dmss = dm[n2c:,n2c:].copy()
dms = [dmsl, dmsl, dmls, dmll, dmss]
else:
n_dm = len(dm)
n2c = dm[0].shape[0] // 2
dmll = [dmi[:n2c,:n2c].copy() for dmi in dm]
dmls = [dmi[:n2c,n2c:].copy() for dmi in dm]
dmsl = [dmi[n2c:,:n2c].copy() for dmi in dm]
dmss = [dmi[n2c:,n2c:].copy() for dmi in dm]
dms = dmsl + dmsl + dmls + dmll + dmss
    vj = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex128)
    vk = numpy.zeros((n_dm,n2c*2,n2c*2), dtype=numpy.complex128)
jks = ('lk->s1ij',) * n_dm \
+ ('jk->s1il',) * n_dm
vx = _vhf.rdirect_bindm(intor_prefix+'ssp1ssp2_spinor', 's1', jks, dms[:n_dm*2], 1,
mol._atm, mol._bas, mol._env, mf_opt)
vj[:,:n2c,n2c:] = vx[:n_dm,:,:]
vk[:,:n2c,n2c:] = vx[n_dm:,:,:]
jks = ('lk->s1ij',) * n_dm \
+ ('li->s1kj',) * n_dm \
+ ('jk->s1il',) * n_dm
vx = _vhf.rdirect_bindm(intor_prefix+'ssp1sps2_spinor', 's1', jks, dms[n_dm*2:], 1,
mol._atm, mol._bas, mol._env, mf_opt)
vj[:,:n2c,n2c:]+= vx[ :n_dm ,:,:]
vk[:,n2c:,n2c:] = vx[n_dm :n_dm*2,:,:]
vk[:,:n2c,:n2c] = vx[n_dm*2: ,:,:]
if hermi == 1:
vj[:,n2c:,:n2c] = vj[:,:n2c,n2c:].transpose(0,2,1).conj()
vk[:,n2c:,:n2c] = vk[:,:n2c,n2c:].transpose(0,2,1).conj()
elif hermi == 2:
vj[:,n2c:,:n2c] = -vj[:,:n2c,n2c:].transpose(0,2,1).conj()
vk[:,n2c:,:n2c] = -vk[:,:n2c,n2c:].transpose(0,2,1).conj()
else:
raise NotImplementedError
if n_dm == 1:
vj = vj.reshape(n2c*2,n2c*2)
vk = vk.reshape(n2c*2,n2c*2)
c1 = .5 / lib.param.LIGHT_SPEED
if with_breit:
return vj*c1**2, vk*c1**2
else:
return -vj*c1**2, -vk*c1**2
def _proj_dmll(mol_nr, dm_nr, mol):
'''Project non-relativistic atomic density matrix to large component spinor
representation
'''
from pyscf.scf import addons
proj = addons.project_mo_nr2r(mol_nr, numpy.eye(mol_nr.nao_nr()), mol)
n2c = proj.shape[0]
n4c = n2c * 2
dm = numpy.zeros((n4c,n4c), dtype=complex)
# *.5 because alpha and beta are summed in project_mo_nr2r
dm_ll = reduce(numpy.dot, (proj, dm_nr*.5, proj.T.conj()))
dm[:n2c,:n2c] = (dm_ll + time_reversal_matrix(mol, dm_ll)) * .5
return dm
if __name__ == '__main__':
import pyscf.gto
mol = pyscf.gto.Mole()
mol.verbose = 5
mol.output = 'out_dhf'
mol.atom.extend([['He', (0.,0.,0.)], ])
mol.basis = {
'He': [(0, 0, (1, 1)),
(0, 0, (3, 1)),
(1, 0, (1, 1)), ]}
mol.build()
##############
# SCF result
method = UHF(mol)
energy = method.scf() #-2.38146942868
print(energy)
method.with_gaunt = True
print(method.scf()) # -2.38138339005
method.with_breit = True
print(method.scf()) # -2.38138339005
|
gkc1000/pyscf
|
pyscf/scf/dhf.py
|
Python
|
apache-2.0
| 28,936
|
[
"DIRAC",
"PySCF"
] |
6ee9c97318dd90390292f07ba482d7a51957ff17efc2188c2890c4c9a46820ca
|
# SYSTEM LIBS
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt; plt.ion();
import matplotlib
import numpy as np
import pandas as pd
# from root_pandas import read_root
import sys
from itertools import islice
from scipy import signal # To find peaks, for edge-detecting the crystal
from scipy.optimize import curve_fit
import scipy
import os
from sklearn import mixture
import random
import math
from mpl_toolkits.mplot3d import Axes3D
# MY LIBS
# import editable_input as ei # My script for editable text input
# from bin_dataframe import bin2D_dataframe
import mie_utils as my
# def gaussian(x, mu, sig):
# return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def gaussian(x, mu, sig, c):
return c*matplotlib.mlab.normpdf(x, mu, sig)
def gaussian_sum(x, c1, mu1, sig1, mu2, sig2):
return c1*matplotlib.mlab.normpdf(x, mu1, sig1) + \
(1-c1)*matplotlib.mlab.normpdf(x, mu2, sig2)
# def histobinwidth_cost_func(x, y, bw_x, bw_y):
# """
# From http://www.neuralengine.org/res/histogram.html
#
# Target cost function of the binwidth bw, minimize to get best binwidth from
# the shimazaki-shimonoto rule.
#
# x,y: ordered data arrays, the same passed to np.histogram2d
# bw_x,bw_y: Bin width parameters along the two axes to optimize
# """
# bin_x_num =
#
#
# hist, x1, y1 = np.histogram2d(events.loc[:,'Tracks_thetaIn_x'].values,
# events.loc[:,'Tracks_thetaOut_x'].values - events.loc[:,'Tracks_thetaIn_x'].values,
# bins=[bw_x,bw_y], range=[[-100,100], [-80,120]])
def best_binwidth(x, y):
x_max = 100
x_min = -100
y_max = 120
y_min = -80
Nx_MIN = 10 #Minimum number of bins in x (integer)
Nx_MAX = 200 #Maximum number of bins in x (integer)
Ny_MIN = 10 #Minimum number of bins in y (integer)
Ny_MAX = 200 #Maximum number of bins in y (integer)
Nx = np.arange(Nx_MIN, Nx_MAX,5) # #of Bins
Ny = np.arange(Ny_MIN, Ny_MAX,5) # #of Bins
Dx = (x_max - x_min) / Nx #Bin size vector
Dy = (y_max - y_min) / Ny #Bin size vector
Dxy=[]
for i in Dx: #Bin size vector
a=[]
for j in Dy: #Bin size vector
a.append((i,j))
Dxy.append(a)
Dxy=np.array( Dxy, dtype=[('x', float),('y', float)]) #matrix of bin size vector
Cxy=np.zeros(np.shape(Dxy))
Cxy__Dxy_plot=[] #save data to plot in scatterplot x,y,z
#Computation of the cost function to x and y
for i in range(np.size(Nx)):
for j in range(np.size(Ny)):
print(Nx[i], " ", Ny[j])
ki = np.histogram2d(x,y, bins=(Nx[i],Ny[j]))
ki = ki[0] #The mean and the variance are simply computed from the event counts in all the bins of the 2-dimensional histogram.
k = np.mean(ki) #Mean of event count
v = np.var(ki) #Variance of event count
Cxy[i,j] = (2 * k - v) / ( (Dxy[i,j][0]*Dxy[i,j][1])**2 ) #The cost Function
#(Cxy , Dx , Dy)
Cxy__Dxy_plot.append((Cxy[i,j] , Dxy[i,j][0] , Dxy[i,j][1]))#Save result of cost function to scatterplot
Cxy__Dxy_plot = np.array( Cxy__Dxy_plot , dtype=[('Cxy', float),('Dx', float), ('Dy', float)]) #Save result of cost function to scatterplot
#Optimal Bin Size Selection
#combination of i and j that produces the minimum cost function
idx_min_Cxy=np.where(Cxy == np.min(Cxy)) #get the index of the min Cxy
Cxymin=Cxy[idx_min_Cxy[0][0],idx_min_Cxy[1][0]] #value of the min Cxy
print(sum(Cxy==Cxymin)) #check if there is only one min value
optDxy=Dxy[idx_min_Cxy[0][0],idx_min_Cxy[1][0]]#get the bins size pairs that produces the minimum cost function
optDx=optDxy[0]
optDy=optDxy[1]
idx_Nx=idx_min_Cxy[0][0]#get the index in x that produces the minimum cost function
idx_Ny=idx_min_Cxy[1][0]#get the index in y that produces the minimum cost function
print('Cxymin', Cxymin, Nx[idx_Nx], optDx)
print('Cxymin', Cxymin, Ny[idx_Ny], optDy)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x=Cxy__Dxy_plot['Dx']
y=Cxy__Dxy_plot['Dy']
z =Cxy__Dxy_plot['Cxy']
ax.scatter(x, y, z, c=z, marker='o')
ax.set_xlabel('Dx')
ax.set_ylabel('Dy')
ax.set_zlabel('Cxy')
plt.draw()
ax.scatter( [optDx], [optDy],[Cxymin], marker='v', s=150,c="red")
ax.text(optDx, optDy,Cxymin, "Cxy min", color='red')
plt.draw()
plt.show()
return Nx[idx_Nx],Ny[idx_Ny]
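# Usage sketch (Shimazaki-Shinomoto rule: the loop above minimises the cost
# C = (2*mean - variance) / (Dx*Dy)**2 over candidate bin counts); the column
# names follow the ones used further below in this script:
#   nx, ny = best_binwidth(events['Tracks_thetaIn_x'].values,
#                          (events['Tracks_thetaOut_x']
#                           - events['Tracks_thetaIn_x']).values)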
def sgolay2d ( z, window_size, order, derivative=None):
"""
Taken from https://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
"""
# number of terms in the polynomial expression
n_terms = ( order + 1 ) * ( order + 2) / 2.0
if window_size % 2 == 0:
raise ValueError('window_size must be odd')
if window_size**2 < n_terms:
raise ValueError('order is too high for the window size')
half_size = window_size // 2
# exponents of the polynomial.
# p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...
# this line gives a list of two item tuple. Each tuple contains
# the exponents of the k-th term. First element of tuple is for x
# second element for y.
# Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]
exps = [ (k-n, n) for k in range(order+1) for n in range(k+1) ]
# coordinates of points
ind = np.arange(-half_size, half_size+1, dtype=np.float64)
dx = np.repeat( ind, window_size )
dy = np.tile( ind, [window_size, 1]).reshape(window_size**2, )
# build matrix of system of equation
A = np.empty( (window_size**2, len(exps)) )
for i, exp in enumerate( exps ):
A[:,i] = (dx**exp[0]) * (dy**exp[1])
# pad input array with appropriate values at the four borders
new_shape = z.shape[0] + 2*half_size, z.shape[1] + 2*half_size
Z = np.zeros( (new_shape) )
# top band
band = z[0, :]
Z[:half_size, half_size:-half_size] = band - np.abs( np.flipud( z[1:half_size+1, :] ) - band )
# bottom band
band = z[-1, :]
Z[-half_size:, half_size:-half_size] = band + np.abs( np.flipud( z[-half_size-1:-1, :] ) -band )
# left band
band = np.tile( z[:,0].reshape(-1,1), [1,half_size])
Z[half_size:-half_size, :half_size] = band - np.abs( np.fliplr( z[:, 1:half_size+1] ) - band )
# right band
band = np.tile( z[:,-1].reshape(-1,1), [1,half_size] )
Z[half_size:-half_size, -half_size:] = band + np.abs( np.fliplr( z[:, -half_size-1:-1] ) - band )
# central band
Z[half_size:-half_size, half_size:-half_size] = z
# top left corner
band = z[0,0]
Z[:half_size,:half_size] = band - np.abs( np.flipud(np.fliplr(z[1:half_size+1,1:half_size+1]) ) - band )
# bottom right corner
band = z[-1,-1]
Z[-half_size:,-half_size:] = band + np.abs( np.flipud(np.fliplr(z[-half_size-1:-1,-half_size-1:-1]) ) - band )
# top right corner
band = Z[half_size,-half_size:]
Z[:half_size,-half_size:] = band - np.abs( np.flipud(Z[half_size+1:2*half_size+1,-half_size:]) - band )
# bottom left corner
band = Z[-half_size:,half_size].reshape(-1,1)
Z[-half_size:,:half_size] = band - np.abs( np.fliplr(Z[-half_size:, half_size+1:2*half_size+1]) - band )
# solve system and convolve
if derivative == None:
m = np.linalg.pinv(A)[0].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, m, mode='valid')
elif derivative == 'col':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -c, mode='valid')
elif derivative == 'row':
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -r, mode='valid')
elif derivative == 'both':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -r, mode='valid'), scipy.signal.fftconvolve(Z, -c, mode='valid')
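# Usage sketch for sgolay2d (a 5x5 window with a cubic polynomial; `hist2d`
# is a hypothetical 2D histogram to be smoothed):
#   hist2d, xedges, yedges = np.histogram2d(x_data, y_data, bins=(100, 100))
#   smoothed = sgolay2d(hist2d, window_size=5, order=3)
#   d_row, d_col = sgolay2d(hist2d, window_size=5, order=3, derivative='both')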
def fit_channeling(input_df,lowest_percentage, highest_percentage,
fit_tolerance,max_iterations):
"""
Fit the histogram for two gaussian peaks (CH for channeling and AM for
amorphous), then return the fit object for further processing.
data: input dataset.
return: dict cointaining the parameters:
"weight_AM"
"weight_CH"
"mean_AM"
"mean_CH"
"sigma_AM"
"sigma_CH"
"""
clf = mixture.GaussianMixture(
n_components=2,
covariance_type='full',
verbose=0,
verbose_interval=10,
random_state=random.SystemRandom().randrange(0,2147483647), # 2**31-1
# means_init=[[-5], [77]],
# weights_init=[0.5, 0.5],
init_params="kmeans",
n_init = 5,
tol=fit_tolerance,
# precisions_init = [[[1/10**2]],[[1/10**2]]],
#warm_start=True,
max_iter=max_iterations)
################# GET THE DATA FROM THE DATAFRAME
first_percentile = np.percentile(input_df, lowest_percentage)
last_percentile = np.percentile(input_df, highest_percentage)
data_reduced = input_df.values[(input_df.values >= \
first_percentile) & (input_df.values <= last_percentile)]
data = data_reduced.reshape(-1, 1)
################# FIT THE DATA
# Check that we have enough data for a fit, otherwise just return eff=0
clf.fit(data)
if not clf.converged_:
print("[LOG]: Fit did not converge in this bin, bin ignored")
efficiency = np.NaN
r_m1, r_m2 = clf.means_
w1, w2 = clf.weights_
m1, m2 = r_m1[0], r_m2[0]
r_c1, r_c2 = clf.covariances_
#r_c1 = clf.covariances_
#r_c2 = clf.covariances_
c1, c2 = np.sqrt(r_c1[0][0]), np.sqrt(r_c2[0][0])
# print("Means: ", clf.means_, "\n")
# print("Weights: ", clf.weights_, "\n")
# print("Precisions: ", 1/c1, " ", 1/c2, "\n")
# print("Covariances: ", c1, " ", c2, "\n")
fit_results = {}
# Save the weights in the right array
# Lower delta_thetax is the AM peak, higher CH
fit_results["nevents"] = len(data)
if (m1 < m2):
fit_results["weight_AM"] = w1
fit_results["weight_CH"] = w2
fit_results["mean_AM"] = m1
fit_results["mean_CH"] = m2
fit_results["sigma_AM"] = c1
fit_results["sigma_CH"] = c2
else:
fit_results["weight_AM"] = w2
fit_results["weight_CH"]= w1
fit_results["mean_AM"] = m2
fit_results["mean_CH"] = m1
fit_results["sigma_AM"] = c2
fit_results["sigma_CH"] = c1
# Calculate errors plugging the found parameters in a chi2 fit.
    data_histo = np.histogram(data, bins=200, density=True)
histo_bin_centers = (data_histo[1] + (data_histo[1][1] - data_histo[1][0])/2)[:-1]
initial_guess = [fit_results["weight_AM"], fit_results["mean_AM"], fit_results["sigma_AM"],
fit_results["mean_CH"], fit_results["sigma_CH"]]
popt, pcov = curve_fit(gaussian_sum, histo_bin_centers, data_histo[0],
p0=initial_guess)
print(popt)
# # Plot the chi2 fit, for debug purposes
# plt.figure()
# plt.plot(histo_bin_centers,data_histo[0],".")
# plt.plot(histo_bin_centers,gaussian_sum(histo_bin_centers,*popt))
# plt.plot(histo_bin_centers,gaussian_sum(histo_bin_centers,*initial_guess))
#
# plt.show()
perr = np.sqrt(np.diag(pcov))
# Should be in same order as in p0 of curve_fit
fit_results["weight_AM_err"] = perr[0]
fit_results["weight_CH_err"] = perr[0] # c2=1-c1, by propagation same error
fit_results["mean_AM_err"] = perr[1]
fit_results["sigma_AM_err"] = perr[2]
fit_results["mean_CH_err"] = perr[3]
fit_results["sigma_CH_err"] = perr[4]
return fit_results,data
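# Usage sketch for fit_channeling (percentile cuts, tolerance and iteration
# cap are illustrative values; `delta_theta_x` would be the angular kick
# series in murad, as built in the commented-out block further below):
#   fit_results, kept_data = fit_channeling(delta_theta_x,
#                                           lowest_percentage=1,
#                                           highest_percentage=99,
#                                           fit_tolerance=1e-6,
#                                           max_iterations=500)
#   efficiency = fit_results["weight_CH"]      # channeling fraction
#   bending = fit_results["mean_CH"]           # bending angle [murad]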
######################################
################# MAIN
file_name = sys.argv[1]
crystal_name = sys.argv[2]
run_number = sys.argv[3]
particle_name = sys.argv[4]
particle_energy_input = sys.argv[5] # [GeV]
run_date = sys.argv[6] # date of the run
# Use a run specific params file, otherwise look for a crystal specific one,
# otherwise use the general one.
if os.path.isfile(run_number + '_analysis_configuration_params.csv'):
analysis_configuration_params_file = run_number + '_analysis_configuration_params.csv'
elif os.path.isfile(crystal_name + '_analysis_configuration_params.csv'):
analysis_configuration_params_file = crystal_name + '_analysis_configuration_params.csv'
else:
analysis_configuration_params_file = 'analysis_configuration_params.csv'
print("[LOG]: Reading crystal analysis parameters from ", analysis_configuration_params_file)
# Check if the run number is in the actual data file name, otherwise print a
# warning
if '_'+run_number+'_' not in file_name:
print("[WARNING]: '_{}_' not found in file name '{}', maybe check if "
"correct run number or correct file.".format(run_number, file_name))
# Read by chunk not needed by now probably.
events = pd.read_hdf(file_name)
# Angles in microradians from torsion_correction.py lines 171-174
# events["Delta_Theta_x"] = events.loc[:,'Tracks_thetaOut_x'].values - \
# events.loc[:,'Tracks_thetaIn_x'].values
# # Read crystal parameters
cpars = pd.read_csv("crystal_physical_characteristics.csv", index_col=0)
crystal_params = cpars[~cpars.index.isnull()] # Remove empty (like ,,,,,,) lines
crystal_lenght = float(crystal_params.loc[crystal_name,"Lenght (z) (mm)"])*1e-3 # [m]
# Taken from my thesis code
# Initial guesses for crystal parameter, uses either data from
# crystal_physical_characteristics.csv or a default value if the latter is not
# found.
particle_energy = float(particle_energy_input)*1e9 # [eV] TODO generalize to pions!
critical_radius = particle_energy / 550e9 # [m] 550 GeV/m electric field strength from Biryukov
pot_well = 21.34 # [eV] Potential well between crystal planes
theta_bending = float(crystal_params.loc[crystal_name,"H8 bending angle (urad)"]) * 1e-6 # [rad]
crystal_curvature_radius = crystal_lenght / theta_bending
theta_c = math.sqrt(2*pot_well/particle_energy) * (1 - critical_radius/crystal_curvature_radius)*1e6 # [murad]
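# Hedged numeric check (assumed beam energy, not read from the data): for a 180 GeV
# beam the uncorrected Lindhard angle is sqrt(2*21.34/180e9) ~ 15.4 murad; the factor
# (1 - critical_radius/crystal_curvature_radius) then reduces it for the bent crystal.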
# c1_thetavr, c2_thetavr = (-1.5, 1.66666)
# theta_vr = c1_thetavr * theta_c * (1 - c2_thetavr*critical_radius/crystal_curvature_radius) # [murad]
################# FIT USING 5 and 10 AS CUTS
# ang_cut_low = [-5,-10] # [murad]
# ang_cut_high = [5,10] # [murad]
# for low_cut, high_cut in zip(ang_cut_low,ang_cut_high):
# plt.figure()
# geocut_df = events.loc[(events.loc[:,'Tracks_thetaIn_x'] > low_cut) & \
# (events.loc[:,'Tracks_thetaIn_x'] < high_cut)]
# # plt.hist2d(geocut_df.loc[:,'Tracks_thetaIn_x'].values, \
# # geocut_df.loc[:,'Tracks_thetaOut_x'].values - geocut_df.loc[:,'Tracks_thetaIn_x'].values,\
# # bins=[400,200], norm=LogNorm(), range=[[-100,100], [-80,120]])
# fit_results = fit_channeling(geocut_df.Delta_Theta_x)[0]
# filtered_data = fit_channeling(geocut_df.Delta_Theta_x)[1]
# plt.hist(filtered_data, bins=200, range=[-100,100], normed=False) # [murad]
#
#
# total_number_of_events = fit_results["nevents"]
# gauss_AM = total_number_of_events * fit_results["weight_AM"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_AM"], fit_results["sigma_AM"])
# gauss_CH = total_number_of_events * fit_results["weight_CH"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_CH"], fit_results["sigma_CH"])
#
# plt.plot(x_histo, gauss_AM, label="Amorphous Peak", color='r')
# plt.plot(x_histo, gauss_CH, label="Channeling Peak", color='Orange')
# plt.suptitle(r"Crystal {}, run {} — Channeling, cut ±{:.3} ".format(crystal_name, run_number, float(high_cut)),fontweight='bold')
# plt.title(r"Efficiency {:.3}% — bending angle {:.3} ".format(fit_results["weight_CH"]*100, fit_results["mean_CH"]) + r"$[\mu rad]$")
# plt.xlabel(r'$\Delta \theta_{x}\ [\mu rad]$')
# plt.ylabel('Frequency')
# plt.legend()
# #plt.tight_layout()
# plt.savefig("latex/img/" + str(high_cut) + "_chan_histo.pdf")
# plt.show()
#
#
# print("\nCut: +-",low_cut)
# print(pd.Series(fit_results))
#################
################# FIT USING CRITICAL ANGLE AS CUT
# theta_bending = fit_results["mean_CH"]
# crystal_curvature_radius = crystal_lenght / (theta_bending*1e-6)
# theta_c = math.sqrt(2*pot_well/particle_energy)*1e6 * (1 - critical_radius/crystal_curvature_radius) # [murad]
#### How much to move the absolute position of the cuts
# Example, with cuts [-5,5] and offset +3, we have an actual cut of [-2,8]
# Useful if torsion correction is not employed, to center the cuts
# center_offset = float(my.get_from_csv(analysis_configuration_params_file,
# "chan_center_offset"
# ))
dtx_low, dtx_high = my.get_from_csv(analysis_configuration_params_file,
"chan_hist_range_dtx_low",
"chan_hist_range_dtx_high",
)
dtx_nbins = int(my.get_from_csv(analysis_configuration_params_file,
"chan_hist_tx_nbins"))
x_histo = np.linspace(dtx_low,dtx_high,dtx_nbins + 1) # [murad]
print("New Thetac: ", theta_c)
lowest_percentage, highest_percentage = my.get_from_csv(analysis_configuration_params_file,
"chan_low_percentage",
"chan_high_percentage")
dech_start, dech_end = my.get_from_csv(analysis_configuration_params_file,
"dech_start",
"dech_end")
# dech_start, dech_end = 1e6*dech_start, 1e6*dech_end # convert to murad
chan_fit_tolerance = my.get_from_csv(analysis_configuration_params_file,
"chan_fit_tolerance")
max_iterations = int(my.get_from_csv(analysis_configuration_params_file,
"chan_max_iterations"))
i = 0
plt.figure()
# TODO SavitzkyGolay filter
hist, x1, y1, img = plt.hist2d(events.loc[:,'Tracks_thetaIn_x'].values, \
events.loc[:,'Tracks_thetaOut_x'].values - events.loc[:,'Tracks_thetaIn_x'].values,\
bins=[200,dtx_nbins], norm=LogNorm(), range=[[-100,100], [dtx_low,dtx_high]]) # ideal 29,17
plt.suptitle(r"Crystal {}, run {} — {} {} GeV".format(crystal_name, run_number, particle_name, particle_energy_input),fontweight='bold')
plt.title(r"Original histogram and calculated offset: {}".format(r"$\theta_{x}$ vs $\Delta \theta_{x}$"))
plt.xlabel(r'$\theta_{x_{in}}\ [\mu rad]$')
plt.ylabel(r'$\Delta \theta_{x}\ [\mu rad]$')
# print(events)
plt.colorbar()
# Window size = next odd number after rounded thetac
window_size_sg = int(np.round(theta_c)) + int(np.round(theta_c))%2 + 1
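# Example with an assumed theta_c ~ 15.4 murad: round(15.4) = 15 gives 15 + 1 + 1 = 17,
# and an even rounding such as 16 gives 16 + 0 + 1 = 17, so the window always has the
# odd size a Savitzky-Golay kernel expects.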
newhist = sgolay2d(hist, window_size=window_size_sg, order=3)
# Find the maximum only in the channeling spot.
# To do so, consider only the part of the smoothed histogram for which dtx>half thetab
half_thetab_index = np.argwhere(y1==my.take_closest(y1,theta_bending*1e6/2))[0,0]
newhist_upper_half = newhist[:,half_thetab_index:]
rot_hist = np.rot90(newhist_upper_half)
ind = np.unravel_index(np.argmax(rot_hist, axis=None), rot_hist.shape)
angular_offset = x1[ind[1]]
print("Calculated offset = ",angular_offset)
plt.axvline(x=angular_offset, linestyle="dashed", color='Crimson', label="")
nocorr_offset_filename = 'offset_nocorr_histo'
plt.savefig("latex/img/"+ nocorr_offset_filename + ".pdf")
plt.matshow(np.rot90(newhist))
plt.plot(ind[1], ind[0], 'r.')
filtered_filename = 'offset_filtered_histo'
plt.savefig("latex/img/"+ filtered_filename + ".pdf")
#plt.show()
#plt.figure(); plt.hist2d(events.loc[:,'Tracks_thetaIn_x'].values, events.loc[:,'Tracks_thetaOut_x'].values - events.loc[:,'Tracks_thetaIn_x'].values, bins=[400,200], norm=LogNorm(), range=[[-100,100], [-80,120]])
# SET THE CUTS
center_offset = angular_offset
ang_cut_low = [center_offset - theta_c / 2, center_offset - theta_c]
ang_cut_high = [center_offset + theta_c / 2, center_offset + theta_c]
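# Illustration with assumed numbers: offset = +3 murad and theta_c = 15 murad give a
# half-theta_c cut of [-4.5, 10.5] murad and a full-theta_c cut of [-12, 18] murad on
# the incoming angle Tracks_thetaIn_x.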
#input("Proceed?")
for low_cut, high_cut in zip(ang_cut_low,ang_cut_high):
plt.figure()
geocut_df = events.loc[(events.loc[:,'Tracks_thetaIn_x'] > low_cut) & \
(events.loc[:,'Tracks_thetaIn_x'] < high_cut)]
totcut_df = geocut_df.loc[(events.loc[:,'Delta_Theta_x'] < dech_start) | \
(events.loc[:,'Delta_Theta_x'] > dech_end)]
# geocut_df = events.loc[(events.loc[:,'Tracks_thetaIn_x'] > low_cut) & \
# (events.loc[:,'Tracks_thetaIn_x'] < high_cut)]
# plt.hist2d(geocut_df.loc[:,'Tracks_thetaIn_x'].values, \
# geocut_df.loc[:,'Tracks_thetaOut_x'].values - geocut_df.loc[:,'Tracks_thetaIn_x'].values,\
# bins=[400,200], norm=LogNorm(), range=[[-100,100], [-80,120]])
fit_and_data = fit_channeling(totcut_df.Delta_Theta_x,
lowest_percentage, highest_percentage,
chan_fit_tolerance, max_iterations)
fit_results = fit_and_data[0]
filtered_data = fit_and_data[1]
#plt.yscale('log', nonposy='clip')
plt.hist(geocut_df.Delta_Theta_x, bins=dtx_nbins, range=[dtx_low,dtx_high], normed=False) # [murad]
# plt.hist(filtered_data, bins=dtx_nbins, range=[dtx_low,dtx_high], normed=False) # [murad]
total_number_of_events = len(filtered_data)#fit_results["nevents"]
area_bin = (dtx_high-dtx_low)/dtx_nbins * 1
gauss_AM = area_bin*total_number_of_events * fit_results["weight_AM"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_AM"], fit_results["sigma_AM"])
gauss_CH = area_bin*total_number_of_events * fit_results["weight_CH"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_CH"], fit_results["sigma_CH"])
# gauss_AM = fit_results["weight_AM"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_AM"], fit_results["sigma_AM"])
# gauss_CH = fit_results["weight_CH"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_CH"], fit_results["sigma_CH"])
plt.plot(x_histo, gauss_AM, label="Amorphous Peak", color='r')
plt.plot(x_histo, gauss_CH, label="Channeling Peak", color='Orange')
thetac_title = r"$\theta_c/2$" if i == 0 else r"$\theta_c$"
cut_value = theta_c/2 if i == 0 else theta_c
plt.suptitle(r"{} run {}, {} {} GeV — Chan., cut ± {} = {:.1f}±{:.3}".format(crystal_name,run_number,particle_name,particle_energy_input,thetac_title,angular_offset,cut_value),fontweight='bold')
plt.title(r"Efficiency {:.3}% ± {:.1f}% — Bending Angle {:.1f} ± {:.1f} {}".format(fit_results["weight_CH"]*100, fit_results["weight_CH_err"]*100,
fit_results["mean_CH"],fit_results["mean_CH_err"],r"$[\mu rad]$"))
plt.xlabel(r'$\Delta \theta_{x}\ [\mu rad]$')
plt.ylabel('Frequency')
plt.legend()
#plt.tight_layout()
thetac_filename = 'offset_half_thetac' if i == 0 else 'offset_thetac'
plt.savefig("latex/img/"+ thetac_filename + "_chan_histo.pdf")
plt.show()
print("\nCut low: ", low_cut)
print("\nCut high: ", high_cut)
print(pd.Series(fit_results))
# my.save_parameters_in_csv("crystal_analysis_parameters.csv",**fit_results)
i=i+1
#################
################# WRITE TO LATEX THE PARAMS
cut_x_left, cut_x_right = my.get_from_csv("crystal_analysis_parameters.csv", "xmin", "xmax")
tor_m,tor_q,tor_m_err,tor_q_err = my.get_from_csv("crystal_analysis_parameters.csv",\
"torsion_m", "torsion_q", "torsion_m_err", "torsion_q_err")
cut_y_low, cut_y_high = my.get_from_csv(analysis_configuration_params_file, "cut_y_low", "cut_y_high")
#Example \newcommand{\myname}{Francesco Forcher}
#with open("latex/text_gen-definitions.tex", "a") as myfile:
file_name = sys.argv[1]
crystal_name = sys.argv[2]
run_number = sys.argv[3]
particle_name = sys.argv[4]
particle_energy_input = sys.argv[5] # [GeV]
run_date = sys.argv[6] # run date
with open("latex/test_gen-definitions.tex","w") as myfile:
myfile.write(r"% FILE GENERATED AUTOMATICALLY")
myfile.write("\n\n")
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\myname","Francesco Forcher"))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\crystalname",crystal_name))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\runnumber", run_number))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\rundate", run_date))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\particletype", particle_name))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\particleenergy", particle_energy_input + " GeV"))
myfile.write("\n")
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\xmin",float(cut_x_left)))
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\xmax",float(cut_x_right)))
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\ymin",float(cut_y_low)))
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\ymax",float(cut_y_high)))
myfile.write("\n")
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionm", float(tor_m)))
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionq", float(tor_q)))
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionmerr",float(tor_m_err)))
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionqerr",float(tor_q_err)))
#################
|
f-forcher/crystal-channeling-analysis
|
channeling_efficiency_autooffset.py
|
Python
|
gpl-3.0
| 25,655
|
[
"CRYSTAL",
"Gaussian"
] |
817d74bc5fe2f5606d6abc0155f963fbada94a965d8733529bccf8b0c50515d6
|
"""This module tests various ways how to set up the provisioning using the provisioning dialog."""
import re
from datetime import datetime
from datetime import timedelta
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from widgetastic_patternfly import CheckableBootstrapTreeview as CbTree
from cfme import test_requirements
from cfme.common import BaseLoggedInPage
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for
not_scvmm = ProviderFilter(classes=[SCVMMProvider], inverted=True)
all_infra = ProviderFilter(classes=[InfraProvider],
required_fields=[['provisioning', 'template'],
['provisioning', 'host'],
['provisioning', 'datastore']])
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('uses_infra_providers'),
pytest.mark.long_running,
test_requirements.provision,
pytest.mark.tier(3),
pytest.mark.provider(gen_func=providers, filters=[all_infra], scope="module"),
]
@pytest.fixture(scope="function")
def vm_name():
vm_name = random_vm_name('provd')
return vm_name
@pytest.fixture(scope="function")
def prov_data(provisioning, provider):
data = {
'request': {
'email': fauxfactory.gen_email(),
'first_name': fauxfactory.gen_alphanumeric(),
'last_name': fauxfactory.gen_alphanumeric(),
'manager_name': fauxfactory.gen_alphanumeric(20, start="manager ")},
'network': {'vlan': partial_match(provisioning.get('vlan'))},
'environment': {'datastore_name': {'name': provisioning['datastore']},
'host_name': {'name': provisioning['host']}},
'catalog': {},
'hardware': {},
'schedule': {},
'purpose': {},
}
if provider.one_of(RHEVMProvider):
data['catalog']['provision_type'] = 'Native Clone'
elif provider.one_of(VMwareProvider):
data['catalog']['provision_type'] = 'VMware'
# Otherwise just leave it alone
return data
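# Purely illustrative example of the filled dictionary for a VMware provider (the
# names below are made up, not taken from any real provider YAML):
# {'request': {'email': ..., 'first_name': ..., 'last_name': ..., 'manager_name': ...},
#  'network': {'vlan': 'VM Network'},
#  'environment': {'datastore_name': {'name': 'datastore1'},
#                  'host_name': {'name': 'esx-host-1'}},
#  'catalog': {'provision_type': 'VMware'}, 'hardware': {}, 'schedule': {}, 'purpose': {}}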
@pytest.fixture(scope="function")
def provisioner(appliance, request, setup_provider, provider, vm_name):
def _provisioner(template, provisioning_data, delayed=None):
vm = appliance.collections.infra_vms.instantiate(name=vm_name,
provider=provider,
template_name=template)
provisioning_data['template_name'] = template
provisioning_data['provider_name'] = provider.name
view = navigate_to(vm.parent, 'Provision')
view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
base_view = vm.appliance.browser.create_view(BaseLoggedInPage)
base_view.flash.assert_no_error()
request.addfinalizer(
lambda: appliance.collections.infra_vms.instantiate(vm_name, provider)
.cleanup_on_provider())
request_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
provision_request = appliance.collections.requests.instantiate(
description=request_description)
if delayed is not None:
total_seconds = (delayed - datetime.utcnow()).total_seconds()
try:
wait_for(provision_request.is_finished,
fail_func=provision_request.update, num_sec=total_seconds, delay=5)
pytest.fail("The provisioning was not postponed")
except TimedOutError:
pass
logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
wait_for(
provider.mgmt.does_vm_exist, [vm_name],
fail_func=provider.refresh_provider_relationships,
handle_exception=True, num_sec=600
)
# nav to requests page happens on successful provision
logger.info('Waiting for cfme provision request for vm %s', vm_name)
provision_request.wait_for_request()
msg = "Provisioning failed with the message {}".format(provision_request.rest.message)
assert provision_request.is_succeeded(), msg
return vm
return _provisioner
@pytest.mark.rhv2
@pytest.mark.meta(blockers=[BZ(1627673, forced_streams=['5.10'])])
def test_change_cpu_ram(provisioner, soft_assert, provider, prov_data, vm_name):
""" Tests change RAM and CPU in provisioning dialog.
Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set number of CPUs and amount of RAM.
* Submit the provisioning request and wait for it to finish.
* Visit the page of the provisioned VM. The summary should state correct values for CPU&RAM.
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/6h
"""
prov_data['catalog']["vm_name"] = vm_name
prov_data['hardware']["num_sockets"] = "4"
prov_data['hardware']["cores_per_socket"] = "1" if not provider.one_of(SCVMMProvider) else None
prov_data['hardware']["memory"] = "2048"
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
# Go to the VM info
view = navigate_to(vm, "Details")
data = view.entities.summary("Properties").get_text_of("Container").strip()
# No longer possible to use version pick because of cherrypicking?
regexes = list(map(re.compile, [
r"^[^(]*(\d+) CPUs?.*, ([^)]+)[^)]*$",
r"^[^(]*\((\d+) CPUs?, ([^)]+)\)[^)]*$",
r"^.*?(\d+) CPUs? .*?(\d+ MB)$"]))
for regex in regexes:
match = regex.match(data)
if match is not None:
num_cpus, memory = match.groups()
break
else:
raise ValueError("Could not parse string {}".format(repr(data)))
soft_assert(num_cpus == "4", "num_cpus should be {}, is {}".format("4", num_cpus))
soft_assert(memory == "2048 MB", "memory should be {}, is {}".format("2048 MB", memory))
@pytest.mark.rhv3
# Special parametrization in testgen above
@pytest.mark.meta(blockers=[1209847, 1380782], automates=[1633867])
@pytest.mark.provider(gen_func=providers,
filters=[all_infra, not_scvmm],
scope="module")
@pytest.mark.parametrize("disk_format", ["Thin", "Thick", "Preallocated",
"Thick - Lazy Zero", "Thick - Eager Zero"],
ids=["thin", "thick", "preallocated", "thick_lazy", "thick_eager"])
@pytest.mark.uncollectif(lambda provider, disk_format, appliance:
(provider.one_of(RHEVMProvider) and
disk_format in ["Thick", "Thick - Lazy Zero", "Thick - Eager Zero"]) or
(provider.one_of(VMwareProvider) and
disk_format == "Thick" and
appliance.version > '5.11') or
(provider.one_of(VMwareProvider) and
disk_format in ["Thick - Lazy Zero", "Thick - Eager Zero"] and
appliance.version < '5.11') or
(not provider.one_of(RHEVMProvider) and
disk_format == "Preallocated") or
# Temporarily, our storage domain cannot handle Preallocated disks
(provider.one_of(RHEVMProvider) and
disk_format == "Preallocated"),
reason='Invalid combination of disk format and provider type '
'or appliance version (or both!)')
def test_disk_format_select(provisioner, disk_format, provider, prov_data, vm_name):
""" Tests disk format selection in provisioning dialog.
Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set the disk format to be thick or thin.
* Submit the provisioning request and wait for it to finish.
* Visit the page of the provisioned VM.
        * The ``Thin Provisioning Used`` field should state true or false according to the selection
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
casecomponent: Provisioning
caseimportance: high
initialEstimate: 1/6h
"""
prov_data['catalog']['vm_name'] = vm_name
prov_data['hardware']["disk_format"] = disk_format
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
# Go to the VM info
view = navigate_to(vm, 'Details')
thin = view.entities.summary('Datastore Allocation Summary').get_text_of(
'Thin Provisioning Used').strip().lower()
vm.load_details(refresh=True)
if disk_format == "Thin":
assert thin == 'true', "The disk format should be Thin"
else:
assert thin != 'true', "The disk format should not be Thin"
@pytest.mark.rhv3
@pytest.mark.parametrize("started", [True, False])
def test_power_on_or_off_after_provision(provisioner, prov_data, provider, started, vm_name):
""" Tests setting the desired power state after provisioning.
Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set whether you want or not the VM to be
powered on after provisioning.
* Submit the provisioning request and wait for it to finish.
* The VM should become steady in the desired VM power state.
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
prov_data['catalog']['vm_name'] = vm_name
prov_data['schedule']["power_on"] = started
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
wait_for(
lambda: vm.exists_on_provider and
(vm.mgmt.is_running if started else vm.mgmt.is_stopped),
num_sec=240, delay=5
)
@pytest.mark.rhv3
@test_requirements.tag
def test_tag(provisioner, prov_data, provider, vm_name):
""" Tests tagging VMs using provisioning dialogs.
Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, pick a tag.
* Submit the provisioning request and wait for it to finish.
        * Visit the page of the VM, it should display the selected tags
Metadata:
test_flag: provision
Polarion:
assignee: anikifor
casecomponent: Tagging
initialEstimate: 1/8h
"""
prov_data['catalog']['vm_name'] = vm_name
prov_data['purpose']["apply_tags"] = CbTree.CheckNode(path=("Service Level *", "Gold"))
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
tags = vm.get_tags()
assert any(
tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in tags
), "Service Level: Gold not in tags ({})".format(tags)
@pytest.mark.rhv3
@pytest.mark.meta(blockers=[1204115])
@test_requirements.scheduled_ops
def test_provisioning_schedule(provisioner, provider, prov_data, vm_name):
""" Tests provision scheduling.
Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set a scheduled provision and pick a time.
* Submit the provisioning request, it should not start before the scheduled time.
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
now = datetime.utcnow()
prov_data['catalog']['vm_name'] = vm_name
prov_data['schedule']["schedule_type"] = "Schedule"
prov_data['schedule']["provision_date"] = now.strftime("%m/%d/%Y")
STEP = 5
minutes_diff = (STEP - (now.minute % STEP))
# To have some gap for automation
if minutes_diff <= 3:
minutes_diff += 5
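    # Example: at hh:57 the remainder is 2, so minutes_diff starts at 3, falls inside the
    # 3-minute guard band and becomes 8, scheduling the provision for hh+1:05.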
provision_time = timedelta(minutes=minutes_diff) + now
prov_data['schedule']["provision_start_hour"] = str(provision_time.hour)
prov_data['schedule']["provision_start_min"] = str(provision_time.minute)
template_name = provider.data['provisioning']['template']
provisioner(template_name, prov_data, delayed=provision_time)
@pytest.mark.rhv2
@pytest.mark.provider([RHEVMProvider],
required_fields=[['provisioning', 'template'],
['provisioning', 'host'],
['provisioning', 'datastore']])
@pytest.mark.parametrize('vnic_profile', ['<No Profile>', '<Use template nics>'],
ids=['no_profile', 'use_template_nics'])
def test_provisioning_vnic_profiles(provisioner, provider, prov_data, vm_name, vnic_profile):
""" Tests provision VM with other than specific vnic profile selected - <No Profile>
and <Use template nics>.
Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set vlan
to values <No Profile>/<Use template nics>
* Submit the provisioning request, it should provision the vm successfully.
* Check NIC configuration of provisioned VM
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
prov_data['catalog']['vm_name'] = vm_name
prov_data['network'] = {'vlan': vnic_profile}
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
wait_for(
lambda: vm.exists_on_provider,
num_sec=300, delay=5
)
if vnic_profile == '<No Profile>':
# Check the VM vNIC
nics = vm.mgmt.get_nics()
assert nics, 'The VM should have a NIC attached.'
# Check the vNIC network profile
profile = nics[0].vnic_profile
assert not profile, 'The vNIC profile should be empty.'
@pytest.mark.rhv2
@pytest.mark.provider([RHEVMProvider],
required_fields=[['provisioning', 'template_2_nics']])
@pytest.mark.meta(blockers=[BZ(1625139, forced_streams=['5.10', 'upstream'])])
def test_provision_vm_with_2_nics(provisioner, provisioning, prov_data, vm_name):
""" Tests provision VM from a template configured with 2 NICs.
Prerequisites:
* A provider set up, supporting provisioning in CFME, template with 2 NICs
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, select template with 2 NICs.
* Submit the provisioning request, it should provision the vm successfully.
* Check NIC configuration of provisioned VM - it should have 2 NICs attached.
Bugzilla:
1625139
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
testSteps:
1. Open the provisioning dialog.
2. Apart from the usual provisioning settings, select template with 2 NICs.
3. Submit the provisioning request, it should provision the vm successfully.
4. Check NIC configuration of provisioned VM - it should have 2 NICs attached.
"""
template_name = provisioning.get('template_2_nics', None)
prov_data['catalog']['vm_name'] = vm_name
prov_data['network']['vlan'] = '<Use template nics>'
vm = provisioner(template_name, prov_data)
nics = vm.mgmt.get_nics()
assert len(nics) == 2, 'The VM should have 2 NICs attached.'
@pytest.mark.provider([VMwareProvider])
def test_vmware_default_placement(provisioner, prov_data, provider, setup_provider, vm_name):
""" Tests whether vm placed in Datacenter root after the provisioning.
Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set "Choose automatically"
* Submit the provisioning request and wait for it to finish.
* The VM should be placed in the Datacenter root folder (that's two levels up in API).
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
template_name = provider.data['provisioning']['template']
prov_data['catalog']['vm_name'] = vm_name
prov_data['environment'] = {'automatic_placement': True}
vm = provisioner(template_name, prov_data)
wait_for(
lambda: vm.exists_on_provider,
num_sec=240, delay=5,
message="VM {} exists on provider.".format(vm_name)
)
assert 'Datacenter' == provider.mgmt.get_vm(vm_name).raw.parent.parent.name, (
'The new vm is not placed in the Datacenter root directory!')
@pytest.mark.rhv2
@pytest.mark.provider([RHEVMProvider], required_fields=[['provisioning', 'template_false_sparse']])
@pytest.mark.meta(automates=[1726590], blockers=[BZ(1726590, forced_streams=["5.10"])])
def test_linked_clone_default(provisioner, provisioning, provider, prov_data, vm_name):
""" Tests provision VM from a template with the selected "Linked Clone" option.
The template must have preallocated disks (at least one) for this test.
Required_fields is set to [['cap_and_util', 'capandu_vm']] because template for this VM has
a preallocated disk for sure.
Bugzilla:
1726590
Metadata:
test_flag: provision
Polarion:
assignee: anikifor
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
template_name = provider.data['provisioning']['template_false_sparse']
prov_data['catalog']['vm_name'] = vm_name
prov_data['catalog']['linked_clone'] = True
# should be automatic but due to limited storage on rhv sometimes it may fail
prov_data['environment'] = {'automatic_placement': True}
provisioner(template_name, prov_data)
|
izapolsk/integration_tests
|
cfme/tests/infrastructure/test_provisioning_dialog.py
|
Python
|
gpl-2.0
| 19,319
|
[
"VisIt"
] |
0247831c708e4ea4b8bafe79b77fba6fee7a6a5f8ce6a374ff67c46baf37ebaa
|
"""
Structure contains the description of a set of atoms in space, for periodic structures it adds a lattice.
"""
try:
    from itertools import izip as zip
except ImportError:
pass
import json
import os
import struct
import sys
import numpy as np
from collections.abc import MutableSequence
from itertools import combinations, repeat
from math import sin, cos
from multiprocessing import Pool
from pychemia import pcm_log
from pychemia.crystal.lattice import Lattice
from pychemia.core.composition import Composition
from pychemia.core.delaunay import get_reduced_bases
from pychemia.utils.computing import deep_unicode
from pychemia.utils.periodic import mass, atomic_number, covalent_radius, valence, atomic_symbols
import scipy.spatial
class Structure(MutableSequence):
"""
Define an object that contains information about atomic positions,
cell parameters and periodicity and provides methods to manipulate
those elements
    A Structure is basically a set of sites, possibly with a lattice;
    each site can have one or more species with occupancies equal to
    or lower than one.
    A Structure can represent a molecule, cluster, wire, slab,
    crystal structure or alloy with defined sites.
The positions of the atoms and their atomic symbols are declared
in 'positions' and 'symbols' respectively.
For periodic structures, the 'periodicity' can be declared.
and cell parameters in 'cell'
Magnetic moments can be associated in the array vector_info['magnetic_moments'].
"""
def __init__(self, natom=None, symbols=None, periodicity=False, cell=None, positions=None, reduced=None,
mag_moments=None, occupancies=None, sites=None, name=None, comment=None, vector_info=None):
""" Structure is a container for geometric structure and composition for both periodic and non periodic
atomic structures
:param natom: Number of atoms
:param symbols: List of atomic symbols
:param periodicity: (True) if the structure is periodic, False for finite structures and a list of booleans for
structures that are periodic only along specific directions.
        :param cell: The cell parameters; can be a scalar for a cubic cell, a list of 3 numbers for an orthogonal cell,
        or a complete numpy array or list of lists. Each row is considered a cell vector.
:param positions: Array of rows with 3 elements for the positions of atoms in cartesian coordinates.
        :param reduced: Positions of atoms as reduced coordinates relative to the cell vectors, i.e. cell-scaled to the
        range [0,1]
:param mag_moments: Magnetic moments for atoms in the structure
:param occupancies: Atomic occupancies. 1 by default, lower values for vacancies and non-perfect crystals.
:param sites: Atomic sites
>>> a = Structure()
>>> print(a)
Empty structure
>>> a = Structure(symbols=['Xe'])
>>> print(a.natom)
1
>>> d = 1.104
>>> a = Structure(symbols=['N', 'N'], positions=[[0, 0, 0], [0, 0, d]], periodicity=False)
>>> print(a.natom)
2
>>> a = 4.05
>>> b = a/2
>>> fcc = Structure(symbols=['Au'], cell=[[0, b, b], [b, 0, b], [b, b, 0]], periodicity=True)
>>> print(fcc.natom)
1
"""
self.vector_info = {}
self.name = None
self.comment = None
self.natom = None
self.symbols = None
self.positions = None
self.reduced = None
self.cell = None
self.periodicity = None
self.vector_info['mag_moments'] = None
self.sites = None
self.occupancies = None
self._lattice = None
self._composition = None
self.vector_info = None
# By default the number of atoms will be the value given or zero except if other information overrules
# that value
if natom is not None:
self.natom = int(natom)
else:
self.natom = 0
if symbols is not None:
if symbols in atomic_symbols:
self.symbols = [symbols]
else:
for iatom in list(symbols):
assert(iatom in atomic_symbols)
self.symbols = list(symbols)
self.natom = len(self.symbols)
else:
if self.natom != 0:
                raise ValueError('List of atomic symbols not provided for structure with %d atoms' % self.natom)
# No periodicity will be assumed except if cell or reduced coordinates are provided
if periodicity is None:
periodicity = 3*[False]
if isinstance(periodicity, bool):
periodicity = 3*[periodicity]
self.set_periodicity(periodicity)
if cell is not None:
cell = np.array(cell)
self.set_cell(cell)
self.periodicity = 3*[True]
if positions is not None:
positions = np.array(positions)
self.set_positions(positions)
if reduced is not None:
reduced = np.array(reduced)
self.set_reduced(reduced)
self.periodicity = 3*[True]
if mag_moments is not None:
self.set_mag_moments(np.array(mag_moments))
if occupancies is not None:
self.occupancies = list(occupancies)
if sites is not None:
self.sites = sites
if vector_info is None:
self.vector_info = {'mag_moments': None}
else:
self.vector_info = vector_info
self.name = name
self.comment = comment
# This routine completes the missing values and makes all the values coherent.
self._autocomplete()
if not self._check():
            raise ValueError('Arguments are not consistent')
def __len__(self):
""" Number of sites in structure.
        For perfect crystals it will match the number of atoms, as each atomic site holds a single atom.
        :return: Number of sites in structure
        :rtype: int
>>> st = Structure(symbols=['H', 'O'], positions= [[0,0,0], [0,0,1]])
>>> len(st)
2
"""
return self.nsites
def __str__(self):
"""String representation of Structure
:return: Human readable text for Structure
>>> st = Structure(symbols=['Xe'])
>>> print(st)
1
<BLANKLINE>
Symb ( Positions )
Xe ( 0.0000 0.0000 0.0000 )
<BLANKLINE>
Non-periodic structure
>>> a = 4.05
>>> b = a/2
>>> fcc = Structure(symbols=['Au'], cell=[[0, b, b], [b, 0, b], [b, b, 0]], periodicity=True)
>>> print(fcc)
1
<BLANKLINE>
Symb ( Positions ) [ Cell-reduced coordinates ]
Au ( 0.0000 0.0000 0.0000 ) [ 0.0000 0.0000 0.0000 ]
<BLANKLINE>
Periodicity: X Y Z
<BLANKLINE>
Lattice vectors:
0.0000 2.0250 2.0250
2.0250 0.0000 2.0250
2.0250 2.0250 0.0000
<BLANKLINE>
"""
if self.natom == 0:
xyz = 'Empty structure'
else:
xyz = str(self.natom) + '\n\n'
if self.is_crystal:
xyz += 'Symb ( Positions ) [ Cell-reduced coordinates ]\n'
else:
xyz += 'Symb ( Positions )\n'
for i in range(self.natom):
if self.is_crystal:
xyz += ("%4s ( %10.4f %10.4f %10.4f ) [ %10.4f %10.4f %10.4f ]\n"
% (self.symbols[i],
self.positions[i, 0],
self.positions[i, 1],
self.positions[i, 2],
self.reduced[i, 0],
self.reduced[i, 1],
self.reduced[i, 2]))
else:
xyz += ("%4s ( %10.4f %10.4f %10.4f )\n"
% (self.symbols[i],
self.positions[i, 0],
self.positions[i, 1],
self.positions[i, 2]))
if self.periodicity[0] or self.periodicity[1] or self.periodicity[2]:
xyz += '\nPeriodicity: '
if self.periodicity[0]:
xyz += ' X'
if self.periodicity[1]:
xyz += ' Y'
if self.periodicity[2]:
xyz += ' Z'
xyz += '\n\nLattice vectors:\n'
for i in range(3):
xyz += (" %10.4f %10.4f %10.4f\n"
% (self.cell[i, 0], self.cell[i, 1], self.cell[i, 2]))
else:
xyz += '\nNon-periodic structure'
return xyz
def __repr__(self):
"""
Evaluatable representation of Structure
:return: String representation of the structure
:rtype: str
>>> st1 = Structure(symbols=['H'])
>>> st2 = eval(repr(st1))
>>> st1 == st2
True
>>> st = Structure(symbols='He', cell=[2,2,2])
>>> st
Structure(symbols=['He'], cell=2, reduced=[[0.0, 0.0, 0.0]], periodicity=True)
"""
ret = 'Structure(symbols=' + str(self.symbols)
if self.is_periodic:
if np.all(np.diag(self.cell.diagonal()) == self.cell):
if np.max(self.cell.diagonal()) == np.min(self.cell.diagonal()):
ret += ', cell=' + str(self.cell[0, 0])
else:
ret += ', cell=' + str(self.cell.diagonal().tolist())
else:
ret += ', cell=' + str(self.cell.tolist())
ret += ', reduced=' + str(self.reduced.tolist())
else:
ret += ', positions=' + str(self.positions.tolist())
if all([self.periodicity[0] == item for item in self.periodicity]):
ret += ', periodicity=' + str(self.periodicity[0])
else:
ret += ', periodicity=' + str(self.periodicity)
ret += ')'
return ret
def __delitem__(self, key):
self.del_atom(key)
def __setitem__(self, key, value):
self.add_atom(value['symbols'], value['positions'])
def __getitem__(self, item):
return SiteSet(self)[item]
def __iter__(self):
return iter(SiteSet(self))
def insert(self, index, value):
self.add_atom(value['symbols'], value['positions'])
def _autocomplete(self):
if self.natom is None:
if self.positions is not None:
self.natom = len(self.positions)
elif self.reduced is not None:
self.natom = len(self.reduced)
elif self.symbols is not None:
self.natom = len(self.symbols)
else:
self.natom = 0
if self.symbols is None and self.natom == 0:
self.symbols = []
if self.periodicity is None:
self.set_periodicity(True)
if self.cell is None and self.is_periodic:
self.set_cell(1)
if self.positions is None:
if self.reduced is not None:
self.reduced2positions()
else:
if self.natom == 0:
self.positions = np.array([])
elif self.natom == 1:
self.positions = np.array([[0.0, 0.0, 0.0]])
else:
raise ValueError('Positions must be present for more than 1 atom')
if self.reduced is None and self.is_crystal:
if self.positions is not None and self.natom > 0:
self.positions2reduced()
else:
self.reduced = np.array([])
if self.sites is None:
self.sites = range(self.natom)
if self.occupancies is None:
self.occupancies = self.natom * [1.0]
def _check(self):
check = True
if len(self.symbols) != self.natom:
print('Error: Bad symbols')
check = False
if len(self.positions) != self.natom:
print('Error: Bad positions')
check = False
if self.is_crystal and len(self.reduced) != self.natom:
print('Error: Bad reduced')
check = False
if self.vector_info['mag_moments'] is not None and len(self.vector_info['mag_moments']) != self.natom:
print('Error: Bad mag_moments')
check = False
return check
def add_atom(self, name, coordinates, option='cartesian'):
"""
Add an atom with a given 'name' and cartesian or reduced 'position'
The atom will be added at the end of the list of atoms in the Structure
:param name: (str)
:param coordinates: (list, numpy.array)
:param option: (str)
"""
assert (name in atomic_symbols)
assert (option in ['cartesian', 'reduced'])
self.symbols.append(name)
self.natom += 1
self._composition = None
if option == 'cartesian':
if self.natom == 0:
self.positions = np.array(coordinates).reshape([-1, 3])
else:
self.positions = np.append(self.positions, coordinates).reshape([-1, 3])
self.positions2reduced()
elif option == 'reduced':
if self.natom == 0:
self.reduced = np.array(coordinates).reshape([-1, 3])
else:
self.reduced = np.append(self.reduced, coordinates).reshape([-1, 3])
self.reduced2positions()
def del_atom(self, index):
"""
Removes the atom with the given index
:param index:
:return:
"""
assert (abs(index) < self.natom)
self.symbols.pop(index)
np.delete(self.positions, index, 0)
if self.is_periodic:
np.delete(self.reduced, index, 0)
self.natom -= 1
self._composition = None
def center_mass(self, list_of_atoms=None):
"""
Computes the center of mass (CM) of the XYZ object or
a partial list of atoms. The default is to compute the
        CM of all the atoms in the object; if a list
        is entered, only those atoms will be included in the CM.
Return the CM as a numpy array
"""
if list_of_atoms is None:
list_of_atoms = range(self.natom)
total_mass = 0.0
center_of_mass = np.zeros(3)
if self.natom == 0:
return center_of_mass
atomicnumber = atomic_number(list(self.symbols))
for i in range(self.natom):
if i in list_of_atoms:
total_mass += mass(atomicnumber[i])
center_of_mass += mass(atomicnumber[i]) * self.positions[i]
return center_of_mass / total_mass
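    # Illustrative check (mirrors the N2 doctest of the constructor): two equal masses at
    # [0, 0, 0] and [0, 0, d] give a centre of mass at [0, 0, d/2].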
def rotation(self, tx, ty, tz):
"""
Rotate the molecule in the three directions
"""
rotationx = np.array([[1, 0, 0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
rotationy = np.array([[cos(ty), 0, sin(ty)], [0, 1, 0], [-sin(ty), 0, cos(ty)]])
rotationz = np.array([[cos(tz), -sin(tz), 0], [sin(tz), cos(tz), 0], [0, 0, 1]])
rotation = np.dot(np.dot(rotationx, rotationy), rotationz)
for i in range(self.natom):
self.positions[i] = np.dot(rotation, self.positions[i])
def get_cell(self):
if self._lattice is None:
self._lattice = Lattice(self.cell)
return self._lattice
@property
def lattice(self):
return self.get_cell()
def get_composition(self, gcd=True):
"""
Computes the composition of the Structure
as the count of each species in the cell
If gcd is True the values are divided by the
greatest common divisor
:param gcd: bool
:rtype : Composition
"""
if self._composition is None:
species = {}
for atom in self.symbols:
if atom in species:
species[atom] += 1
else:
species[atom] = 1
self._composition = Composition(species)
return self._composition
def positions2reduced(self):
"""
Computes the cell-reduced coordinates from the
cartesian dimensional coordinates
"""
self.reduced = np.linalg.solve(self.cell.T, self.positions.T).T
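        # Equivalent to reduced = positions @ inv(cell): solving cell.T x = positions.T
        # avoids forming the explicit inverse of the cell matrix.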
for i in range(3):
if self.periodicity[i]:
self.reduced[:, i] %= 1.0
def reduced2positions(self):
"""
Computes the dimensional cartesian coordinates
from the adimensional cell-reduced coordinates
"""
self.positions = np.dot(self.reduced, self.cell)
def relocate_to_cm(self, list_of_atoms=None):
"""
Relocates the system of atoms to the center of mass
a partial list of atoms can be used to compute
the center, but all the atoms are moved to the
computed center
:param list_of_atoms: (list) List of atoms that will be considered for computing the center of mass
by default all atoms are included
"""
cm = self.center_mass(list_of_atoms)
self.positions += -cm
def get_distance(self, iatom, jatom, with_periodicity=True, tolerance=1e-5):
"""
Calculates the distance between 2 atom, identified by index
iatom and jatom
:param iatom: (int) index of first atom
:param jatom: (int) index of second atom
:param with_periodicity: (bool) if the periodic images should be considered to compute the shortest distance
:param tolerance: (float) Tolerance for the bases reduction
:rtype : (float) distance between iatom and jatom
"""
if with_periodicity:
reduced_bases = get_reduced_bases(self.cell, tolerance)
scaled_pos = np.dot(self.positions, np.linalg.inv(reduced_bases))
# move scaled atomic positions into -0.5 < r <= 0.5
for pos in scaled_pos:
pos -= pos.round()
# Look for the shortest one in surrounded 3x3x3 cells
distances_list = []
for i in (-1, 0, 1):
for j in (-1, 0, 1):
for k in (-1, 0, 1):
distances_list.append(np.linalg.norm(
np.dot(scaled_pos[iatom] - scaled_pos[jatom] +
np.array([i, j, k]), reduced_bases)))
ret = min(distances_list)
else:
posi = self.positions[iatom]
posj = self.positions[jatom]
ret = np.linalg.norm(posi - posj)
return ret
@staticmethod
def random_cell(composition, method='stretching', stabilization_number=20, nparal=5, periodic=True,
factor_optimal_volume=8):
"""
Generate a random cell
There are two algorithms implemented:
scaling: Generate a random cell and random distribution of atoms and
scale the lattice to separate the atoms.
stretching: Generating a random cell and random distribution of atoms
and stretching their bonds until the distance between any
two atoms is always greater than the sum of covalent radius.
:param composition: (pychemia.Composition)
:param method: (str)
:param stabilization_number: (int)
:param nparal: (int)
:param periodic: (bool)
:param factor_optimal_volume: (float)
:return:
>>> import os
>>> st = Structure.random_cell('LiAlCl4', stabilization_number=3)
>>> st.natom
6
>>> st.save_json('test.json')
>>> st2 = Structure.load_json('test.json')
>>> st == st2
True
>>> os.remove('test.json')
"""
comp = Composition(composition)
pcm_log.debug('Generating a random structure with composition: ' + str(comp.composition))
natom = comp.natom
symbols = comp.symbols
        best_volume = float('inf')
best_structure = None
optimal_volume = comp.covalent_volume('cubes')
stabilization_history = 0
pool = Pool(processes=nparal)
trial = 0
while stabilization_history < stabilization_number:
args = list(best_volume * np.ones(10))
ret = pool.map(worker_star, zip(repeat(method), repeat(composition), repeat(periodic), args))
ngood = 0
for structure in ret:
if structure is not None:
# print('SH:%d Vol:%10.3f Factor:%10.3f' % (stabilization_history,
# structure.volume,
# structure.volume / optimal_volume))
ngood += 1
if best_structure is not None:
if structure.volume < best_structure.volume:
best_structure = structure
else:
best_structure = structure
# log.debug('Good structures: %d/10 Best volume: %7.3f' % (ngood, best_structure.volume))
if best_structure is not None and best_volume > best_structure.volume:
best_volume = best_structure.volume
stabilization_history = 0
else:
stabilization_history += 1
if best_volume < factor_optimal_volume * optimal_volume:
break
trial += 1
# log.debug('Trial: %4d Volume: %7.2f Optimal Volume: %7.2f Ratio: %5.2f' %
# (trial, best_volume, optimal_volume, best_volume/optimal_volume))
pool.close()
if best_structure is not None and periodic:
# Analysis of the quality for the best structure
rpos = best_structure.reduced
for i, j in combinations(range(natom), 2):
distance = best_structure.lattice.minimal_distance(rpos[i], rpos[j])
covalent_distance = sum(covalent_radius([symbols[i], symbols[j]]))
if distance < covalent_distance:
pcm_log.debug('Covalent distance: %7.4f Minimal distance: %7.4f Difference: %7.3e' %
(covalent_distance, distance, covalent_distance - distance))
best_structure.canonical_form()
return best_structure
@staticmethod
def random_cluster(composition, method='stretching', stabilization_number=20, nparal=5):
st = Structure.random_cell(composition=composition, method=method, stabilization_number=stabilization_number,
nparal=nparal, periodic=False)
return Structure(symbols=st.symbols, positions=st.positions, periodicity=False)
def adjust_reduced(self):
for i in range(self.natom):
for j in range(3):
for value in [0.5, 0.25, 0.75, 0.125]:
if abs(value - self.reduced[i, j]) < 1E-4:
self.reduced[i, j] = value
self.reduced2positions()
def set_cell(self, cell):
"""
Set the vectors defining the cell
:param cell: A matrix with the 3 unit cell
vectors
:return:
"""
npcell = np.array(cell)
if npcell.shape == () or npcell.shape == (1,):
self.cell = npcell * np.eye(3)
elif npcell.shape == (3,):
self.cell = np.diag(npcell)
else:
self.cell = np.array(cell).reshape((3, 3))
self._lattice = None
def set_mag_moments(self, mag_moments):
"""
Set the magnetic moments with one vector on each
atom
Args:
mag_moments: List or numpy array with one
vector for each atom.
The values will be converted into a numpy array
"""
self.vector_info['mag_moments'] = np.array(mag_moments).reshape([-1, 3])
def set_periodicity(self, periodicity):
"""
Set periodicity of the structure
Args:
periodicity: (Boolean) a single value means that the structure has that
periodicity all along the 3 directions. Otherwise a list
of 3 booleans is required
"""
if isinstance(periodicity, bool):
self.periodicity = 3 * [periodicity]
elif isinstance(periodicity, list) and len(periodicity) == 1:
self.periodicity = 3 * periodicity
else:
self.periodicity = list(periodicity)
def set_positions(self, positions):
"""
Set the positions of the atoms
This contains dimensional values
in cartesian coordinates
Args:
positions: A array of 3 vectors
with dimensional coordinates
"""
self.positions = np.array(positions).reshape([-1, 3])
def set_reduced(self, reduced):
"""
Set the reduced positions of the atoms
This contains adimensional values
relative to cell vectors
:param reduced:
:return:
"""
self.reduced = np.array(reduced).reshape([-1, 3])
def sort_sites_using_list(self, sorted_indices):
sorted_indices = np.array([int(x) for x in sorted_indices])
self.symbols = list(np.array(self.symbols)[sorted_indices])
self.positions = self.positions[sorted_indices]
if self.is_periodic:
self.reduced = self.reduced[sorted_indices]
if self.vector_info is not None:
for vi in self.vector_info:
if self.vector_info[vi] is not None:
self.vector_info[vi] = self.vector_info[vi][sorted_indices]
def sort_sites(self):
# First: Sort sites using the distance to the origin
sorted_indices = np.array([np.linalg.norm(self.positions[i]) for i in range(self.nsites)]).argsort()
# print sorted_indices
self.sort_sites_using_list(sorted_indices)
# Second: Sort again using the atomic number
if len(self.species) > 1:
sorted_indices = np.array([atomic_number(x) for x in self.symbols]).argsort()
self.sort_sites_using_list(sorted_indices)
def sort_axes(self):
"""
        Sort the lattice vectors in decreasing order of their length:
        'a' will be the longest lattice vector and
        'c' the shortest.
"""
sorted_indices = self.lattice.lengths.argsort()[::-1]
self.set_cell(self.cell[sorted_indices])
self.reduced = self.reduced[:, sorted_indices]
def align_with_axis(self, axis=0, round_decimals=14):
lattice = self.lattice
lattice.align_with_axis(axis=axis, round_decimals=round_decimals)
self.set_cell(lattice.cell)
self.reduced2positions()
def align_with_plane(self, axis=2, round_decimals=14):
lattice = self.lattice
lattice.align_with_plane(axis=axis, round_decimals=round_decimals)
self.set_cell(lattice.cell)
self.reduced2positions()
def align_inertia_momenta(self):
ii = self.inertia_matrix()
eigval, eigvec = np.linalg.eig(ii)
eigvec = eigvec.T[eigval.argsort()[::-1]].T
inveigvec = np.linalg.inv(eigvec)
self.positions = np.dot(inveigvec, self.positions.T).T
def canonical_form(self):
if not self.is_periodic:
self.relocate_to_cm()
self.align_inertia_momenta()
self.sort_sites()
if self.is_periodic:
self.sort_axes()
self.align_with_axis()
self.align_with_plane()
self.atoms_in_box()
self.sort_sites()
def supercell(self, size):
"""
Creates a supercell, replicating the positions
of atoms in the x,y,z directions a number of
size=(nx,ny,nz) times
"""
new_natom = np.prod(size) * self.natom
new_symbols = []
new_positions = np.zeros((new_natom, 3))
size = np.array(size).astype(int)
index = 0
for i in range(size[0]):
for j in range(size[1]):
for k in range(size[2]):
for n in range(self.natom):
new_symbols.append(self.symbols[n])
new_positions[index] = self.positions[n] + (
i * self.cell[0] + j * self.cell[1] + k * self.cell[2])
index += 1
new_cell = np.zeros((3, 3))
new_cell[0] = size[0] * self.cell[0]
new_cell[1] = size[1] * self.cell[1]
new_cell[2] = size[2] * self.cell[2]
return Structure(symbols=new_symbols, positions=new_positions, cell=new_cell)
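    # Hedged usage sketch (cell values borrowed from the fcc doctest of the constructor):
    #   fcc = Structure(symbols=['Au'], cell=[[0, b, b], [b, 0, b], [b, b, 0]], periodicity=True)
    #   fcc.supercell((2, 2, 2))   # returns a new Structure with 8 atoms and doubled lattice vectors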
def copy(self):
"""
Get a copy of the object
"""
copy_struct = Structure(name=self.name, comment=self.comment, natom=self.natom, symbols=self.symbols,
periodicity=self.periodicity, cell=self.cell, positions=self.positions,
reduced=self.reduced, vector_info=self.vector_info, sites=self.sites,
occupancies=self.occupancies)
return copy_struct
@property
def to_dict(self):
ret = {'natom': self.natom,
'symbols': self.symbols,
'periodicity': self.periodicity,
'positions': self.positions.tolist(),
'nspecies': len(self.species),
'formula': self.formula}
if self.is_periodic:
ret['cell'] = self.cell.tolist()
ret['reduced'] = self.reduced.tolist()
ret['density'] = self.density
if self.name is not None:
ret['name'] = self.name
if self.comment is not None:
ret['comment'] = self.comment
if self.sites != range(self.natom):
ret['sites'] = list(self.sites)
if self.occupancies != self.natom * [1.0]:
ret['occupancies'] = self.occupancies
# if len(self.vector_info) != 1 or self.vector_info['mag_moments'] is not None:
# ret['vector_info'] = self.vector_info
return ret
def round(self, decimals=6, pos='reduced'):
self.set_cell(np.around(self.cell, decimals))
if pos == 'reduced':
self.set_reduced(np.around(self.reduced, decimals))
self.reduced2positions()
else:
self.set_positions(np.around(self.positions, decimals))
self.positions2reduced()
@staticmethod
def from_dict(structdict):
natom = structdict['natom']
symbols = deep_unicode(structdict['symbols'])
periodicity = structdict['periodicity']
positions = np.array(structdict['positions'])
if 'name' in structdict:
name = structdict['name']
else:
name = None
if 'comment' in structdict:
comment = structdict['comment']
else:
comment = None
if 'cell' in structdict:
cell = np.array(structdict['cell'])
else:
cell = None
if 'reduced' in structdict:
reduced = np.array(structdict['reduced'])
else:
reduced = None
if 'vector_info' in structdict:
vector_info = structdict['vector_info']
else:
vector_info = None
if 'sites' in structdict:
sites = structdict['sites']
else:
sites = range(natom)
if 'occupancies' in structdict:
occupancies = structdict['occupancies']
else:
occupancies = list(np.ones(natom))
return Structure(name=name, comment=comment, natom=natom, symbols=symbols, periodicity=periodicity, cell=cell,
positions=positions, reduced=reduced, vector_info=vector_info, sites=sites,
occupancies=occupancies)
def save_json(self, filename):
filep = open(filename, 'w')
json.dump(self.to_dict, filep, sort_keys=True, indent=4, separators=(',', ': '))
filep.close()
@staticmethod
def load_json(filename):
filep = open(filename, 'r')
structdict = deep_unicode(json.load(filep))
filep.close()
return Structure.from_dict(structdict)
def distance2(self, atom1, atom2):
assert (isinstance(atom1, int))
assert (isinstance(atom2, int))
assert (atom1 < self.natom)
assert (atom2 < self.natom)
if self.is_periodic:
return self.lattice.distance2(self.reduced[atom1], self.reduced[atom2])
else:
dm = scipy.spatial.distance_matrix(self.positions, self.positions)
return dm[atom1, atom2]
def distance_matrix(self):
if self.is_periodic:
dm = np.zeros((self.nsites, self.nsites))
for i in range(self.nsites - 1):
for j in range(i + 1, self.nsites):
d = self.lattice.distance2(self.reduced[i], self.reduced[j], radius=1E10, limits=[1, 1, 1])
# print("%d %d - %d" % (i,j, len(d)))
dm[i, j] = min([d[x]['distance'] for x in d])
dm[j, i] = dm[i, j]
else:
dm = scipy.spatial.distance_matrix(self.positions, self.positions)
return dm
def valence_electrons(self):
ret = 0
for key, value in self.composition.items():
ret += value * valence(key)
return ret
def __eq__(self, other):
if self.natom != other.natom:
ret = False
elif not np.array_equal(self.positions, other.positions):
ret = False
elif not np.array_equal(self.periodicity, other.periodicity):
ret = False
elif self.is_periodic and other.is_periodic:
if not np.array_equal(self.reduced, other.reduced):
ret = False
elif not np.array_equal(self.cell, other.cell):
ret = False
else:
ret = True
else:
ret = True
return ret
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_perfect(self):
"""
Return True if two conditions are met:
1. The number of sites is equal to
the number of atoms. ie there is no more than
one atom on each site.
2. All the occupancies are equal to 1
:rtype : bool
:return: bool
"""
return self.natom == self.nsites and min(self.occupancies) == 1.0
@property
def is_periodic(self):
"""
Return True if the Structure is periodic in any direction
False for non-periodic structures
:rtype : bool
:return: bool
"""
return any(self.periodicity)
@property
def is_crystal(self):
"""
True if structure is periodic in all directions
False otherwise
:rtype : bool
:return: bool
"""
if not self.is_periodic:
return False
else:
return self.get_cell().periodic_dimensions == 3
@property
def composition(self):
"""
Dictionary with the composition, the keys are the species and the values
represent the number of atoms of that specie
:rtype : dict
:return: dict
"""
return self.get_composition().composition
@property
def formula(self):
"""
String with the chemical formula
:rtype: str
:return: str
"""
return self.get_composition().formula
@property
def density(self):
"""
Computes the density of the cell
:rtype: float
:return: float
"""
return sum(np.array(mass(self.symbols))) / self.volume
@property
def volume(self):
"""
Computes the volume of the cell
:rtype: float
:return: float
"""
if self.is_periodic:
return abs(np.linalg.det(self.cell))
else:
volume = (np.max(self.positions[:, 0]) - np.min(self.positions[:, 0])) * \
(np.max(self.positions[:, 1]) - np.min(self.positions[:, 1])) * \
(np.max(self.positions[:, 2]) - np.min(self.positions[:, 2]))
if volume > 0.0:
return volume
else:
return 4.0 / 3.0 * np.pi * np.max(self.positions.flatten()) ** 3
@property
def species(self):
return self.get_composition().species
@property
def nspecies(self):
return len(self.get_composition().species)
@property
def nsites(self):
return len(self.positions)
def scale(self, tolerance=0.7):
assert self.is_perfect
assert self.is_crystal
lattice = self.lattice.scale(self.symbols, self.reduced, tolerance=tolerance)
return Structure(cell=lattice.cell, reduced=self.reduced, symbols=self.symbols)
# Not working
# def cut_void(self, factor=1.5):
# ret = self.copy()
# ret.canonical_form()
# mins = [min(ret.reduced[:, i]) for i in range(3)]
# ret.reduced = ret.reduced - mins
# ret.reduced2positions()
# max_lenght = max(ret.positions[:, 0]) + 2.0*max([covalent_radius(ret.symbols[i]) for i in range(ret.nsites)])
# print factor, max_lenght, ret.cell[0,0]
# if factor * max_lenght < ret.cell[0, 0]:
# cell = ret.cell
# cell[0, 0] = factor * max_lenght
# ret.set_cell(cell)
# return ret
# else:
# return self
def atoms_in_box(self):
while min(self.reduced.flatten()) < 0.0 or max(self.reduced.flatten()) > 1.0:
self.reduced = (self.reduced + 1.0) % 1.0
self.reduced2positions()
def moment_of_inertia(self, axis):
assert self.is_perfect
mofi = 0
for isite in self:
mofi += mass(isite.symbols[0]) * (sum(np.array(isite.position) ** 2) - isite.position[axis] ** 2)
return mofi
def product_of_inertia(self, axis):
assert self.is_perfect
pofi = 0
for isite in self:
pofi += mass(isite.symbols[0]) * (np.prod(isite.position) / isite.position[axis])
return pofi
def inertia_matrix(self):
im_xx = self.moment_of_inertia(0)
im_yy = self.moment_of_inertia(1)
im_zz = self.moment_of_inertia(2)
im_xy = self.product_of_inertia(2)
im_xz = self.product_of_inertia(1)
im_yz = self.product_of_inertia(0)
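        # product_of_inertia(axis) sums m * prod(r) / r[axis], i.e. the product of the two
        # coordinates other than `axis`: axis=2 -> I_xy, axis=1 -> I_xz, axis=0 -> I_yz.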
im = np.array([[im_xx, -im_xy, -im_xz], [-im_xy, im_yy, -im_yz], [-im_xz, -im_yz, im_zz]])
return im
def signature(self):
comp = self.get_composition()
gcd = self.get_composition().gcd
ret = '%02X_%014X_%02X_' % (self.valence_electrons() / gcd, comp.species_hex(), gcd)
formula = "%s" % comp.sorted_formula(sortby='electroneg')
formula += (17 - len(formula)) * '_'
ret += formula
return ret
def add_vacuum(self, length, direction=2):
vacuum = np.zeros(3)
vacuum[direction] = length
alpha = self.lattice.alpha
beta = self.lattice.beta
gamma = self.lattice.gamma
newlengths = self.lattice.lengths + vacuum
a = newlengths[0]
b = newlengths[1]
c = newlengths[2]
newlattice = self.lattice.from_parameters_to_cell(a, b, c, alpha, beta, gamma)
return Structure(symbols=self.symbols, cell=newlattice.cell, positions=self.positions)
def load_structure_json(filename):
ret = Structure()
ret.load_json(filename)
return ret
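# Illustrative sketch (not part of the original API): shows how the Structure properties
# defined above are typically queried. The coordinates are arbitrary; only the constructor
# signature already used elsewhere in this module (symbols/positions/periodicity) is assumed.
def _example_structure_properties():
    st = Structure(symbols=['O', 'H', 'H'],
                   positions=np.array([[0.0, 0.0, 0.0], [0.96, 0.0, 0.0], [-0.24, 0.93, 0.0]]),
                   periodicity=False)
    # For a finite (non-periodic) system is_crystal is False and the volume property
    # falls back to the bounding-box / sphere estimate implemented above.
    return {'natom': st.natom,
            'is_periodic': st.is_periodic,
            'is_crystal': st.is_crystal,
            'formula': st.formula,
            'volume': st.volume}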
class SiteSet:
"""
Collection of atomic sites.
Starting from a Structure the object will create a set of Sites storing the species, occupancies and
positions of each site.
"""
def __init__(self, structure):
"""
SiteSet is a container for a list of Sites
:param structure: structure from which the Sites will be created.
"""
self.structure = structure
self.sitelist = []
reduced = None
for isite in range(structure.nsites):
if structure.sites.count(isite) > 1:
symbols = []
occupancies = []
for jatom in range(structure.natom):
if structure.sites[jatom] == isite:
symbols.append(structure.symbols[jatom])
occupancies.append(structure.occupancies[jatom])
position = structure.positions[isite]
if self.structure.is_periodic:
reduced = structure.reduced[isite]
else:
symbols = [structure.symbols[isite]]
occupancies = [structure.occupancies[isite]]
position = structure.positions[isite]
if self.structure.is_periodic:
reduced = structure.reduced[isite]
self.sitelist.append(Site(symbols=symbols, occupancies=occupancies, position=position))
def __iter__(self):
return iter(self.sitelist)
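# Illustrative sketch (an assumption, not part of the original module): iterating a SiteSet
# yields one Site per crystallographic site, which is convenient for inspecting partial
# occupancies in non-perfect structures.
def _example_print_sites(structure):
    for site in SiteSet(structure):
        print(site.symbols, site.occupancies, site.position)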
class Site:
"""
A site is a mapping of one location of space with one or more atoms and the corresponding occupancies.
The sum of occupancies must be less than or equal to 1.
"""
def __init__(self, symbols, occupancies=None, position=None):
"""
Create an atomic site with one or more atoms defined at a single location in space.
:param symbols: atomic symbol or list of atomic symbols
:param occupancies: list of floats with the probability of occupancy for the atom in the site
:param position: iterable with the location of the atom in space in cartesian coordinates
.. note:: This class is used for internal operations inside Structure. There is little reason to expose it
outside of this module.
>>> sit = Site('H')
>>> sit
Site(symbols=['H'],occupancies=[1.0],position=[0.0, 0.0, 0.0])
>>> sit = Site(symbols = ['O', 'N'], occupancies=[0.5, 0.5], position=[1,1,1])
>>> sit
Site(symbols=['O', 'N'],occupancies=[0.5, 0.5],position=[1.0, 1.0, 1.0])
"""
if symbols in atomic_symbols:
self.symbols = [symbols]
else:
try:
self.symbols = list(symbols)
except TypeError:
raise TypeError("%s is not iterable" % symbols)
for i in self.symbols:
assert i in atomic_symbols
if occupancies is None:
occupancies = 1.0
if position is None:
position = 3*[0.0]
try:
self.occupancies = [float(x) for x in occupancies]
except TypeError:
self.occupancies = [float(occupancies)]
assert (len(self.occupancies) == len(self.symbols))
assert sum(self.occupancies) <= 1
self.position = [float(x) for x in position]
def __repr__(self):
ret = self.__class__.__name__+'(symbols=' + repr(self.symbols)
ret += ',occupancies=' + repr(self.occupancies)
ret += ',position=' + repr(self.position)
ret += ')'
return ret
def __str__(self):
return repr(self)
def worker_star(x):
return random_structure(*x)
def random_structure(method, composition, periodic=True, max_volume=1E10):
"""
Random Structure created by random positioning of atoms followed by either scaling of the cell or
adding a shear stretching along the smaller distances. The purpose of the lattice change is to avoid any two
atoms being closer than the sum of their covalent radii.
:param method: Can be 'stretching' or 'scaling'.
:param composition: Can be a Composition object or formula.
:param periodic: If True, the structure will be periodical in all directions, otherwise a finite system is created.
:param max_volume: Threshold for creating the Structure, if the volume exceeds the target the method returns None
:return: Structure if the volume is below max_volume, None otherwise
>>> st = random_structure(method='scaling', composition='H2O', periodic=False)
>>> st.natom
3
>>> st = random_structure(method='stretching', composition='NaCl', periodic=True)
>>> st.natom
2
"""
comp = Composition(composition)
natom = comp.natom
symbols = comp.symbols
np.random.seed(struct.unpack("<L", os.urandom(4))[0])
if periodic:
new_structure = None
assert (method in ['scaling', 'stretching'])
trial = 0
while True:
if method == 'scaling':
lattice = Lattice.random_cell(comp)
# Random reduced positions
rpos = np.random.rand(natom, 3)
mins = [min(rpos[:, i]) for i in range(3)]
rpos -= mins
new_lattice = lattice
else:
lattice = Lattice.random_cell(comp)
# Random reduced positions
rpos = np.random.rand(natom, 3)
mins = [min(rpos[:, i]) for i in range(3)]
rpos -= mins
new_lattice = lattice.stretch(symbols, rpos, tolerance=1.0, extra=0.1)
while True:
changed_lattice=False
for i in range(natom):
for j in range(i + 1, natom):
distance = new_lattice.minimal_distance(rpos[i], rpos[j])
covalent_dim = sum(covalent_radius([symbols[i], symbols[j]]))
if distance < covalent_dim:
#print("Distance of %s was smaller than covalent distance of %s" % (distance, covalent_dim))
new_lattice = lattice.scale(symbols, rpos, tolerance=1.1)
#print("Lattice was changed, recomputing...")
changed_lattice=True
if changed_lattice:
#print("Lattice was changed, recomputing...")
break
if changed_lattice:
#print("Lattice was changed, recomputing...")
break
if not changed_lattice:
#print("Lattice was not changed, accepting new value")
break
if new_lattice.volume < max_volume:
test = True
for i in range(natom):
for j in range(i + 1, natom):
distance = new_lattice.minimal_distance(rpos[i], rpos[j])
covalent_dim = sum(covalent_radius([symbols[i], symbols[j]]))
if distance < covalent_dim:
test = False
if test:
new_structure = Structure(symbols=symbols, reduced=rpos, cell=new_lattice.cell, periodicity=True)
minimal_distance = np.min(new_structure.distance_matrix() + 10 * np.eye(new_structure.natom))
break
else:
print("Trial failed, distance %f is less than covalent radius %f" % (distance, covalent_dim))
trial += 1
if trial > 100:
print("Leaving after 100 attempts")
break
# else:
# print('Volume of Structure %f is larger than max_volume=%f' % (new_lattice.volume, max_volume))
# new_structure = None
else:
pos = np.random.rand(natom, 3)
mindis = cluster_minimal_distance(pos)
if mindis == 0:
raise ValueError("Distance too small")
max_cov = np.max(covalent_radius(symbols))
pos *= max_cov / mindis
current_volume = (max(pos[:, 0]) - min(pos[:, 0])) * (max(pos[:, 1]) - min(pos[:, 1])) * (
max(pos[:, 2]) - min(pos[:, 2]))
if current_volume < max_volume:
new_structure = Structure(symbols=symbols, positions=pos, periodicity=False)
else:
print('Volume of Structure %f is larger than max_volume=%f' % (current_volume, max_volume))
new_structure = None
return new_structure
# End of Worker
def cluster_minimal_distance(pos):
pos = np.array(pos).reshape((-1, 3))
dismat = scipy.spatial.distance_matrix(pos, pos)
tmp = np.max(dismat.flatten())
return np.min((dismat + tmp * np.eye(len(pos))).flatten())
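# Quick illustrative check (not part of the original module): for three collinear points
# spaced 1.0 apart, cluster_minimal_distance reports 1.0, the smallest pairwise separation;
# the diagonal of the distance matrix is masked out by adding tmp * I before taking the min.
def _example_cluster_minimal_distance():
    pos = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]]
    return cluster_minimal_distance(pos)  # expected: 1.0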
|
MaterialsDiscovery/PyChemia
|
pychemia/core/structure.py
|
Python
|
mit
| 49,235
|
[
"CRYSTAL"
] |
e53568e58943dd2eeb383a70c939d5f869d93174f73ae338f453484faf941148
|
# -*- coding: utf-8 -*-
#############################################################################
# SRWLIB Example: Virtual Beamline: a set of utilities and functions allowing one to simulate
# the operation of an SR Beamline.
# The standard use of this script is from command line, with some optional arguments,
# e.g. for calculation (with default parameter values) of:
# UR Spectrum Through a Slit (Flux within a default aperture):
# python SRWLIB_VirtBL_*.py --sm
# Single-Electron UR Spectrum (Flux per Unit Surface):
# python SRWLIB_VirtBL_*.py --ss
# UR Power Density (at the first optical element):
# python SRWLIB_VirtBL_*.py --pw
# Input Single-Electron UR Intensity Distribution (at the first optical element):
# python SRWLIB_VirtBL_*.py --si
# Single-Electron Wavefront Propagation:
# python SRWLIB_VirtBL_*.py --ws
# Multi-Electron Wavefront Propagation:
# Sequential Mode:
# python SRWLIB_VirtBL_*.py --wm
# Parallel Mode (using MPI / mpi4py), e.g.:
# mpiexec -n 6 python SRWLIB_VirtBL_*.py --wm
# For changing parameters of all these calculations from the default values, see the definition
# of all options in the list at the end of the script.
# v 0.07
#############################################################################
from __future__ import print_function #Python 2.7 compatibility
from srwl_bl import *
try:
import cPickle as pickle
except:
import pickle
#import time
#*********************************Setting Up Optical Elements and Propagation Parameters
def set_optics(_v):
"""This function describes the optical layout of the Coherent Hard X-ray (CHX) beamline of NSLS-II.
Such a function has to be written for every beamline to be simulated; it is specific to a particular beamline.
:param _v: structure containing all parameters allowed to be varied for that particular beamline
"""
#---Nominal Positions of Optical Elements [m] (with respect to straight section center)
zS0 = 20.5 #S0 (primary slit)
zHDM = 27.4 #Horizontally-Deflecting Mirror (HDM)
zS1 = 29.9 #S1 slit
zDCM = 31.6 #DCM (vertically-deflecting)
zS2 = 34.3 #S2 slit
zBPM = 34.6 #BPM for beam visualization
zCRL = 35.4 #+tzCRL*1e-3 #CRL transfocator (corrected by translation)
zKL = 45.0 #44.5 #+tzKL*1e-3 #Kinoform Lens for horizontal focusing (corrected by translation)
zS3 = 48.0 #S3 slit ('pinhole', waist position)
zSample = 48.7 #Sample position, COR of diffractometer
zD = 58.7 #Detector position
#---Instantiation of the Optical Elements
arElNamesAllOpt = [
['S0', 'S0_S1', 'S1', 'S1_S2', 'S2', 'S2_BPM', 'BPM_CRL', 'CRL1', 'CRL2', 'CRL_KL', 'KLA', 'KL', 'KL_S3', 'S3', 'S3_SMP', 'SMP', 'SMP_D'], #1
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_SMP'], #2
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_KL', 'KLA', 'KL', 'KL_S3', 'S3', 'SMP', 'SMP_D'], #3
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_SMP'], #4
['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'FIB', 'CRL_SMP'], #5
]
arElNamesAll = arElNamesAllOpt[int(round(_v.op_BL - 1))]
if(len(_v.op_fin) > 0):
if(_v.op_fin not in arElNamesAll): raise Exception('Optical element with the name specified in the "op_fin" option is not present in this beamline')
#Could be made more general
arElNames = [];
for i in range(len(arElNamesAll)):
arElNames.append(arElNamesAll[i])
if(len(_v.op_fin) > 0):
if(arElNamesAll[i] == _v.op_fin): break
el = []; pp = [] #lists of SRW optical element objects and their corresponding propagation parameters
#S0 (primary slit)
if('S0' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S0_dx, _v.op_S0_dy, _v.op_S0_x, _v.op_S0_y)); pp.append(_v.op_S0_pp)
#Drift S0 -> HDM
if('S0_HDM' in arElNames):
el.append(SRWLOptD(zHDM - zS0)); pp.append(_v.op_S0_HDM_pp)
#Drift S0 -> S1
if('S0_S1' in arElNames):
el.append(SRWLOptD(zS1 - zS0)); pp.append(_v.op_S0_S1_pp)
#HDM (Height Profile Error)
if('HDM' in arElNames):
horApHDM = 0.94e-03 #Projected dimensions
verApHDM = 1.e-03
angHDM = 3.1415926e-03 #? grazing angle
ifnHDM = os.path.join(_v.fdir, _v.op_HDM_ifn) if len(_v.op_HDM_ifn) > 0 else ''
if(len(ifnHDM) > 0):
hProfDataHDM = srwl_uti_read_data_cols(ifnHDM, '\t', 0, 1)
opHDM = srwl_opt_setup_surf_height_1d(hProfDataHDM, 'x', _ang=angHDM, _amp_coef=_v.op_HDM_amp, _nx=1000, _ny=200, _size_x=horApHDM, _size_y=verApHDM, _xc=_v.op_HDM_x, _yc=_v.op_HDM_y)
ofnHDM = os.path.join(_v.fdir, _v.op_HDM_ofn) if len(_v.op_HDM_ofn) > 0 else ''
if(len(ofnHDM) > 0):
pathDifHDM = opHDM.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifHDM, opHDM.mesh, ofnHDM, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opHDM); pp.append(_v.op_HDM_pp)
#Drift HDM -> S1
if('HDM_S1' in arElNames):
el.append(SRWLOptD(zS1 - zHDM + _v.op_S1_dz)); pp.append(_v.op_HDM_S1_pp)
#S1 slit
if('S1' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S1_dx, _v.op_S1_dy, _v.op_S1_x, _v.op_S1_y)); pp.append(_v.op_S1_pp)
#Drift S1 -> DCM
if('S1_DCM' in arElNames):
el.append(SRWLOptD(zDCM - zS1)); pp.append(_v.op_S1_DCM_pp)
#Double-Crystal Monochromator
tCr1 = [0, 0, -1] #required for surface error
if('DCM' in arElNames):
tc = 1e-02 # [m] crystal thickness
angAs = 0.*3.1415926/180. # [rad] asymmetry angle
hc = [1,1,1]
dc = srwl_uti_cryst_pl_sp(hc, 'Si')
#print('DCM Interplannar dist.:', dc)
psi = srwl_uti_cryst_pol_f(_v.op_DCM_e, hc, 'Si')
#print('DCM Fourier Components:', psi)
#---------------------- DCM Crystal #1
opCr1 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _e_avg=_v.op_DCM_e)
#Find appropriate orientation of the Crystal #1 and the Output Beam Frame (using a member-function in SRWLOptCryst):
#orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=1.5707963) # Horizontally-deflecting (from HXN)
orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e) # Vertically-deflecting
#Crystal #1 Orientation found:
orientCr1 = orientDataCr1[0]
tCr1 = orientCr1[0] #Tangential Vector to Crystal surface
sCr1 = orientCr1[1] #Sagital Vector to Crystal surface
nCr1 = orientCr1[2] #Normal Vector to Crystal surface
print('DCM Crystal #1 Orientation (original):')
print(' t =', tCr1, 's =', orientCr1[1], 'n =', nCr1)
if(_v.op_DCM_ac1 != 0): #Small rotation of DCM Crystal #1:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac1, [0,0,0])
tCr1 = uti_math.matr_prod(rot[0], tCr1)
sCr1 = uti_math.matr_prod(rot[0], sCr1)
nCr1 = uti_math.matr_prod(rot[0], nCr1)
#Set the Crystal #1 orientation:
opCr1.set_orient(nCr1[0], nCr1[1], nCr1[2], tCr1[0], tCr1[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr1OutFr = orientDataCr1[1]
rxCr1 = orientCr1OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr1 = orientCr1OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr1 = orientCr1OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
print('DCM Crystal #1 Outgoing Beam Frame:')
print(' ex =', rxCr1, 'ey =', ryCr1, 'ez =', rzCr1)
#Incoming/Outgoing beam frame transformation matrix for the DCM Crystal #1
TCr1 = [rxCr1, ryCr1, rzCr1]
print('Total transformation matrix after DCM Crystal #1:')
uti_math.matr_print(TCr1)
#print(' ')
el.append(opCr1); pp.append(_v.op_DCMC1_pp)
#---------------------- DCM Crystal #2
opCr2 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs)
#Find appropriate orientation of the Crystal #2 and the Output Beam Frame
#orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=-1.5707963) #from HXN
orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=3.1415926) #Vertically-deflecting
#Crystal #2 Orientation found:
orientCr2 = orientDataCr2[0]
tCr2 = orientCr2[0] #Tangential Vector to Crystal surface
sCr2 = orientCr2[1]
nCr2 = orientCr2[2] #Normal Vector to Crystal surface
print('Crystal #2 Orientation (original):')
print(' t =', tCr2, 's =', sCr2, 'n =', nCr2)
if(_v.op_DCM_ac2 != 0): #Small rotation of DCM Crystal #2:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac2, [0,0,0])
tCr2 = uti_math.matr_prod(rot[0], tCr2)
sCr2 = uti_math.matr_prod(rot[0], sCr2)
nCr2 = uti_math.matr_prod(rot[0], nCr2)
#Set the Crystal #2 orientation
opCr2.set_orient(nCr2[0], nCr2[1], nCr2[2], tCr2[0], tCr2[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr2OutFr = orientDataCr2[1]
rxCr2 = orientCr2OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr2 = orientCr2OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr2 = orientCr2OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
print('DCM Crystal #2 Outgoing Beam Frame:')
print(' ex =', rxCr2, 'ey =', ryCr2, 'ez =',rzCr2)
#Incoming/Outgoing beam transformation matrix for the DCM Crystal #2
TCr2 = [rxCr2, ryCr2, rzCr2]
Ttot = uti_math.matr_prod(TCr2, TCr1)
print('Total transformation matrix after DCM Crystal #2:')
uti_math.matr_print(Ttot)
#print(' ')
el.append(opCr2); pp.append(_v.op_DCMC2_pp)
#DCM Surface Error
horApDCM = 2.e-03 #Projected dimensions
verApDCM = 2.e-03
angDCM = asin(abs(tCr1[2])) #Grazing angle to crystal surface
ifnDCME = os.path.join(_v.fdir, _v.op_DCME_ifn) if len(_v.op_DCME_ifn) > 0 else ''
if(len(ifnDCME) > 0):
hProfDataDCME = srwl_uti_read_data_cols(ifnDCME, '\t', 0, 1)
opDCME = srwl_opt_setup_surf_height_1d(hProfDataDCME, 'y', _ang=angDCM, _amp_coef=_v.op_DCME_amp, _nx=1000, _ny=200, _size_x=horApDCM, _size_y=verApDCM, _xc=_v.op_DCME_x, _yc=_v.op_DCME_y)
ofnDCME = os.path.join(_v.fdir, _v.op_DCME_ofn) if len(_v.op_DCME_ofn) > 0 else ''
if(len(ofnDCME) > 0):
pathDifDCME = opDCME.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifDCME, opDCME.mesh, ofnDCME, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opDCME); pp.append(_v.op_DCME_pp)
#Drift DCM -> S2
if('DCM_S2' in arElNames):
el.append(SRWLOptD(zS2 - zDCM + _v.op_S2_dz)); pp.append(_v.op_DCM_S2_pp)
#Boron Fiber (with Tungsten core)
if('FIB' in arElNames):
fpln = 3 #focusing in both planes
if((_v.op_FIB_fpl == 'h') or (_v.op_FIB_fpl == 'H') or (_v.op_FIB_fpl == 'x') or (_v.op_FIB_fpl == 'X')): fpln = 1
elif((_v.op_FIB_fpl == 'v') or (_v.op_FIB_fpl == 'V') or (_v.op_FIB_fpl == 'y') or (_v.op_FIB_fpl == 'Y')): fpln = 2
el.append(srwl_opt_setup_cyl_fiber(fpln, _v.op_FIB_delta_e, _v.op_FIB_delta_c, _v.op_FIB_atnl_e, _v.op_FIB_atnl_c, _v.op_FIB_d_e, _v.op_FIB_d_c, _v.op_FIB_x, _v.op_FIB_y))
pp.append(_v.op_FIB_pp)
#Drift S1 -> S2
if('S1_S2' in arElNames):
el.append(SRWLOptD(zS2 - zS1 + _v.op_S2_dz)); pp.append(_v.op_S1_S2_pp)
#S2 slit
if('S2' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S2_dx, _v.op_S2_dy, _v.op_S2_x, _v.op_S2_y)); pp.append(_v.op_S2_pp)
#Drift S2 -> BPM
if('S2_BPM' in arElNames):
el.append(SRWLOptD(zBPM - zS2 + _v.op_BPM_dz)); pp.append(_v.op_S2_BPM_pp)
#Drift BPM -> CRL
if('BPM_CRL' in arElNames):
el.append(SRWLOptD(zCRL - zBPM + _v.op_CRL_dz)); pp.append(_v.op_BPM_CRL_pp)
#Drift S2 -> CRL
if('S2_CRL' in arElNames):
el.append(SRWLOptD(zCRL - zS2 - _v.op_S2_dz + _v.op_CRL_dz)); pp.append(_v.op_S2_CRL_pp)
#CRL1 (1D, vertically-focusing)
if('CRL1' in arElNames):
if((_v.op_CRL1_n > 0) and (_v.op_CRL1_fpl != '')):
fpln = 3 #focusing in both planes
if((_v.op_CRL1_fpl == 'h') or (_v.op_CRL1_fpl == 'H') or (_v.op_CRL1_fpl == 'x') or (_v.op_CRL1_fpl == 'X')): fpln = 1
elif((_v.op_CRL1_fpl == 'v') or (_v.op_CRL1_fpl == 'V') or (_v.op_CRL1_fpl == 'y') or (_v.op_CRL1_fpl == 'Y')): fpln = 2
el.append(srwl_opt_setup_CRL(fpln, _v.op_CRL1_delta, _v.op_CRL1_atnl, 1, _v.op_CRL1_apnf, _v.op_CRL1_apf, _v.op_CRL1_rmin, _v.op_CRL1_n, _v.op_CRL1_thck, _v.op_CRL1_x, _v.op_CRL1_y))
pp.append(_v.op_CRL1_pp)
#CRL2 (1D, vertically-focusing)
if('CRL2' in arElNames):
if((_v.op_CRL2_n > 0) and (_v.op_CRL2_fpl != '')):
fpln = 3 #focusing in both planes
if((_v.op_CRL2_fpl == 'h') or (_v.op_CRL2_fpl == 'H') or (_v.op_CRL2_fpl == 'x') or (_v.op_CRL2_fpl == 'X')): fpln = 1
elif((_v.op_CRL2_fpl == 'v') or (_v.op_CRL2_fpl == 'V') or (_v.op_CRL2_fpl == 'y') or (_v.op_CRL2_fpl == 'Y')): fpln = 2
el.append(srwl_opt_setup_CRL(fpln, _v.op_CRL2_delta, _v.op_CRL2_atnl, 1, _v.op_CRL2_apnf, _v.op_CRL2_apf, _v.op_CRL2_rmin, _v.op_CRL2_n, _v.op_CRL2_thck, _v.op_CRL2_x, _v.op_CRL2_y))
pp.append(_v.op_CRL2_pp)
#Drift CRL -> KL
if('CRL_KL' in arElNames):
el.append(SRWLOptD(zKL - zCRL - _v.op_CRL_dz + _v.op_KL_dz)); pp.append(_v.op_CRL_KL_pp)
#Drift CRL -> Sample
if('CRL_SMP' in arElNames):
el.append(SRWLOptD(zSample - zCRL - _v.op_CRL_dz + _v.op_SMP_dz)); pp.append(_v.op_CRL_SMP_pp)
#KL Aperture
if('KLA' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_KLA_dx, _v.op_KLA_dy, _v.op_KL_x, _v.op_KL_y)); pp.append(_v.op_KLA_pp)
#KL (1D, horizontally-focusing)
if('KL' in arElNames):
el.append(SRWLOptL(_v.op_KL_fx, _v.op_KL_fy, _v.op_KL_x, _v.op_KL_y)) #KL as Ideal Lens; to make it a transmission element with a profile read from a file
pp.append(_v.op_KL_pp)
#Drift KL -> S3
if('KL_S3' in arElNames):
el.append(SRWLOptD(zS3 - zKL + _v.op_S3_dz)); pp.append(_v.op_KL_S3_pp)
#S3 slit
if('S3' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S3_dx, _v.op_S3_dy, _v.op_S3_x, _v.op_S3_y)); pp.append(_v.op_S3_pp)
#Drift S3 -> Sample
if('S3_SMP' in arElNames):
el.append(SRWLOptD(zSample - zS3 + _v.op_SMP_dz)); pp.append(_v.op_S3_SMP_pp)
#Sample
if('SMP' in arElNames):
ifnSMP = os.path.join(_v.fdir, _v.op_SMP_ifn) if len(_v.op_SMP_ifn) > 0 else ''
if(len(ifnSMP) > 0):
ifSMP = open(ifnSMP, 'rb')
opSMP = pickle.load(ifSMP)
#Implementing transverse shift of sample ??
xSt = opSMP.mesh.xStart
xFi = opSMP.mesh.xFin
halfRangeX = 0.5*(xFi - xSt)
opSMP.mesh.xStart = -halfRangeX + _v.op_SMP_x
opSMP.mesh.xFin = halfRangeX + _v.op_SMP_x
ySt = opSMP.mesh.yStart
yFi = opSMP.mesh.yFin
halfRangeY = 0.5*(yFi - ySt)
opSMP.mesh.yStart = -halfRangeY + _v.op_SMP_y
opSMP.mesh.yFin = halfRangeY + _v.op_SMP_y
ofnSMP = os.path.join(_v.fdir, _v.op_SMP_ofn) if len(_v.op_SMP_ofn) > 0 else ''
if(len(ofnSMP) > 0):
pathDifSMP = opSMP.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifSMP, opSMP.mesh, ofnSMP, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opSMP); pp.append(_v.op_SMP_pp)
ifSMP.close()
#Drift Sample -> Detector
if('SMP_D' in arElNames):
el.append(SRWLOptD(zD - zSample + _v.op_D_dz)); pp.append(_v.op_SMP_D_pp)
pp.append(_v.op_fin_pp)
return SRWLOptC(el, pp)
#*********************************List of Parameters allowed to be varied
#---List of supported options / commands / parameters allowed to be varied for this Beamline (comment-out unnecessary):
varParam = [
#---Data Folder
['fdir', 's', os.path.join(os.getcwd(), 'data_CHX'), 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', 'NSLS-II Low Beta ', 'standard electron beam name'],
['ebm_nms', 's', 'Day1', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_de', 'f', 0., 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0., 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0., 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0., 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0., 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.7, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', -1, 'electron beam relative energy spread'],
['ebm_emx', 'f', -1, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', -1, 'electron beam vertical emittance [m]'],
#---Undulator
['und_per', 'f', 0.02, 'undulator period [m]'],
['und_len', 'f', 3., 'undulator length [m]'],
['und_b', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
#['und_bx', 'f', 0., 'undulator horizontal peak magnetic field [T]'],
#['und_by', 'f', 1., 'undulator vertical peak magnetic field [T]'],
#['und_phx', 'f', 1.5708, 'undulator horizontal magnetic field phase [rad]'],
#['und_phy', 'f', 0., 'undulator vertical magnetic field phase [rad]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_zc', 'f', 0., 'undulator center longitudinal position [m]'],
['und_mdir', 's', 'magn_meas', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', 'ivu20_chx_sum.txt', 'name of magnetic measurements for different gaps summary file'],
#['und_g', 'f', 0., 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
#NOTE: the above option/variable names (fdir, ebm*, und*, ss*, sm*, pw*, is*, ws*, wm*) should be the same in all beamline scripts
#on the other hand, the beamline optics related options below (op*) are specific to a particular beamline (and can differ from beamline to beamline).
#However, the default values of all the options/variables (above and below) can differ from beamline to beamline.
#---Beamline Optics
['op_r', 'f', 20.5, 'longitudinal position of the first optical element [m]'],
['op_fin', 's', 'S3_SMP', 'name of the final optical element wavefront has to be propagated through'],
['op_BL', 'f', 1, 'beamline version/option number'],
['op_S0_dx', 'f', 0.2e-03, 'slit S0: horizontal size [m]'],
['op_S0_dy', 'f', 1.0e-03, 'slit S0: vertical size [m]'],
['op_S0_x', 'f', 0., 'slit S0: horizontal center position [m]'],
['op_S0_y', 'f', 0., 'slit S0: vertical center position [m]'],
['op_HDM_ifn', 's', 'CHX_HDM_height_prof_1d.dat', 'mirror HDM: input file name of height profile data'],
['op_HDM_amp', 'f', 1., 'mirror HDM: amplification coefficient for height profile data'],
['op_HDM_ofn', 's', 'res_CHX_HDM_opt_path_dif.dat', 'mirror HDM: output file name of optical path difference data'],
['op_HDM_x', 'f', 0., 'mirror HDM surface error: horizontal center position [m]'],
['op_HDM_y', 'f', 0., 'mirror HDM surface error: vertical center position [m]'],
['op_S1_dz', 'f', 0., 'S1: offset of longitudinal position [m]'],
['op_S1_dx', 'f', 0.2e-03, 'slit S1: horizontal size [m]'],
['op_S1_dy', 'f', 1.0e-03, 'slit S1: vertical size [m]'],
['op_S1_x', 'f', 0., 'slit S1: horizontal center position [m]'],
['op_S1_y', 'f', 0., 'slit S1: vertical center position [m]'],
['op_DCM_e', 'f', 9000., 'DCM: central photon energy DCM is tuned to [eV]'],
['op_DCM_ac1', 'f', 0., 'DCM: angular deviation of 1st crystal from exact Bragg angle [rad]'],
['op_DCM_ac2', 'f', 0., 'DCM: angular deviation of 2nd crystal from exact Bragg angle [rad]'],
['op_DCME_ifn', 's', 'CHX_DCM_height_prof_1d.dat', 'DCM surface error: input file name of height profile data'],
['op_DCME_amp', 'f', 1., 'DCM surface error: amplification coefficient'],
['op_DCME_ofn', 's', 'res_CHX_DCM_opt_path_dif.dat', 'DCM surface error: output file name of optical path difference data'],
['op_DCME_x', 'f', 0., 'DCM surface error: horizontal center position [m]'],
['op_DCME_y', 'f', 0., 'DCM surface error: vertical center position [m]'],
['op_FIB_fpl', 's', '', 'FIB: focusing plane ("h" or "v" or "hv" or "")'],
['op_FIB_delta_e', 'f', 4.20756805e-06, 'Fiber: refractive index decrement of main (exterior) material'],
['op_FIB_delta_c', 'f', 4.20756805e-06, 'Fiber: refractive index decrement of core material'],
['op_FIB_atnl_e', 'f', 7312.94e-06, 'Fiber: attenuation length of main (exterior) material [m]'],
['op_FIB_atnl_c', 'f', 7312.94e-06, 'Fiber: attenuation length of core material [m]'],
['op_FIB_d_e', 'f', 100.e-06, 'Fiber: ext. diameter [m]'],
['op_FIB_d_c', 'f', 10.e-06, 'Fiber: core diameter [m]'],
['op_FIB_x', 'f', 0., 'Fiber: horizontal center position [m]'],
['op_FIB_y', 'f', 0., 'Fiber: vertical center position [m]'],
['op_S2_dz', 'f', 0., 'S2: offset of longitudinal position [m]'],
['op_S2_dx', 'f', 0.05e-03, 'slit S2: horizontal size [m]'],
['op_S2_dy', 'f', 0.2e-03, 'slit S2: vertical size [m]'], #1.0e-03, 'slit S2: vertical size [m]'],
['op_S2_x', 'f', 0., 'slit S2: horizontal center position [m]'],
['op_S2_y', 'f', 0., 'slit S2: vertical center position [m]'],
['op_BPM_dz', 'f', 0., 'BPM: offset of longitudinal position [m]'],
['op_CRL_dz', 'f', 0., 'CRL: offset of longitudinal position [m]'],
['op_CRL1_fpl', 's', 'v', 'CRL1: focusing plane ("h" or "v" or "hv" or "")'],
['op_CRL1_delta', 'f', 4.20756805e-06, 'CRL1: refractive index decrements of material'],
['op_CRL1_atnl', 'f', 7312.94e-06, 'CRL1: attenuation length of material [m]'],
['op_CRL1_apnf', 'f', 1.e-03, 'CRL1: geometrical aperture of 1D CRL in the plane where there is no focusing'],
['op_CRL1_apf', 'f', 2.4e-03, 'CRL1: geometrical aperture of 1D CRL in the focusing plane'],
['op_CRL1_rmin', 'f', 1.5e-03, 'CRL1: radius of surface curvature at the tip of parabola [m]'],
['op_CRL1_n', 'i', 1, 'CRL1: number of individual lenses'],
['op_CRL1_thck', 'f', 80.e-06, 'CRL1: wall thickness (at the tip of parabola) [m]'],
['op_CRL1_x', 'f', 0., 'CRL1: horizontal center position [m]'],
['op_CRL1_y', 'f', 0., 'CRL1: vertical center position [m]'],
['op_CRL2_fpl', 's', 'v', 'CRL2: focusing plane ("h" or "v" or "hv" or "")'],
['op_CRL2_delta', 'f', 4.20756805e-06, 'CRL2: refractive index decrements of material'],
['op_CRL2_atnl', 'f', 7312.94e-06, 'CRL2: attenuation length of material [m]'],
['op_CRL2_apnf', 'f', 1.e-03, 'CRL2: geometrical aperture of 1D CRL in the plane where there is no focusing'],
['op_CRL2_apf', 'f', 1.4e-03, 'CRL2: geometrical aperture of 1D CRL in the focusing plane'],
['op_CRL2_rmin', 'f', 0.5e-03, 'CRL2: radius of surface curvature at the tip of parabola [m]'],
['op_CRL2_n', 'i', 6, 'CRL2: number of individual lenses'],
['op_CRL2_thck', 'f', 80.e-06, 'CRL2: wall thickness (at the tip of parabola) [m]'],
['op_CRL2_x', 'f', 0., 'CRL2: horizontal center position [m]'],
['op_CRL2_y', 'f', 0., 'CRL2: vertical center position [m]'],
['op_KLA_dx', 'f', 1.0e-03, 'KL aperture: horizontal size [m]'], #1.4e-03, 'KL Aperture: horizontal size [m]'],
['op_KLA_dy', 'f', 0.1e-03, 'KL aperture: vertical size [m]'], #0.2e-03, 'KL Aperture: vertical size [m]'],
['op_KL_dz', 'f', 0., 'KL: offset of longitudinal position [m]'],
['op_KL_fx', 'f', 3.24479, 'KL: horizontal focal length [m]'],
['op_KL_fy', 'f', 1.e+23, 'KL: vertical focal length [m]'],
['op_KL_x', 'f', 0., 'KL: horizontal center position [m]'],
['op_KL_y', 'f', 0., 'KL: vertical center position [m]'],
['op_S3_dz', 'f', 0., 'S3: offset of longitudinal position [m]'],
['op_S3_dx', 'f', 10.e-06, 'slit S3: horizontal size [m]'],
['op_S3_dy', 'f', 10.e-06, 'slit S3: vertical size [m]'],
['op_S3_x', 'f', 0., 'slit S3: horizontal center position [m]'],
['op_S3_y', 'f', 0., 'slit S3: vertical center position [m]'],
['op_SMP_dz', 'f', 0., 'sample: offset of longitudinal position [m]'],
['op_SMP_ifn', 's', 'CHX_SMP_CDI_001.pickle', 'sample: model file name (binary "dumped" SRW transmission object)'],
['op_SMP_ofn', 's', 'res_CHX_SMP_opt_path_dif.dat', 'sample: output file name of optical path difference data'],
['op_SMP_x', 'f', 0., 'sample: horizontal center position [m]'],
['op_SMP_y', 'f', 0., 'sample: vertical center position [m]'],
['op_D_dz', 'f', 0., 'detector: offset of longitudinal position [m]'],
#to add options for different beamline cases, etc.
#Propagation Param.: [0][1][2][3][4] [5] [6] [7] [8] [9][10][11]
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 4.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_HDM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> HDM: propagation parameters'],
['op_S0_S1_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> S1: propagation parameters'],
['op_HDM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HDM: propagation parameters'],
['op_HDM_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift HDM -> S1: propagation parameters'],
['op_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S1: propagation parameters'],
['op_S1_DCM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> DCM: propagation parameters'],
['op_DCMC1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #1: propagation parameters'],
['op_DCMC2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #2: propagation parameters'],
['op_DCME_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #1&2: surface height error'],
['op_FIB_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'fiber: propagation parameters'],
['op_DCM_S2_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift DCM -> S2: propagation parameters'],
['op_S1_S2_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> S2: propagation parameters'],
['op_S2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S2: propagation parameters'],
['op_S2_BPM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S2 -> BPM: propagation parameters'],
['op_S2_CRL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S2 -> CRL: propagation parameters'],
['op_BPM_CRL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift BPM -> CRL: propagation parameters'],
['op_CRL1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'CRL1: propagation parameters'],
['op_CRL2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'CRL2: propagation parameters'],
['op_CRL_KL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift CRL -> KL: propagation parameters'],
['op_CRL_SMP_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift CRL -> sample: propagation parameters'],
['op_KLA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'KL aperture: propagation parameters'],
#['op_KL_pp', 'f', [0, 0, 1, 0, 0, 1.0, 5.0, 1.0, 7.0, 0, 0, 0], 'KL: propagation parameters'],
['op_KL_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'KL: propagation parameters'],
['op_KL_S3_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift KL -> S3: propagation parameters'],
#['op_S3_pp', 'f', [0, 0, 1, 0, 0, 0.3, 3.0, 0.3, 3.0, 0, 0, 0], 'slit S3: propagation parameters'],
['op_S3_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S3: propagation parameters'],
#['op_S3_SMP_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> Sample: propagation parameters'],
['op_S3_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> sample: propagation parameters'],
['op_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample: propagation parameters'],
['op_SMP_D_pp', 'f', [0, 0, 1, 3, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample -> detector: propagation parameters'],
#['op_fin_pp', 'f', [0, 0, 1, 0, 1, 0.1, 5.0, 1.0, 1.5, 0, 0, 0], 'final post-propagation (resize) parameters'],
['op_fin_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
varParam = srwl_uti_ext_options(varParam)
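#Illustrative helper (an assumption, not part of the original script): look up the default
#value of one option in the list above by its name; useful when reading the propagation
#parameter vectors, whose per-index meaning is documented at the end of varParam.
def _get_default_option(name, params=varParam):
    for p in params:
        if p[0] == name:
            return p[2]
    return None
#e.g. _get_default_option('op_S0_pp') -> [0, 0, 1, 0, 0, 2.5, 5.0, 1.5, 2.5, 0, 0, 0]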
#*********************************Entry
if __name__ == "__main__":
#---Parse options, defining Beamline elements and running calculations
v = srwl_uti_parse_options(varParam)
#---Add some constant "parameters" (not allowed to be varied) for the beamline
#v.und_per = 0.02 #['und_per', 'f', 0.02, 'undulator period [m]'],
#v.und_len = 3. #['und_len', 'f', 3., 'undulator length [m]'],
#v.und_zc = 0. #['und_zc', 'f', 0., 'undulator center longitudinal position [m]'],
#v.und_sy = -1 #['und_sy', 'i', -1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
#---Setup optics only if Wavefront Propagation is required:
v.ws = True
op = set_optics(v) if(v.ws or v.wm) else None
#---Run all requested calculations
SRWLBeamline('Coherent Hard X-ray beamline').calc_all(v, op)
|
mkeilman/sirepo
|
tests/template/srw_import_data/chx.py
|
Python
|
apache-2.0
| 32,127
|
[
"CRYSTAL"
] |
403d5539c88b7ff2a8b6ff8da921a8bc21b0241619cf1dce372e5d5973a04536
|
# coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Code for analyzing symmetries in NN."""
import functools
import math
import operator
from typing import Dict, Optional, Union
import jax.numpy as jnp
import numpy as np
from rigl.experimental.jax.pruning import masked
from rigl.experimental.jax.utils import utils
def count_permutations_mask_layer(
mask_layer,
next_mask_layer = None,
parameter_key = 'kernel'):
"""Calculates the number of permutations for a layer, given binary masks.
Args:
mask_layer: The binary weight mask of a dense/conv layer, where last
dimension is number of neurons/filters.
next_mask_layer: The binary weight mask of the following dense/conv layer,
or None if this is the last layer.
parameter_key: The name of the parameter to count the permutations of in each
layer.
Returns:
A dictionary with stats on the permutation structure of a mask, including
the number of symmetric permutations of the mask, number of unique mask
columns, count of the zeroed out (structurally pruned) neurons, and total
number of neurons/filters.
"""
# Have to check 'is None' since mask_layer[parameter_key] is jnp.array.
if not mask_layer or parameter_key not in mask_layer or mask_layer[
parameter_key] is None:
return {
'permutations': 1,
'zeroed_neurons': 0,
'total_neurons': 0,
'unique_neurons': 0,
}
mask = mask_layer[parameter_key]
num_neurons = mask.shape[-1]
# Initialize with stats for an empty mask.
mask_stats = {
'permutations': 0,
'zeroed_neurons': num_neurons,
'total_neurons': num_neurons,
'unique_neurons': 0,
}
# Re-shape masks as 1D, in case they are 2D (e.g. convolutional).
connection_mask = mask.reshape(-1, num_neurons)
# Only consider non-zero columns (in JAX neurons/filters are last index).
non_zero_neurons = ~jnp.all(connection_mask == 0, axis=0)
# Count only zeroed neurons in the current layer.
zeroed_count = num_neurons - jnp.count_nonzero(non_zero_neurons)
# Special case where all neurons in current layer are ablated.
if zeroed_count == num_neurons:
return mask_stats
# Have to check is None since next_mask_layer[parameter_key] is jnp.array.
if next_mask_layer and parameter_key in next_mask_layer and next_mask_layer[
parameter_key] is not None:
next_mask = next_mask_layer[parameter_key]
# Re-shape masks as 1D, in case they are 2D (e.g. convolutional).
next_connection_mask = next_mask.T.reshape(-1, num_neurons)
# Update with neurons that are non-zero in outgoing connections too.
non_zero_neurons &= ~jnp.all(next_connection_mask == 0, axis=0)
# Remove rows corresponding to neurons that are ablated.
next_connection_mask = next_connection_mask[:, non_zero_neurons]
connection_mask = connection_mask[:, non_zero_neurons]
# Combine the outgoing and incoming masks in one vector per-neuron.
connection_mask = jnp.concatenate(
(connection_mask, next_connection_mask), axis=0)
else:
connection_mask = connection_mask[:, non_zero_neurons]
# Effectively no connections between these two layers.
if not connection_mask.size:
return mask_stats
# Note: np.unique not implemented in JAX numpy yet.
_, unique_counts = np.unique(connection_mask, axis=-1, return_counts=True)
# Convert from device array.
mask_stats['zeroed_neurons'] = int(zeroed_count)
mask_stats['permutations'] = functools.reduce(
operator.mul, (np.math.factorial(t) for t in unique_counts))
mask_stats['unique_neurons'] = len(unique_counts)
return mask_stats
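# Illustrative sketch (an assumption, not part of the original module): two identical mask
# columns make the corresponding neurons interchangeable, so this layer has 2! = 2 symmetric
# permutations and 2 unique (non-zero) neuron masks out of 3 total neurons.
def _example_count_permutations_single_layer():
  mask_layer = {'kernel': jnp.array([[1, 1, 0],
                                     [0, 0, 1],
                                     [1, 1, 1]])}
  return count_permutations_mask_layer(mask_layer)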
def count_permutations_mask(mask):
"""Calculates the number of permutations for a given model mask.
Args:
mask: Model masks to check, similar to Model.params.
Returns:
A dictionary with stats on the permutation structure of a mask, including
the number of symmetric permutations of the mask, number of unique mask
columns, count of the zeroed out (structurally pruned) neurons, and total
number of neurons/filters.
"""
sum_keys = ('total_neurons', 'unique_neurons', 'zeroed_neurons')
product_keys = ('permutations',)
# Count permutation stats for each pairwise set of layers.
# Note: I tried doing this with more_itertools.pairwise/itertools.chain, but
# there is a type conflict in passing iterators of different types to
# itertools.chain.
counts = [
count_permutations_mask_layer(layer, next_layer)
for layer, next_layer in utils.pairwise_longest(mask.values())
]
sum_stats = {}
for key in sum_keys:
sum_stats[key] = functools.reduce(operator.add, (z[key] for z in counts))
product_stats = {}
for key in product_keys:
product_stats[key] = functools.reduce(operator.mul,
(z[key] for z in counts))
return {**sum_stats, **product_stats}
def get_mask_stats(mask):
"""Calculates an array of mask statistics.
Args:
mask: A model mask to calculate the statistics of.
Returns:
A dictionary, containing a set of mask statistics.
"""
mask_stats = count_permutations_mask(mask)
mask_stats.update({
'sparsity': masked.mask_sparsity(mask),
'permutation_num_digits': len(str(mask_stats['permutations'])),
'permutation_log10': math.log10(mask_stats['permutations'] + 1),
})
return mask_stats
|
google-research/rigl
|
rigl/experimental/jax/pruning/symmetry.py
|
Python
|
apache-2.0
| 5,956
|
[
"NEURON"
] |
9ab8bef5c9f87ec0930db047e54064aaff640da29d71dab2ca8143d422972748
|
import numpy as np
from ase.io.pupynere import NetCDFFile
# Write array
a1 = np.random.rand(5, 5)
a2 = a1 * 2 - 5
nc = NetCDFFile('test.nc', 'w')
nc.createDimension('dimx', a1.shape[0])
nc.createDimension('dimy', a1.shape[1])
nc.createVariable('matrix1', 'd', ('dimx', 'dimy'))[:] = a1
nc.createVariable('matrix2', 'd', ('dimx', 'dimy'))[:] = a2
nc.sync()
nc.close()
# Read array
nc = NetCDFFile('test.nc', 'r')
b1 = nc.variables['matrix1'][:]
b2 = nc.variables['matrix2'][:]
assert np.all(a1 == b1) and np.all(a2 == b2)
import os
os.remove('test.nc')
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/test/fio/netcdf.py
|
Python
|
gpl-2.0
| 556
|
[
"ASE"
] |
a5b55e85ab9f734c6fccddec0f4eeffae5b0e2348a5b842ecedf36dea16a7dc3
|
"""Removes hanging LogicalFiles in composite resources. Hanging LogicalFiles do not have a
Resource or do not reference any files.
"""
from django.core.management.base import BaseCommand
from hs_file_types.models.generic import GenericLogicalFile
from hs_file_types.models.geofeature import GeoFeatureLogicalFile
from hs_file_types.models.netcdf import NetCDFLogicalFile
from hs_file_types.models.raster import GeoRasterLogicalFile
from hs_file_types.models.reftimeseries import RefTimeseriesLogicalFile
from hs_file_types.models.timeseries import TimeSeriesLogicalFile
from hs_file_types.models.model_instance import ModelInstanceLogicalFile
from hs_file_types.models.model_program import ModelProgramLogicalFile
from hs_file_types.models.fileset import FileSetLogicalFile
def delete_hanging_logical_files(logical_files):
count = 0
for lf in logical_files:
if not hasattr(lf, 'resource'):
lf.delete()
count = count + 1
elif not lf.files.all():
if lf.is_fileset:
# we allow fileset to not have any files
continue
elif lf.is_model_instance and lf.folder:
# we allow model instance based on folder to not have any files
continue
lf.delete()
count = count + 1
return count
class Command(BaseCommand):
help = "Removes Logical Files without a resource and a file"
def handle(self, *args, **options):
count = delete_hanging_logical_files(GenericLogicalFile.objects.all())
print(">> {} GenericLogicalFiles deleted".format(count))
count = delete_hanging_logical_files(GeoFeatureLogicalFile.objects.all())
print(">> {} GeoFeatureLogicalFile deleted".format(count))
count = delete_hanging_logical_files(NetCDFLogicalFile.objects.all())
print(">> {} NetCDFLogicalFile deleted".format(count))
count = delete_hanging_logical_files(GeoRasterLogicalFile.objects.all())
print(">> {} GeoRasterLogicalFile deleted".format(count))
count = delete_hanging_logical_files(RefTimeseriesLogicalFile.objects.all())
print(">> {} RefTimeseriesLogicalFile deleted".format(count))
count = delete_hanging_logical_files(TimeSeriesLogicalFile.objects.all())
print(">> {} TimeSeriesLogicalFile deleted".format(count))
count = delete_hanging_logical_files(ModelInstanceLogicalFile.objects.all())
print(">> {} ModelInstanceLogicalFile deleted".format(count))
count = delete_hanging_logical_files(ModelProgramLogicalFile.objects.all())
print(">> {} ModelProgramLogicalFile deleted".format(count))
count = delete_hanging_logical_files(FileSetLogicalFile.objects.all())
print(">> {} FileSetLogicalFile deleted".format(count))
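# Typical invocation (assuming the standard Django management-command layout, where the
# command name is taken from this module's file name):
#   python manage.py delete_hanging_logical_files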
|
hydroshare/hydroshare
|
hs_core/management/commands/delete_hanging_logical_files.py
|
Python
|
bsd-3-clause
| 2,806
|
[
"NetCDF"
] |
28e8ee6e4346af94cf01e1dcfa48337ac56678d7ad8ae3ceece8046e844ba124
|
# Awn Applet Library - Simplified APIs for programming applets for Awn.
#
# Copyright (C) 2007 - 2008 Pavel Panchekha <[email protected]>
# 2008 - 2010 onox <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import pygtk
pygtk.require("2.0")
import gtk
from desktopagnostic import config, Color
from desktopagnostic.ui import ColorButton
import awn
from awn.extras import configbinder, __version__
import cairo
import cPickle as cpickle
import gobject
___file___ = sys.argv[0]
# Basically, __file__ = current file location
# sys.argv[0] = file name or called file
# Since awnlib is in site-packages, __file__ refers to something there
# For relative paths to work, we need a way of determining where the
# User applet is. So this bit of magic works.
bug_report_link = "https://launchpad.net/awn-extras/+filebug"
def create_frame(parent, label):
"""Create a frame with a bold title. To be used in a preferences window.
"""
vbox = gtk.VBox(spacing=6)
parent.add(vbox)
label = gtk.Label("<b>" + label + "</b>")
label.set_use_markup(True)
label.props.xalign = 0.0
vbox.add(label)
alignment = gtk.Alignment()
alignment.set_padding(0, 0, 12, 0)
vbox.add(alignment)
frame_vbox = gtk.VBox(spacing=6)
alignment.add(frame_vbox)
return frame_vbox
def add_cell_renderer_text(combobox):
"""Add a gtk.CellRendererText to the combobox. To be used if the combobox
has a gtk.ListStore model with a string as the first column.
"""
text = gtk.CellRendererText()
combobox.pack_start(text, True)
combobox.add_attribute(text, "text", 0)
def deprecated(old, new):
def decorator(f):
def wrapper(*args, **kwargs):
m = "\nawnlib warning in %s:\n\t%s is deprecated; use %s instead\n"
print m % (os.path.split(___file___)[1], old, new)
return f(*args, **kwargs)
return wrapper
return decorator
class KeyRingError:
def __init__(self, str):
self.msg = str
def __str__(self):
return self.msg
class Dialogs:
__special_dialogs = ("menu", "about", "preferences")
def __init__(self, parent):
"""Create an instance of Dialogs. Creates a context menu,
and an About dialog, which is added to the menu.
@param parent: The parent applet of the dialogs instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__register = {}
self.__current = None
self.menu = self.new("menu")
meta_keys = self.__parent.meta.keys()
# Create the About dialog if the applet provides the necessary metadata
if all([key in meta_keys for key in ("name", "author", "copyright-year")]):
about_dialog = self.new("about")
about_item = gtk.ImageMenuItem("_About %s" % self.__parent.meta["name"])
if gtk.gtk_version >= (2, 16, 0):
about_item.props.always_show_image = True
about_item.set_image(gtk.image_new_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU))
self.menu.append(about_item)
about_item.connect("activate", lambda w: self.toggle("about"))
def popup_menu_cb(widget, event):
self.toggle("menu", once=True, event=event)
parent.connect("context-menu-popup", popup_menu_cb)
def clicked_cb(widget, dialog_name):
if dialog_name in self.__register:
self.toggle(dialog_name)
parent.connect("clicked", clicked_cb, "main")
parent.connect("middle-clicked", clicked_cb, "secondary")
def new(self, dialog, title=None, focus=True):
"""Create a new AWN dialog.
@param dialog: The name to register the dialog under.
@type dialog: C{string}
@param title: The title of the new dialog
@type title: C{string}
@param focus: Whether to force the focus
@type focus: C{bool}
@return: The new menu or dialog
@rtype: C{gtk.Menu}, C{gtk.Dialog}, or C{awn.Dialog}
"""
if dialog == "menu":
dlog = self.__parent.create_default_menu()
elif dialog == "about":
dlog = self.AboutDialog(self.__parent)
elif dialog == "preferences":
dlog = self.PreferencesDialog(self.__parent)
position = len(self.menu)
if "about" in self.__register:
position = position - 1
prefs_item = gtk.ImageMenuItem(stock_id=gtk.STOCK_PREFERENCES)
if gtk.gtk_version >= (2, 16, 0):
prefs_item.props.always_show_image = True
self.menu.insert(prefs_item, position)
prefs_item.connect("activate", lambda w: self.toggle(
"preferences", "show"))
else:
dlog = awn.Dialog(self.__parent)
self.register(dialog, dlog, focus)
if dialog not in self.__special_dialogs and title:
dlog.set_title(" " + title + " ")
return dlog
def register(self, dialog, dlog, focus=True):
"""Register a dialog.
Once a name has been registered, it cannot be registered again
until the dialog is explicitly unregistered.
@param dialog: The name to use for the dialog.
@type dialog: C{string}
@param dlog: The actual dialog or menu or function.
@type dlog: C{function}, C{gtk.Menu}, or C{awn.AppletDialog}
@param focus: True if the dialog should be hidden when focus is lost, False otherwise.
@type focus: C{bool}
"""
if dialog in self.__register:
raise RuntimeError("Dialog '%s' already registered" % dialog)
if focus and dialog not in self.__special_dialogs:
dlog.props.hide_on_unfocus = focus
self.__register[dialog] = dlog
def unregister(self, dialog):
"""Unregister a dialog.
@param dialog: The name to use for the dialog. Must not be equal
to the name of any of the special dialogs.
@type dialog: C{string}
"""
if dialog not in self.__register:
raise RuntimeError("Dialog '%s' not registered" % dialog)
if dialog in self.__special_dialogs:
raise RuntimeError("Unregistering special dialog '%s' is forbidden" % dialog)
del self.__register[dialog]
def toggle(self, dialog, force="", once=False, event=None):
"""Show or hide a dialog.
@param dialog: The dialog that should be shown.
@type dialog: C{string}
@param force: "Hide" or "Show". Whether to force the hiding or showing
of the dialog in question.
@type force: C{string}
@param once: Only show or hide one dialog. If a dialog is already
opened, and you request that another dialog be toggled, only the
open one is hidden. False by default.
@type once: C{bool}
@param event: The event that triggered the toggle.
@type event: C{gdk.Event}
"""
force = force.lower()
assert force in ("hide", "show", ""), "Force must be \"hide\", \"show\", or \"\""
assert dialog in self.__register, "Dialog '%s' must be registered" % dialog
if dialog == "menu":
self.__register["menu"].show_all()
self.__register["menu"].popup(None, None, None, event.button, event.time)
elif dialog == "about":
self.__register["about"].show()
self.__register["about"].deiconify()
else:
if force == "hide" or (self.__register[dialog].is_active() and force != "show"):
self.__register[dialog].hide()
self.__current = None
# Because the dialog is now hidden, show the tooltip again
self.__parent.tooltip.show()
else:
self.__parent.tooltip.hide()
if self.__current is not None and self.__current not in self.__special_dialogs:
current = self.__register[self.__current]
current_was_active = current.is_active()
current.hide()
if current_was_active and once:
self.__current = None
return
self.__register[dialog].show_all()
self.__current = dialog
if dialog == "preferences":
self.__register[dialog].deiconify()
def hide(self):
"""Hide the currently visible dialog.
"""
if self.__current is not None:
self.__register[self.__current].hide()
self.__current = None
def is_visible(self, dialog):
"""Return True if the specified dialog is visible, False otherwise.
"""
assert dialog in self.__register, "Dialog '%s' must be registered" % dialog
return self.__register[dialog].is_active()
class BaseDialog:
"""Base class for dialogs. Sets and updates the icon and hides
the dialog instead of letting it being destroyed.
"""
def __init__(self, parent):
self.__parent = parent
if "logo" in parent.meta:
self.update_logo_icon()
parent.connect_size_changed(self.update_logo_icon)
elif "theme" in parent.meta:
self.update_theme_icon()
parent.connect_size_changed(self.update_theme_icon)
# Connect some signals to be able to hide the window
self.connect("response", self.response_event)
self.connect("delete_event", self.delete_event)
def delete_event(self, widget, event):
return True
def response_event(self, widget, response):
if response < 0:
self.hide()
def update_logo_icon(self):
"""Update the logo to be of the same height as the panel.
"""
size = self.__parent.get_size()
self.set_icon(gtk.gdk.pixbuf_new_from_file_at_size( \
self.__parent.meta["logo"], size, size))
def update_theme_icon(self):
"""Updates the logo to be of the same height as the panel.
"""
self.set_icon(self.__parent.get_icon() \
.get_icon_at_size(self.__parent.get_size()))
class AboutDialog(BaseDialog, gtk.AboutDialog):
"""Applet's About dialog.
"""
def __init__(self, parent):
gtk.AboutDialog.__init__(self)
Dialogs.BaseDialog.__init__(self, parent)
self.__parent = parent
self.set_name(parent.meta["name"])
if "version" in parent.meta:
self.set_version(parent.meta["version"])
if "description" in parent.meta:
self.set_comments(parent.meta["description"])
copyright_info = (parent.meta["copyright-year"], parent.meta["author"])
self.set_copyright("Copyright \xc2\xa9 %s %s" % copyright_info)
if "authors" in parent.meta:
self.set_authors(parent.meta["authors"])
if "artists" in parent.meta:
self.set_artists(parent.meta["artists"])
if "logo" in parent.meta:
self.set_logo(gtk.gdk.pixbuf_new_from_file_at_size( \
parent.meta["logo"], 48, 48))
elif "theme" in parent.meta:
# It is assumed that the C{awn.Icons}
# object has been set via set_awn_icon() in C{Icon}
self.set_logo(parent.get_icon().get_icon_at_size(48))
class PreferencesDialog(BaseDialog, gtk.Dialog):
"""A Dialog window that has the title "<applet's name> Preferences",
uses the applet's logo as its icon and has a Close button.
"""
def __init__(self, parent):
gtk.Dialog.__init__(self, flags=gtk.DIALOG_NO_SEPARATOR)
Dialogs.BaseDialog.__init__(self, parent)
self.__parent = parent
self.set_resizable(False)
self.set_border_width(5)
self.set_title(parent.meta["name"] + " Preferences")
self.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
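# Illustrative sketch (an assumption, not part of awnlib): a typical applet creates one
# dialog under the name "main", which the Dialogs constructor already toggles on left-click.
def _example_show_main_dialog(applet):
    """Create and show a dialog registered as "main" for the given awnlib Applet."""
    dialogs = Dialogs(applet)
    dialog = dialogs.new("main", title="Example")
    dialog.add(gtk.Label("Hello from the applet"))
    dialogs.toggle("main", force="show")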
class Tooltip:
def __init__(self, parent):
"""Create a new Tooltip object.
@param parent: The parent applet of the tooltip instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__tooltip = parent.get_icon().get_tooltip()
self.set(parent.meta["name"])
self.disable_toggle_on_click()
if parent.meta.has_option("no-tooltip"):
self.__tooltip.props.smart_behavior = False
def disable_toggle_on_click(self):
self.__tooltip.props.toggle_on_click = False
def is_visible(self):
return (self.__tooltip.flags() & gtk.VISIBLE) != 0
def show(self):
"""Show the applet tooltip.
"""
self.__tooltip.show()
def hide(self):
"""Hide the applet tooltip.
"""
self.__tooltip.hide()
def set(self, text):
"""Set the applet tooltip.
@param text: The new tooltip text. Defaults to "".
@type text: C{string}
"""
self.__parent.set_tooltip_text(text)
def connect_becomes_visible(self, callback):
assert callable(callback)
self.__tooltip.connect("map-event", lambda w, e: callback())
class Icon:
APPLET_SIZE = "applet-size"
def __init__(self, parent):
"""Create a new Icon object.
@param parent: The parent applet of the icon instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__previous_context = None
# Set the themed icon to set the C{awn.Icons} object
if "theme" in parent.meta:
# TODO does not handle multiple icons yet
self.theme(parent.meta["theme"])
def file(self, file, set=True, size=None):
"""Get an icon from a file location.
@param file: The path to the file. Can be relative or absolute.
@type file: C{string}
@param set: Whether to also set the icon. True by default.
@type set: C{bool}
@param size: Width and height of icon.
@type size: C{int}
@return: The resultant pixbuf or None (if C{set} is C{True})
@rtype: C{gtk.gdk.Pixbuf} or C{None}
"""
if file[0] != "/":
            file = os.path.join(os.path.abspath(os.path.dirname(__file__)), file)
if size is None:
icon = gtk.gdk.pixbuf_new_from_file(file)
else:
if size is self.__class__.APPLET_SIZE:
size = self.__parent.get_size()
icon = gtk.gdk.pixbuf_new_from_file_at_size(file, size, size)
if set:
self.set(icon)
else:
return icon
def theme(self, name):
"""Set an icon from the default icon theme. The resultant
pixbuf will be returned.
@param name: The name of the theme icon.
@type name: C{string}
@return: The resultant pixbuf
@rtype: C{gtk.gdk.Pixbuf}
"""
return self.__parent.set_icon_name(name)
def set(self, icon):
"""Set a C{gtk.gdk.pixbuf} or C{cairo.Context} as your applet icon.
@param icon: The icon to set your applet icon to.
@type icon: C{gtk.gdk.Pixbuf} or C{cairo.Context}
"""
if isinstance(icon, cairo.Context):
self.__parent.set_icon_context(icon)
if self.__previous_context != icon:
del self.__previous_context
self.__previous_context = icon
else:
self.__parent.set_icon_pixbuf(icon)
def hide(self):
"""Hide the applet's icon.
"""
self.__parent.hide()
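    # Illustrative usage (sketch): the icon can come from the icon theme or from a
    # file; the file path below is hypothetical.
    #
    #     applet.icon.theme("applications-internet")
    #     pixbuf = applet.icon.file("icons/logo.png", set=False, size=48)
    #     applet.icon.set(pixbuf)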
class Theme:
def __init__(self, parent):
"""Create a new Theme object.
@param parent: The parent applet of the theme instance.
@type parent: L{Applet}
"""
self.__parent = parent
self.__states = None
self.__icon_state = None
def set_states(self, states_icons):
self.__states, icons = zip(*states_icons.items())
self.__icon_state = None
self.__parent.set_icon_info(self.__states, icons)
def icon(self, state):
if self.__states is None or state not in self.__states:
raise RuntimeError("invalid state")
if state != self.__icon_state:
self.__icon_state = state
self.__parent.set_icon_state(state)
def theme(self, theme):
self.__parent.get_icon().override_gtk_theme(theme)
class Errors:
    def __init__(self, parent):
        """Create a new Errors object.
        @param parent: The parent applet of the errors instance.
@type parent: L{Applet}
"""
self.__parent = parent
def module(self, scope, name):
"""Tell the user that they need to install a module to use your applet.
        This function attempts to import the module into the given scope; if the
        import fails, it alerts the user by switching the applet's icon and tooltip
        to an error state and delegating to awn.check_dependencies().
@param scope: The dictionary that contains the globals to
import the module into
@type scope: C{dict}
@param name: the name of the module that must be installed.
@type name: C{string}
"""
try:
""" Don't add the module to globals[name], otherwise
awn.check_dependencies() won't show an error dialog. """
scope[name] = __import__(name, scope)
except ImportError:
self.__parent.icon.theme("dialog-error")
self.__parent.tooltip.set("Python module %s not found" % name)
awn.check_dependencies(scope, name)
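    # Illustrative usage (sketch): "feedparser" is just an example module name. On
    # success the module becomes available in the given scope; on failure the icon
    # and tooltip are switched to an error state as implemented above.
    #
    #     applet.errors.module(globals(), "feedparser")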
def set_error_icon_and_click_to_restart(self):
self.__parent.icon.theme("dialog-error")
def crash_applet(widget=None, event=None):
gtk.main_quit()
self.__parent.connect("clicked", crash_applet)
def general(self, error, callback=None, traceback=None):
"""Tell the user that an error has occured.
@param error: the error itself.
@type error: C{string} or C{Exception}
@param callback: The function called when the user closes the dialog
@type callback: C{function}
@param traceback: Formatted traceback, can be copied to clipboard
via button in dialog.
@type traceback: C{str}
"""
assert isinstance(error, Exception) or type(error) in (str, tuple)
if traceback is not None:
traceback = "".join(traceback)[:-1]
args = {"message": "", "url": None}
if isinstance(error, Exception):
error_type = type(error).__name__
error = str(error)
if traceback is not None:
print "\n".join(["-"*80, traceback, "-"*80])
summary = "%s in %s: %s" % (error_type, self.__parent.meta["name"], error)
if self.__parent.meta["version"] == __version__:
args["message"] = "Visit Launchpad and report the bug by following these steps:\n\n" \
+ "1) Paste the error summary text in the 'summary' field\n" \
+ "2) Press Continue and then check whether the bug has already been reported or not\n" \
+ "3) If you continue and report the bug, paste the following in the big textarea:\n" \
+ " - the traceback\n" \
+ " - applet version: '%s'\n" % self.__parent.meta["version"] \
+ " - other info requested by the guidelines found below the big textarea"
args["url"] = bug_report_link
else:
args["message"] = "Report this bug at the bug tracker of the %s applet." % self.__parent.meta["name"]
if "bug-report-url" in self.__parent.meta:
args["url"] = self.__parent.meta["bug-report-url"]
else:
error_type = "Error"
if isinstance(error, tuple):
args["message"] = error[1]
error = error[0]
dialog = self.ErrorDialog(self.__parent, error_type, error, **args)
if traceback is not None:
copy_traceback_button = gtk.Button("Copy traceback to clipboard")
copy_traceback_button.set_image(gtk.image_new_from_stock(gtk.STOCK_COPY, gtk.ICON_SIZE_MENU))
dialog.hbox.pack_start(copy_traceback_button, expand=False)
copy_summary_button = gtk.Button("Copy summary to clipboard")
copy_summary_button.set_image(gtk.image_new_from_stock(gtk.STOCK_COPY, gtk.ICON_SIZE_MENU))
dialog.hbox.pack_start(copy_summary_button, expand=False)
dialog.hbox.reorder_child(copy_traceback_button, 0)
dialog.hbox.reorder_child(copy_summary_button, 0)
def clicked_cb(widget, text):
clipboard = gtk.clipboard_get()
clipboard.set_text(text)
clipboard.store()
copy_traceback_button.connect("clicked", clicked_cb, traceback)
copy_summary_button.connect("clicked", clicked_cb, summary)
if callable(callback):
def response_cb(widget, response):
if response < 0:
callback()
dialog.connect("response", response_cb)
dialog.show_all()
class ErrorDialog(Dialogs.BaseDialog, gtk.MessageDialog):
"""A MessageDialog window that shows an error.
"""
def __init__(self, parent, error_type, title, message="", url=None):
gtk.MessageDialog.__init__(self, type=gtk.MESSAGE_ERROR, message_format=title)
Dialogs.BaseDialog.__init__(self, parent)
self.__parent = parent
self.set_skip_taskbar_hint(False)
self.set_title("%s in %s" % (error_type, parent.meta["name"]))
self.hbox = gtk.HBox(spacing=6)
self.action_area.add(self.hbox)
close_button = gtk.Button(stock=gtk.STOCK_CLOSE)
close_button.connect("clicked", lambda w: self.response(gtk.RESPONSE_CLOSE))
self.hbox.add(close_button)
if len(message) > 0:
self.format_secondary_markup(message)
if url is not None:
alignment = gtk.Alignment(xalign=0.5, xscale=0.0)
alignment.add(gtk.LinkButton(url))
self.vbox.pack_start(alignment, expand=False)
class Settings:
__setting_types = (bool, int, long, float, str, list, Color)
def __init__(self, parent):
"""Create a new Settings object. This object
can be used as a dictionary to retrieve and set values of
configuration keys. More importantly, this object provides
the methods get_binder() and load_bindings(), which should
be used to bind keys to their corresponding Gtk+ widgets,
and to make the keys available as GObject properties.
@param parent: The parent applet of the settings instance.
@type parent: L{Applet}
"""
type_parent = type(parent)
if type_parent in (Applet, config.Client):
self.__folder = config.GROUP_DEFAULT
elif type_parent is str:
self.__folder = parent
parent = None
self.__client = self.ConfigClient(self.__folder, parent)
def get_binder(self, builder):
"""Return an object that can be used to bind keys to their
corresponding Gtk+ widgets, which are to be retrieved
via the given C{gtk.Builder} instance.
        @param builder: Instance of C{gtk.Builder}, used to retrieve Gtk+ widgets
        @type builder: C{gtk.Builder}
@return: An object that provides the method bind() to bind keys
@rtype: C{object}
"""
return self.__client.get_config_binder(builder)
def load_bindings(self, object):
"""Load the bindings by creating a C{gobject.GObject} from the
descriptions given by the given binder object. This object
should be an object that was returned by get_binder(). The
"props" value (instance of C{gobject.GProps}) of the GObject will
be returned.
        @param object: An object returned by get_binder()
        @type object: C{object}
@return: The "props" value of the created GObject
@rtype: C{gobject.GProps}
"""
return self.__client.load_bindings(object)
    def __getitem__(self, key):
        """Get a key from the current directory.
@param key: A relative path to the correct key
@type key: C{string}
@return: The value of the key
@rtype: C{object}
"""
value = self.__client.get(key)
if type(value) is str and value[:9] == "!pickle;\n":
value = cpickle.loads(value[9:])
return value
    def __setitem__(self, key, value):
        """Set or create a key in the current directory.
@param key: A relative path to the correct key
@type key: C{string}
"""
unpickled_value = value
if type(value) not in self.__setting_types:
value = "!pickle;\n%s" % cpickle.dumps(value)
elif type(value) is long:
value = int(value)
self.__client.set(key, value)
def __contains__(self, key):
"""Test if a key exists in the current directory.
@param key: A relative path to the correct key
@type key: C{string}
"""
return self.__client.contains(key)
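    # Illustrative usage (sketch): Settings behaves like a dictionary; values whose
    # type is not directly supported are pickled transparently by
    # __setitem__/__getitem__. The key name below is hypothetical.
    #
    #     applet.settings["refresh-interval"] = 30
    #     if "refresh-interval" in applet.settings:
    #         interval = applet.settings["refresh-interval"]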
class ConfigClient:
def __init__(self, folder, client=None):
"""Create a new config client.
If the client is an C{Applet}, config instances will
automatically be removed if the applet is deleted.
@param folder: Folder to start with.
@type folder: C{string}
@param client: Applet used to construct a corresponding
config.Client or a preconstructed config.Client
@type client: C{None,Applet,config.Client}
"""
self.__config_object = None
type_client = type(client)
if client is None:
self.__client = awn.config_get_default(awn.PANEL_ID_DEFAULT)
elif type_client is Applet:
self.__client = awn.config_get_default_for_applet(client)
def applet_deleted_cb(applet):
self.__client.remove_instance()
client.connect("applet-deleted", applet_deleted_cb)
elif type_client is config.Client:
self.__client = client
else:
raise RuntimeError("Parameter 'client' must be None, an Applet, or a config.Client")
self.__folder = folder
def get_config_binder(self, builder):
if not isinstance(builder, gtk.Builder):
raise RuntimeError("Builder must be an instance of gtk.Builder")
return configbinder.get_config_binder(self.__client, self.__folder, builder)
def load_bindings(self, binder):
if self.__config_object is not None:
raise RuntimeError("Configuration object already set")
self.__config_object = binder.create_gobject()
return self.__config_object.props
def set(self, key, value):
"""Set an existing key's value.
@param key: The name of the key, relative to the current folder.
@type key: C{string}
@param value: The value to set the key to.
@type value: C{bool}, C{int}, C{float}, or C{string}
"""
try:
self.__config_object.set_property(key, value)
except:
try:
self.__client.set_value(self.__folder, key, value)
except:
raise ValueError("Could not set new value of '%s'" % key)
def get(self, key):
"""Get an existing key's value.
@param key: The name of the key, relative to the current folder.
@type key: C{string}
@return: The value of the key
@rtype: C{object}
"""
try:
return self.__config_object.get_property(key)
except:
try:
return self.__client.get_value(self.__folder, key)
except:
raise ValueError("'%s' does not exist" % key)
def contains(self, key):
"""Test if the key maps to a value.
@param key: The name of the key, relative to the current folder.
@type key: C{string}
@return: True if the key maps to a value, False otherwise
@rtype: C{bool}
"""
r = False
if self.__config_object is not None:
r = key in gobject.list_properties(self.__config_object)
if r:
return r
try:
self.__client.get_value(self.__folder, key)
except Exception, e:
if str(e).split(":", 1)[0] == "Could not find the key specified":
return False
return True
class Keyring:
def __init__(self, parent=None):
"""Create a new Keyring object. This includes importing the keyring
module and connecting to the daemon.
@param parent: The parent applet of the keyring instance.
@type parent: L{Applet}
"""
if parent is not None:
self.__parent = parent
self.__parent.errors.module(globals(), "gnomekeyring")
else:
awn.check_dependencies(globals(), "gnomekeyring")
if not gnomekeyring.is_available():
raise KeyRingError("Keyring not available")
keyring_list = gnomekeyring.list_keyring_names_sync()
if len(keyring_list) == 0:
raise KeyRingError("No keyrings available")
try:
gnomekeyring.get_default_keyring_sync()
except gnomekeyring.NoKeyringDaemonError:
raise KeyRingError("Had trouble connecting to daemon")
def new(self, name=None, pwd=None, attrs={}, type="generic"):
"""Create a new keyring key.
@param name: The display name of the key. If omitted, an empty key is
returned.
@type name: C{string}
        @param pwd: The password stored in the key. If omitted, an empty key is
returned.
@type pwd: C{string}
@param attrs: Other attributes stored in the key. By default: {}
@type attrs: C{dict}
@param type: The type of key. By default: "generic"
@type type: C{string}; "generic", "network", or "note"
@return: A new L{Key} object
@rtype: L{Key}
"""
k = self.Key()
if name and pwd:
k.set(name, pwd, attrs, type)
return k
def from_token(self, token):
"""Load the key with the given token.
@param token: The password token of the key
@type token: C{int} or C{long}
@return: A new L{Key} object
@rtype: L{Key}
"""
k = self.Key()
k.token = token
return k
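    # Illustrative usage (sketch): create a key, remember its token (e.g. in
    # applet.settings), and load it again later. Names and secrets are hypothetical.
    #
    #     key = applet.keyring.new("MyApplet account", "s3cret", {"user": "me"}, "generic")
    #     token = key.token
    #     ...
    #     key = applet.keyring.from_token(token)
    #     password = key.password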
class Key(object):
def __init__(self, token=0):
"""Create a new key.
@param token: The token of an already-existing key. Optional.
@type token: C{long}
"""
self.token = token
def set(self, name, pwd, attrs={}, type="generic"):
"""Create a new keyring key. Note that if another key
exists with the same name, it will be overwritten.
@param name: The display name of the key.
@type name: C{string}
@param pwd: The password stored in the key.
@type pwd: C{string}
@param attrs: Other attributes stored in the key. By default: {}
@type attrs: C{dict}
@param type: The type of key. By default: "generic"
@type type: C{string}; "generic", "network", or "note"
"""
if type == "network":
type = gnomekeyring.ITEM_NETWORK_PASSWORD
elif type == "note":
type = gnomekeyring.ITEM_NOTE
else: # Generic included
type = gnomekeyring.ITEM_GENERIC_SECRET
self.token = gnomekeyring.item_create_sync(None, type, name, \
attrs, pwd, True)
def delete(self):
"""Delete the current key. Will also reset the token. Note that
"del [Key]" will not delete the key itself; that would be too
destructive. delete() MUST be called manually.
"""
gnomekeyring.item_delete_sync(None, self.token)
self.token = 0
def __get(self):
return gnomekeyring.item_get_info_sync(None, self.token)
def __getAttrs(self):
return gnomekeyring.item_get_attributes_sync(None, self.token)
def __setAttrs(self, a):
return gnomekeyring.item_set_attributes_sync(None, self.token, a)
def __getName(self):
return self.__get().get_display_name()
def __setName(self, name):
self.__get().set_display_name(name)
def __getPass(self):
return self.__get().get_secret()
def __setPass(self, passwd):
self.__get().set_secret(passwd)
attrs = property(__getAttrs, __setAttrs)
"""
@ivar: The other attributes stored in the Key. Can be used like any
property.
"""
name = property(__getName, __setName)
"""
@ivar: The display name of the Key. Can be used like any property
"""
password = property(__getPass, __setPass)
"""
@ivar: The password stored in the Key. Can be used like any property.
"""
class Timing:
"""Provides utilities to register a function to be called periodically
or once after a specified delay.
"""
def __init__(self, parent):
"""Create a new Timing object.
@param parent: The parent applet of the timing instance.
@type parent: L{Applet}
"""
self.__parent = parent
def register(self, callback, seconds, start=True):
"""Register a function to be called periodically.
@param callback: Function to be called.
@type callback: C{function}
        @param seconds: Number of seconds between consecutive calls.
@type seconds: C{float} or C{int}
@param start: Whether to start the callback automatically
@type start: C{bool}
@return: A L{Callback} object for the C{callback} parameter
@rtype: L{Callback}
"""
def callback_wrapper():
callback()
return True
cb = self.Callback(callback_wrapper, seconds)
if start:
cb.start()
return cb
def delay(self, callback, seconds, start=True):
"""Delay the execution of the given callback.
        @param callback: Function to be called once after the delay.
@type callback: C{function}
@param seconds: Number of seconds to delay function call
@type seconds: C{float} or C{int}
@return: A L{Callback} object for the C{callback} parameter
@rtype: L{Callback}
"""
def callback_wrapper():
callback()
return False
cb = self.Callback(callback_wrapper, seconds)
if start:
cb.start()
return cb
class Callback:
"""Wrapper around a callback function to provide ways to start and
stop the function, to change the interval or to test if the callback
is scheduled to run.
"""
def __init__(self, callback, seconds):
"""Create a new C{Callback} object.
@param callback: The function to wrap the Callback around.
@type callback: C{function}
            @param seconds: Number of seconds between consecutive calls.
@type seconds: C{float} or C{int}
"""
assert seconds > 0.0
self.__callback = callback
self.__seconds = seconds
self.__timer_id = None
def is_started(self):
"""Return True if the callback has been scheduled to run after
each interval, False if the callback is stopped.
@return: True if the callback has been scheduled, False otherwise
@rtype: L{bool}
"""
return self.__timer_id is not None
def start(self):
"""Start executing the callback periodically.
@return: True if the callback was started, False otherwise
@rtype: L{bool}
"""
if self.__timer_id is not None:
return False
if int(self.__seconds) == self.__seconds:
self.__timer_id = gobject.timeout_add_seconds(int(self.__seconds), self.__callback)
else:
self.__timer_id = gobject.timeout_add(int(self.__seconds * 1000), self.__callback)
return True
def stop(self):
"""Stop the callback from running again if it was scheduled
to run.
@return: True if the callback was stopped, False otherwise
@rtype: L{bool}
"""
if self.__timer_id is None:
return False
gobject.source_remove(self.__timer_id)
self.__timer_id = None
return True
def change_interval(self, seconds):
"""Change the interval and restart the callback if it was scheduled
to run.
            @param seconds: Number of seconds between consecutive calls.
@type seconds: C{float} or C{int}
"""
assert seconds > 0.0
self.__seconds = seconds
# Restart if the callback was scheduled to run
if self.stop():
self.start()
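# Illustrative usage (sketch): run a callback every 30 seconds and adjust it later.
# The callback body is hypothetical.
#
#     def update():
#         applet.tooltip.set("updated")
#     cb = applet.timing.register(update, 30)
#     cb.change_interval(60)            # now runs once a minute
#     cb.stop()
#     applet.timing.delay(update, 5)    # one-shot call after five seconds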
class Notify:
def __init__(self, parent):
"""Create a new Notify object.
@param parent: The parent applet of the notify instance.
@type parent: L{Applet}
"""
self.__parent = parent
awn.check_dependencies(globals(), "pynotify")
pynotify.init(parent.meta["short"])
def __del__(self):
pynotify.uninit()
def send(self, *args, **kwargs):
"""Show a new notification via libnotify.
@param subject: The subject of your message. If blank, "Message from
[applet name]" is used.
@type subject: C{string}
@param body: The main body of your message. Blank by default.
@type body: C{string}
@param icon: The full absolute path to the name of the icon to use.
@type icon: C{string}
@param timeout: Timeout in seconds after which the message closes
@type timeout: C{int}
"""
notification = self.Notification(self.__parent, *args, **kwargs)
notification.show()
def create_notification(self, *args, **kwargs):
"""Return a notification that can be shown via show().
@param subject: The subject of your message. If blank, "Message from
[applet name]" is used.
@type subject: C{string}
@param body: The main body of your message. Blank by default.
@type body: C{string}
@param icon: The full absolute path to the name of the icon to use.
@type icon: C{string}
@param timeout: Timeout in seconds after which the message closes
@type timeout: C{int}
@return: a notification object
@rtype: C{self.Notification}
"""
return self.Notification(self.__parent, *args, **kwargs)
class Notification:
"""An object that manages a libnotify notification.
"""
def __init__(self, parent, subject=None, body="", icon="", timeout=0):
if subject is None:
subject = '"Message From %s"' % parent.meta["name"]
self.__notification = pynotify.Notification(subject, body, icon)
if timeout > 0:
self.__notification.set_timeout(timeout * 1000)
def show(self):
self.__notification.show()
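# Illustrative usage (sketch): send a one-off notification, or build one up front
# and show it later. Subject and body text are hypothetical.
#
#     applet.notify.send("Download finished", "All items were fetched.", timeout=5)
#     reminder = applet.notify.create_notification("Reminder", "Check the feed")
#     reminder.show()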
class Effects:
def __init__(self, parent):
"""Create a new Effects object.
@param parent: The parent applet of the effects instance.
@type parent: L{Applet}
"""
self.__effects = parent.get_icon().get_effects()
def attention(self):
"""Launch the notify effect.
Should be used when the user's attention is required.
"""
self.__effects.start("attention")
def launch(self):
"""Launch the launch effect.
Should be used when launching another program.
"""
self.__effects.start("launching")
class Meta:
def __init__(self, parent, info={}, options=()):
"""Create a new Meta object.
@param parent: The parent applet of the meta instance.
@type parent: L{Applet}
@param info: Values for the meta dictionary
@type info: C{dict}
@param options: Options to set. Format:
        ("option", "option", ("option", True|False), ("option",
        ("suboption", "suboption", ("suboption", True|False), ...)))
"""
assert "name" in info
self.__parent = parent
self.__info = info
self.__options = {}
self.options(options)
def update(self, info):
"""Update the meta instance with new information.
@param info: Updated values for the meta dictionary
@type info: C{dict}
"""
self.__info.update(info)
def options(self, opts):
"""Update the options the applet has set
@param opts: Options to set
@type opts: C{list} or C{tuple}
"""
self.__options.update(self.__parse_options(opts))
def has_option(self, option):
"""Check if the applet has set a specific option.
@param option: Option to check. Format: "option/suboption/suboption"
@type option: C{str}
"""
option = option.split("/")
srch = self.__options
for i in option:
if i not in srch or not srch[i]:
return False
elif srch[i] == True: # tuples evaluate to True
return True
else:
srch = srch[i]
return True
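    # Illustrative usage (sketch): options are passed to init_start() as a tuple and
    # queried with has_option(); the module import name and the "debug/verbose"
    # option below are assumptions, while "no-tooltip" is checked by Tooltip above.
    #
    #     awnlib.init_start(my_applet, meta, options=("no-tooltip", ("debug", ("verbose", True))))
    #     applet.meta.has_option("no-tooltip")       # True
    #     applet.meta.has_option("debug/verbose")    # True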
def __parse_options(self, options):
t = {}
for i in options:
if type(i) is str:
t[i] = True
elif type(i) in (tuple, list):
if type(i[1]) is bool:
t[i[0]] = i[1]
elif type(i[1]) in (tuple, list):
                    t[i[0]] = self.__parse_options(i[1])
return t
def __getitem__(self, key):
"""Get a key from the dictionary.
@param key: The key
@type key: C{string}
"""
return self.__info[key]
def __setitem__(self, key, value):
"""Set a key in the dictionary.
@param key: The key
@type key: C{string}
@param value: The value
@type value: C{string}
"""
self.__info[key] = value
def __delitem__(self, key):
"""Delete a key from the dictionary.
@param key: The key
@type key: C{string}
"""
del self.__info[key]
def keys(self):
"""Return a list of keys from the dictionary.
"""
return self.__info.keys()
def __contains__(self, key):
"""Return True if the dictionary contains the key, False otherwise.
@param key: The key
@type key: C{string}
"""
return key in self.__info
class Applet(awn.AppletSimple, object):
def __init__(self, uid, panel_id, meta={}, options=[]):
"""Create a new instance of the Applet object.
@param uid: The unique identifier of the applet
@type uid: C{string}
        @param panel_id: The identifier of the panel the applet runs on
        @type panel_id: C{int}
        @param meta: The meta information to be passed to the Meta constructor
        @type meta: C{dict}
        @param options: Options to be passed to the Meta constructor
        @type options: C{list} or C{tuple}
"""
awn.AppletSimple.__init__(self, meta["short"], uid, panel_id)
self.uid = uid
# Create all required child-objects, others will be lazy-loaded
self.meta = Meta(self, meta, options)
self.icon = Icon(self)
self.tooltip = Tooltip(self)
# Dialogs depends on settings
self.dialog = Dialogs(self)
def connect_size_changed(self, callback):
self.connect("size-changed", lambda w, e: callback())
def __getmodule(module):
"""Return a getter that lazy-loads a module, represented by a
single instantiated class.
@param module: The class of the module to initialize and get
@type module: C{class}
"""
instance = {}
def getter(self):
if module not in instance:
instance[module] = module(self)
return instance[module]
return property(getter)
settings = __getmodule(Settings)
theme = __getmodule(Theme)
timing = __getmodule(Timing)
errors = __getmodule(Errors)
keyring = __getmodule(Keyring)
notify = __getmodule(Notify)
effects = __getmodule(Effects)
def init_start(applet_class, meta={}, options=[]):
"""Do the work to create a new applet, and then start the applet.
This makes the icon appear on the bar and starts GTK+.
The callable applet_class parameter is called and given an instance of
C{Applet}. It can then set an icon, tooltip, dialogs, and other things,
before GTK+ starts, which makes the icon appear on the AWN panel.
    @param applet_class: A callable, used to do some initialization
@type applet_class: C{callable}
@param meta: The meta-information to pass to the constructor
@type meta: C{dict}
@param options: Options to set for the new applet
@type options: C{list} or C{tuple}
@return: The newly created applet.
@rtype: L{Applet}
"""
assert callable(applet_class)
gobject.threads_init()
awn.init(sys.argv[1:])
applet = Applet(awn.uid, awn.panel_id, meta, options)
try:
applet_class(applet)
except Exception, e:
applet.errors.set_error_icon_and_click_to_restart()
import traceback
traceback = traceback.format_exception(type(e), e, sys.exc_traceback)
applet.errors.general(e, traceback=traceback, callback=gtk.main_quit)
awn.embed_applet(applet)
gtk.main()
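# Illustrative example (a minimal sketch, not part of the module): a typical applet
# script built on these helpers. The import path and all meta values are assumptions.
#
#     from awn.extras import awnlib
#
#     def hello_applet(applet):
#         applet.icon.theme("applications-internet")
#         applet.tooltip.set("Hello from awnlib")
#
#     if __name__ == "__main__":
#         awnlib.init_start(hello_applet, meta={
#             "name": "Hello Applet",
#             "short": "hello-applet",
#             "author": "Example Author",
#             "copyright-year": 2010,
#             "theme": "applications-internet"})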
|
gilir/awn-extras-debian
|
shared/python/awnlib.py
|
Python
|
gpl-3.0
| 47,971
|
[
"VisIt"
] |
3b06477e669cd450240c0572b1c0ac1b6a43ede672fe10984fe018643b3e75b7
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <[email protected]>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import cgi
from http import cookies
import dbSession
import pymysql
import ghShared
import ghNames
import ghObjectRecipe
import ghLists
import dbShared
import resource
import schematics
from jinja2 import Environment, FileSystemLoader
def calcIngredientQuality(conn, spawnID, schematicID, user):
# Calculate the quality of ingredient resource for schematic
resQuality = None
if spawnID != None:
qualityData = schematics.getQualityData(conn, schematicID)
qualityResSum = 0
qualityResCount = 0.0
spawn = resource.getResource(conn, 1, user, spawnID, None, None)
for prop in qualityData:
for item in prop:
if getattr(spawn.stats, item[0]) != None:
qualityResCount += item[1]
qualityResSum += getattr(spawn.stats, item[0])*item[1]
if qualityResCount > 0:
resQuality = qualityResSum/qualityResCount
else:
resQuality = None
return resQuality
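# Worked example (illustrative numbers only): if the schematic weights OQ at 2 and
# PE at 1, and the loaded spawn has OQ 900 and PE 600, the loop above yields
# (900*2 + 600*1) / (2 + 1) = 800.0 as the ingredient quality.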
def main():
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
form = cgi.FieldStorage()
uiTheme = ''
# Get Cookies
useCookies = 1
C = cookies.SimpleCookie()
try:
C.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = C['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = C['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = C['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
try:
uiTheme = C['uiTheme'].value
except KeyError:
uiTheme = ''
try:
galaxy = C['galaxy'].value
except KeyError:
galaxy = form.getfirst('galaxy', ghShared.DEFAULT_GALAXY)
else:
currentUser = ''
loginResult = form.getfirst('loginAttempt', '')
sid = form.getfirst('gh_sid', '')
galaxy = form.getfirst('galaxy', ghShared.DEFAULT_GALAXY)
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
# Get a session
logged_state = 0
linkappend = ''
disableStr = ''
if loginResult == None:
loginResult = 'success'
sess = dbSession.getSession(sid)
if (sess != ''):
logged_state = 1
currentUser = sess
if (uiTheme == ''):
uiTheme = dbShared.getUserAttr(currentUser, 'themeName')
if (useCookies == 0):
linkappend = 'gh_sid=' + sid
else:
disableStr = ' disabled="disabled"'
if (uiTheme == ''):
uiTheme = 'crafter'
# Get recipe id from path
path = []
if 'PATH_INFO' in os.environ:
path = os.environ['PATH_INFO'].split('/')[1:]
path = [p for p in path if p != '']
recipeHTML = ''
slotHTML = ''
schemImageName = ''
schematicDetailsHTML = ''
ingTypes = ''
ingGroups = ''
pageType = 'recipe'
	# Create the recipe object up front so the template variables below resolve
	# even when no recipe ID was supplied in the path
	r = ghObjectRecipe.schematicRecipe()
	if len(path) > 0:
		recipeID = dbShared.dbInsertSafe(path[0])
		url = url + '/' + recipeID
if logged_state == 1:
# Look up recipe info
try:
conn = dbShared.ghConn()
cursor = conn.cursor()
except Exception:
recipeHTML = "Error: could not connect to database"
if (cursor and recipeID.isdigit()):
cursor.execute('SELECT recipeID, userID, tRecipe.schematicID, recipeName, (SELECT imageName FROM tSchematicImages si WHERE si.schematicID=tRecipe.schematicID AND si.imageType=1) AS schemImage, schematicName, complexity FROM tRecipe INNER JOIN tSchematic ON tRecipe.schematicID = tSchematic.schematicID WHERE recipeID=' + recipeID + ';')
row = cursor.fetchone()
if (row != None):
if row[1] == currentUser:
# main recipe data
if (row[4] != None):
schemImageName = row[4]
else:
schemImageName = 'none.jpg'
r.recipeID = row[0]
r.schematicID = row[2]
r.recipeName = row[3]
r.schematicImage = schemImageName
# schematic quality data
schematicDetailsHTML = '<div><a href="' + ghShared.BASE_SCRIPT_URL + 'schematics.py/' + row[2] + '" title="Go to schematic page.">' + row[5] + '</a></div>'
schematicDetailsHTML += '<div>Complexity: ' + str(row[6]) + '</div>'
expGroup = ''
expProp = ''
schematicDetailsHTML += '<td valign="top"><h3>Qualities</h3><ul id="qualitiesList" style="margin-top:6px;">'
expCursor = conn.cursor()
expCursor.execute('SELECT tSchematicQualities.expQualityID, expProperty, expGroup, statName, statWeight, weightTotal FROM tSchematicQualities INNER JOIN tSchematicResWeights ON tSchematicQualities.expQualityID = tSchematicResWeights.expQualityID WHERE schematicID="' + r.schematicID + '" ORDER BY expGroup, expProperty, statName;')
expRow = expCursor.fetchone()
while (expRow != None):
if (expGroup != expRow[2]):
tmpName = expRow[2].replace('_',' ')
schematicDetailsHTML = schematicDetailsHTML + '<li class="groupText">' + tmpName + '</li>'
expGroup = expRow[2]
if (expProp != expRow[1]):
tmpName = expRow[1].replace('_',' ')
schematicDetailsHTML = schematicDetailsHTML + '<li class="schemQualityProperty altText">' + tmpName + '</li>'
expProp = expRow[1]
schematicDetailsHTML += '<li class="schemQualityItem" tag="' + expRow[3] + ':' + str((expRow[4]*1.0/expRow[5])*100) + '"><span class="inlineBlock" style="width:100px;">' + ghNames.getStatName(expRow[3]) + (': </span><span>%.0f' % ((expRow[4]*1.0/expRow[5])*100)) + '%</span></li>'
expRow = expCursor.fetchone()
expCursor.close()
# Look up ingredient data
ri = None
sqlStr = 'SELECT si.ingredientName, ingredientResource, ingredientObject, ingredientQuantity, ingredientContribution, rt.containerType tcontainer, rg.containerType gcontainer, rt.resourceTypeName, rg.groupName, ingredientQuality FROM tSchematicIngredients si LEFT JOIN (SELECT ingredientName, ingredientResource, ingredientQuality FROM tRecipeIngredients WHERE recipeID=' + str(r.recipeID) + ') ri ON si.ingredientName = ri.ingredientName LEFT JOIN tResourceType rt ON si.ingredientObject = rt.resourceType LEFT JOIN tResourceGroup rg ON si.ingredientObject = rg.resourceGroup WHERE schematicID="' + r.schematicID + '" ORDER BY ingredientQuantity DESC, si.ingredientName'
ingCursor = conn.cursor()
ingCursor.execute(sqlStr)
ingRow = ingCursor.fetchone()
while (ingRow != None):
if ingRow[5] == None:
if ingRow[6] == None:
container = 'default'
objectName = ingRow[2].rpartition('/')[2].replace('_',' ')
if objectName[-4:] == '.iff':
objectName = objectName[:-4]
else:
ingGroups += '"' + ingRow[2] + '",'
container = ingRow[6]
objectName = ingRow[8]
else:
ingTypes += '"' + ingRow[2] + '",'
container = ingRow[5]
objectName = ingRow[7]
# get details of ingredient resource for schematic
resDetails = ''
if ingRow[1] != None and (ingRow[5] != None or ingRow[6] != None):
spawn = resource.getResource(conn, logged_state, currentUser, ingRow[1], None, None)
resDetails = 'Loaded with: ' + spawn.spawnName + ', ' + spawn.resourceTypeName + '<br />' + spawn.getStatList()
r.recipeIngredients.append(ghObjectRecipe.recipeIngredient(ingRow[2], ingRow[1], ingRow[0], ingRow[3], container, objectName, ingRow[9], resDetails))
ingRow = ingCursor.fetchone()
ingCursor.close()
if ingTypes != '':
ingTypes = ingTypes[:-1]
if ingGroups != '':
ingGroups = ingGroups[:-1]
slotHTML = r.getIngredientSlots()
else:
recipeHTML = "That is not your recipe."
else:
recipeHTML = "The recipe ID given could not be found."
cursor.close()
else:
# Render recipe home if any non number in sub path
pageType = 'home'
			if conn:
				conn.close()
else:
recipeHTML = "You must be logged in to manage recipes."
else:
recipeHTML = 'You have not specified a recipe to edit, would you like to create a new one?<div style="float:right;"><button type=button value="New Recipe" class="ghButton" onclick="addRecipe();">New Recipe</button></div>'
pictureName = dbShared.getUserAttr(currentUser, 'pictureName')
print('Content-type: text/html\n')
env = Environment(loader=FileSystemLoader('templates'))
env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL
env.globals['MOBILE_PLATFORM'] = ghShared.getMobilePlatform(os.environ['HTTP_USER_AGENT'])
template = env.get_template('recipe.html')
print(template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url=url, pictureName=pictureName, imgNum=ghShared.imgNum, galaxyList=ghLists.getGalaxyList(), professionList=ghLists.getProfessionList(galaxy), recipeHTML=recipeHTML, slotHTML=slotHTML, schematicDetailsHTML=schematicDetailsHTML, ingTypes=ingTypes, ingGroups=ingGroups, pageType=pageType, recipeID=r.recipeID, recipeName=r.recipeName, schemImageName=schemImageName, enableCAPTCHA=ghShared.RECAPTCHA_ENABLED, siteidCAPTCHA=ghShared.RECAPTCHA_SITEID))
if __name__ == "__main__":
main()
|
pwillworth/galaxyharvester
|
html/recipe.py
|
Python
|
gpl-3.0
| 9,692
|
[
"Galaxy"
] |
70793a8d4187d63133d5eee97d26a7b4428bd210e3450fa6ac1b6f34dbea97d5
|
import torch
import torch.nn as nn
from mushroom_rl.utils.features import uniform_grid
from mushroom_rl.utils.torch import to_float_tensor, to_int_tensor
class GaussianRBFTensor(nn.Module):
"""
Pytorch module to implement a gaussian radial basis function.
"""
def __init__(self, mu, scale, dim, use_cuda):
"""
Constructor.
Args:
mu (np.ndarray): centers of the gaussian RBFs;
scale (np.ndarray): scales for the RBFs;
dim (np.ndarray): list of dimension to be considered for the computation of the features;
use_cuda (bool): whether to use cuda for the computation or not.
"""
self._mu = to_float_tensor(mu, use_cuda)
self._scale = to_float_tensor(scale, use_cuda)
if dim is not None:
self._dim = to_int_tensor(dim, use_cuda)
else:
self._dim = None
self._use_cuda = use_cuda
def forward(self, x):
if self._use_cuda:
x = x.cuda()
if self._dim is not None:
x = torch.index_select(x, 1, self._dim)
x = x.unsqueeze(1).repeat(1, self._mu.shape[0], 1)
delta = x - self._mu.repeat(x.shape[0], 1, 1)
return torch.exp(-torch.sum(delta**2 / self._scale, -1)).squeeze(-1)
@staticmethod
def generate(n_centers, low, high, dimensions=None, use_cuda=False):
"""
Factory method that generates the list of dictionaries to build the
tensors representing a set of uniformly spaced Gaussian radial basis
functions with a 25% overlap.
Args:
n_centers (list): list of the number of radial basis functions to be
used for each dimension;
low (np.ndarray): lowest value for each dimension;
high (np.ndarray): highest value for each dimension;
dimensions (list, None): list of the dimensions of the input to be
considered by the feature. The number of dimensions must match
the number of elements in ``n_centers`` and ``low``;
use_cuda (bool): whether to use cuda for the computation or not.
Returns:
The list of dictionaries as described above.
"""
n_features = len(low)
assert len(n_centers) == n_features
assert len(low) == len(high)
assert dimensions is None or n_features == len(dimensions)
mu, scale = uniform_grid(n_centers, low, high)
tensor_list = [GaussianRBFTensor(mu, scale, dimensions, use_cuda)]
return tensor_list
@property
def size(self):
return self._mu.shape[0]
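# Illustrative usage (sketch): build a 5x5 grid of RBFs over the unit square and
# evaluate a small batch of 2D states; the inputs below are made up for the example.
#
#     import numpy as np
#     rbf = GaussianRBFTensor.generate([5, 5], np.array([0., 0.]), np.array([1., 1.]))[0]
#     phi = rbf(torch.tensor([[0.2, 0.7], [0.5, 0.5]]))   # tensor of shape (2, 25)
#     n_features = rbf.size                               # 25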
|
carloderamo/mushroom
|
mushroom_rl/features/tensors/gaussian_tensor.py
|
Python
|
mit
| 2,669
|
[
"Gaussian"
] |
fd6dfe75a5204aa05357f37bca916fcf8b8cb6df8798daee72668b7f761d55ae
|
"""
Integration with native distribution package managers.
@since: 0.28
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, platform, re, subprocess, sys
from zeroinstall.injector import namespaces, model, arch, qdom
from zeroinstall.support import basedir, portable_rename, intern
_dotted_ints = '[0-9]+(?:\.[0-9]+)*'
# This matches a version number that would be a valid Zero Install version without modification
_zeroinstall_regexp = '(?:%s)(?:-(?:pre|rc|post|)(?:%s))*' % (_dotted_ints, _dotted_ints)
# This matches the interesting bits of distribution version numbers
# (first matching group is for Java-style 6b17 or 7u9 syntax, or "major")
_version_regexp = '(?:[a-z])?({ints}[bu])?({zero})(-r{ints})?'.format(zero = _zeroinstall_regexp, ints = _dotted_ints)
_PYTHON_URI = 'http://repo.roscidus.com/python/python'
# We try to do updates atomically without locking, but we don't worry too much about
# duplicate entries or being a little out of sync with the on-disk copy.
class Cache(object):
def __init__(self, cache_leaf, source, format):
"""Maintain a cache file (e.g. ~/.cache/0install.net/injector/$name).
If the size or mtime of $source has changed, or the cache
		format version is different, reset the cache first."""
self.cache_leaf = cache_leaf
self.source = source
self.format = format
self.cache_dir = basedir.save_cache_path(namespaces.config_site,
namespaces.config_prog)
self.cached_for = {} # Attributes of source when cache was created
try:
self._load_cache()
except Exception as ex:
logger.info(_("Failed to load cache (%s). Flushing..."), ex)
self.flush()
def flush(self):
# Wipe the cache
try:
info = os.stat(self.source)
mtime = int(info.st_mtime)
size = info.st_size
except Exception as ex:
logger.warning("Failed to stat %s: %s", self.source, ex)
mtime = size = 0
self.cache = {}
import tempfile
tmp = tempfile.NamedTemporaryFile(mode = 'wt', dir = self.cache_dir, delete = False)
tmp.write("mtime=%d\nsize=%d\nformat=%d\n\n" % (mtime, size, self.format))
tmp.close()
portable_rename(tmp.name, os.path.join(self.cache_dir, self.cache_leaf))
self._load_cache()
# Populate self.cache from our saved cache file.
# Throws an exception if the cache doesn't exist or has the wrong format.
def _load_cache(self):
self.cache = cache = {}
with open(os.path.join(self.cache_dir, self.cache_leaf)) as stream:
for line in stream:
line = line.strip()
if not line:
break
key, value = line.split('=', 1)
if key in ('mtime', 'size', 'format'):
self.cached_for[key] = int(value)
self._check_valid()
for line in stream:
key, value = line.split('=', 1)
cache[key] = value[:-1]
# Check the source file hasn't changed since we created the cache
def _check_valid(self):
info = os.stat(self.source)
if self.cached_for['mtime'] != int(info.st_mtime):
raise Exception("Modification time of %s has changed" % self.source)
if self.cached_for['size'] != info.st_size:
raise Exception("Size of %s has changed" % self.source)
if self.cached_for.get('format', None) != self.format:
raise Exception("Format of cache has changed")
def get(self, key):
try:
self._check_valid()
except Exception as ex:
logger.info(_("Cache needs to be refreshed: %s"), ex)
self.flush()
return None
else:
return self.cache.get(key, None)
def put(self, key, value):
cache_path = os.path.join(self.cache_dir, self.cache_leaf)
self.cache[key] = value
try:
with open(cache_path, 'a') as stream:
stream.write('%s=%s\n' % (key, value))
except Exception as ex:
logger.warning("Failed to write to cache %s: %s=%s: %s", cache_path, key, value, ex)
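# Illustrative usage (sketch): the Debian backend further down keys a Cache on the
# dpkg status file (typically /var/lib/dpkg/status); keys and values are plain strings.
#
#     cache = Cache('dpkg-status.cache', '/var/lib/dpkg/status', 2)
#     if cache.get('bash') is None:
#         cache.put('bash', '4.2-2\tx86_64')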
def try_cleanup_distro_version(version):
"""Try to turn a distribution version string into one readable by Zero Install.
We do this by stripping off anything we can't parse.
@return: the part we understood, or None if we couldn't parse anything
@rtype: str"""
if ':' in version:
version = version.split(':')[1] # Skip 'epoch'
version = version.replace('_', '-')
if '~' in version:
version, suffix = version.split('~', 1)
if suffix.startswith('pre'):
suffix = suffix[3:]
suffix = '-pre' + (try_cleanup_distro_version(suffix) or '')
else:
suffix = ''
match = re.match(_version_regexp, version)
if match:
major, version, revision = match.groups()
if major is not None:
version = major[:-1] + '.' + version
if revision is not None:
version = '%s-%s' % (version, revision[2:])
return version + suffix
return None
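# Examples (approximate, for illustration only):
#
#     try_cleanup_distro_version('1:0.3.1-1')   ->  '0.3.1-1'   (epoch stripped)
#     try_cleanup_distro_version('1.0~pre5')    ->  '1.0-pre5'  (pre-release suffix kept)
#     try_cleanup_distro_version('default')     ->  None        (nothing parseable)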
class Distribution(object):
"""Represents a distribution with which we can integrate.
Sub-classes should specialise this to integrate with the package managers of
particular distributions. This base class ignores the native package manager.
@since: 0.28
@ivar name: the default value for Implementation.distro_name for our implementations
@type name: str
"""
_packagekit = None
def get_package_info(self, package, factory):
"""Get information about the given package.
Add zero or more implementations using the factory (typically at most two
will be added; the currently installed version and the latest available).
@param package: package name (e.g. "gimp")
@type package: str
@param factory: function for creating new DistributionImplementation objects from IDs
@type factory: str -> L{model.DistributionImplementation}
"""
return
def get_score(self, distribution):
"""Indicate how closely the host distribution matches this one.
The <package-implementation> with the highest score is passed
to L{Distribution.get_package_info}. If several elements get
the same score, get_package_info is called for all of them.
@param distribution: a distribution name
@type distribution: str
@return: an integer, or -1 if there is no match at all
@rtype: int
"""
return 0
def get_feed(self, master_feed):
"""Generate a feed containing information about distribution packages.
This should immediately return a feed containing an implementation for the
package if it's already installed. Information about versions that could be
installed using the distribution's package manager can be added asynchronously
later (see L{fetch_candidates}).
@param master_feed: feed containing the <package-implementation> elements
@type master_feed: L{model.ZeroInstallFeed}
@rtype: L{model.ZeroInstallFeed}"""
feed = model.ZeroInstallFeed(None)
feed.url = 'distribution:' + master_feed.url
for item, item_attrs, depends in master_feed.get_package_impls(self):
package = item_attrs.get('package', None)
if package is None:
raise model.InvalidInterface(_("Missing 'package' attribute on %s") % item)
new_impls = []
def factory(id, only_if_missing = False, installed = True):
assert id.startswith('package:')
if id in feed.implementations:
if only_if_missing:
return None
logger.warning(_("Duplicate ID '%s' for DistributionImplementation"), id)
impl = model.DistributionImplementation(feed, id, self, item)
feed.implementations[id] = impl
new_impls.append(impl)
impl.installed = installed
impl.metadata = item_attrs
impl.requires = depends
if 'run' not in impl.commands:
item_main = item_attrs.get('main', None)
if item_main:
if item_main.startswith('/'):
impl.main = item_main
else:
raise model.InvalidInterface(_("'main' attribute must be absolute, but '%s' doesn't start with '/'!") %
item_main)
impl.upstream_stability = model.packaged
return impl
self.get_package_info(package, factory)
for impl in new_impls:
self.fixup(package, impl)
if impl.installed:
self.installed_fixup(impl)
if master_feed.url == _PYTHON_URI and os.name != "nt":
# Hack: we can support Python on platforms with unsupported package managers
# by adding the implementation of Python running us now to the list.
python_version = '.'.join([str(v) for v in sys.version_info if isinstance(v, int)])
impl_id = 'package:host:python:' + python_version
assert impl_id not in feed.implementations
impl = model.DistributionImplementation(feed, impl_id, self, distro_name = 'host')
impl.installed = True
impl.version = model.parse_version(python_version)
impl.main = sys.executable
impl.upstream_stability = model.packaged
impl.machine = host_machine # (hopefully)
feed.implementations[impl_id] = impl
elif master_feed.url == 'http://repo.roscidus.com/python/python-gobject' and os.name != "nt":
# Likewise, we know that there is a native python-gobject available for our Python
from zeroinstall import gobject
impl_id = 'package:host:python-gobject:' + '.'.join(str(x) for x in gobject.pygobject_version)
assert impl_id not in feed.implementations
impl = model.DistributionImplementation(feed, impl_id, self, distro_name = 'host')
impl.installed = True
impl.version = [list(gobject.pygobject_version)]
impl.upstream_stability = model.packaged
impl.machine = host_machine # (hopefully)
# Requires our version of Python too
restriction_element = qdom.Element(namespaces.XMLNS_IFACE, 'restricts', {'interface': _PYTHON_URI, 'distribution': 'host'})
impl.requires.append(model.process_depends(restriction_element, None))
feed.implementations[impl_id] = impl
return feed
def fetch_candidates(self, master_feed):
"""Collect information about versions we could install using
the distribution's package manager. On success, the distribution
feed in iface_cache is updated.
@return: a L{tasks.Blocker} if the task is in progress, or None if not"""
if self.packagekit.available:
package_names = [item.getAttribute("package") for item, item_attrs, depends in master_feed.get_package_impls(self)]
return self.packagekit.fetch_candidates(package_names)
@property
def packagekit(self):
"""For use by subclasses.
@rtype: L{packagekit.PackageKit}"""
if not self._packagekit:
from zeroinstall.injector import packagekit
self._packagekit = packagekit.PackageKit()
return self._packagekit
def fixup(self, package, impl):
"""Some packages require special handling (e.g. Java). This is called for each
package that was added by L{get_package_info} after it returns. The default
method does nothing.
@param package: the name of the package
@param impl: the constructed implementation"""
pass
def installed_fixup(self, impl):
"""Called when an installed package is added (after L{fixup}), or when installation
completes. This is useful to fix up the main value.
@type impl: L{DistributionImplementation}
@since: 1.11"""
pass
def get_score(self, distro_name):
return int(distro_name == self.name)
class WindowsDistribution(Distribution):
name = 'Windows'
def get_package_info(self, package, factory):
def _is_64bit_windows():
p = sys.platform
from win32process import IsWow64Process
if p == 'win64' or (p == 'win32' and IsWow64Process()): return True
elif p == 'win32': return False
else: raise Exception(_("WindowsDistribution may only be used on the Windows platform"))
def _read_hklm_reg(key_name, value_name):
from win32api import RegOpenKeyEx, RegQueryValueEx, RegCloseKey
from win32con import HKEY_LOCAL_MACHINE, KEY_READ
KEY_WOW64_64KEY = 0x0100
KEY_WOW64_32KEY = 0x0200
if _is_64bit_windows():
try:
key32 = RegOpenKeyEx(HKEY_LOCAL_MACHINE, key_name, 0, KEY_READ | KEY_WOW64_32KEY)
(value32, _) = RegQueryValueEx(key32, value_name)
RegCloseKey(key32)
except:
value32 = ''
try:
key64 = RegOpenKeyEx(HKEY_LOCAL_MACHINE, key_name, 0, KEY_READ | KEY_WOW64_64KEY)
(value64, _) = RegQueryValueEx(key64, value_name)
RegCloseKey(key64)
except:
value64 = ''
else:
try:
key32 = RegOpenKeyEx(HKEY_LOCAL_MACHINE, key_name, 0, KEY_READ)
(value32, _) = RegQueryValueEx(key32, value_name)
RegCloseKey(key32)
except:
value32 = ''
value64 = ''
return (value32, value64)
def find_java(part, win_version, zero_version):
reg_path = r"SOFTWARE\JavaSoft\{part}\{win_version}".format(part = part, win_version = win_version)
(java32_home, java64_home) = _read_hklm_reg(reg_path, "JavaHome")
for (home, arch) in [(java32_home, 'i486'), (java64_home, 'x86_64')]:
if os.path.isfile(home + r"\bin\java.exe"):
impl = factory('package:windows:%s:%s:%s' % (package, zero_version, arch))
impl.machine = arch
impl.version = model.parse_version(zero_version)
impl.upstream_stability = model.packaged
impl.main = home + r"\bin\java.exe"
def find_netfx(win_version, zero_version):
reg_path = r"SOFTWARE\Microsoft\NET Framework Setup\NDP\{win_version}".format(win_version = win_version)
(netfx32_install, netfx64_install) = _read_hklm_reg(reg_path, "Install")
for (install, arch) in [(netfx32_install, 'i486'), (netfx64_install, 'x86_64')]:
impl = factory('package:windows:%s:%s:%s' % (package, zero_version, arch))
impl.installed = (install == 1)
impl.machine = arch
impl.version = model.parse_version(zero_version)
impl.upstream_stability = model.packaged
impl.main = "" # .NET executables do not need a runner on Windows but they need one elsewhere
def find_netfx_release(win_version, release_version, zero_version):
reg_path = r"SOFTWARE\Microsoft\NET Framework Setup\NDP\{win_version}".format(win_version = win_version)
(netfx32_install, netfx64_install) = _read_hklm_reg(reg_path, "Install")
(netfx32_release, netfx64_release) = _read_hklm_reg(reg_path, "Release")
for (install, release, arch) in [(netfx32_install, netfx32_release, 'i486'), (netfx64_install, netfx64_release, 'x86_64')]:
impl = factory('package:windows:%s:%s:%s' % (package, zero_version, arch))
impl.installed = (install == 1 and release != '' and release >= release_version)
impl.machine = arch
impl.version = model.parse_version(zero_version)
impl.upstream_stability = model.packaged
impl.main = "" # .NET executables do not need a runner on Windows but they need one elsewhere
if package == 'openjdk-6-jre':
find_java("Java Runtime Environment", "1.6", '6')
elif package == 'openjdk-6-jdk':
find_java("Java Development Kit", "1.6", '6')
elif package == 'openjdk-7-jre':
find_java("Java Runtime Environment", "1.7", '7')
elif package == 'openjdk-7-jdk':
find_java("Java Development Kit", "1.7", '7')
elif package == 'netfx':
find_netfx("v2.0.50727", '2.0')
find_netfx("v3.0", '3.0')
find_netfx("v3.5", '3.5')
find_netfx("v4\\Full", '4.0')
find_netfx_release("v4\\Full", 378389, '4.5')
find_netfx("v5", '5.0')
elif package == 'netfx-client':
find_netfx("v4\\Client", '4.0')
find_netfx_release("v4\\Client", 378389, '4.5')
class DarwinDistribution(Distribution):
"""@since: 1.11"""
name = 'Darwin'
def get_package_info(self, package, factory):
def java_home(version, arch):
null = os.open(os.devnull, os.O_WRONLY)
child = subprocess.Popen(["/usr/libexec/java_home", "--failfast", "--version", version, "--arch", arch],
stdout = subprocess.PIPE, stderr = null, universal_newlines = True)
home = child.stdout.read().strip()
child.stdout.close()
child.wait()
return home
def find_java(part, jvm_version, zero_version):
for arch in ['i386', 'x86_64']:
home = java_home(jvm_version, arch)
if os.path.isfile(home + "/bin/java"):
impl = factory('package:darwin:%s:%s:%s' % (package, zero_version, arch))
impl.machine = arch
impl.version = model.parse_version(zero_version)
impl.upstream_stability = model.packaged
impl.main = home + "/bin/java"
if package == 'openjdk-6-jre':
find_java("Java Runtime Environment", "1.6", '6')
elif package == 'openjdk-6-jdk':
find_java("Java Development Kit", "1.6", '6')
elif package == 'openjdk-7-jre':
find_java("Java Runtime Environment", "1.7", '7')
elif package == 'openjdk-7-jdk':
find_java("Java Development Kit", "1.7", '7')
def get_output(args):
child = subprocess.Popen(args, stdout = subprocess.PIPE, universal_newlines = True)
return child.communicate()[0]
def get_version(program):
stdout = get_output([program, "--version"])
return stdout.strip().split('\n')[0].split()[-1] # the last word of the first line
def find_program(file):
if os.path.isfile(file) and os.access(file, os.X_OK):
program_version = try_cleanup_distro_version(get_version(file))
impl = factory('package:darwin:%s:%s' % (package, program_version), True)
if impl:
impl.installed = True
impl.version = model.parse_version(program_version)
impl.upstream_stability = model.packaged
impl.machine = host_machine # (hopefully)
impl.main = file
if package == 'gnupg':
find_program("/usr/local/bin/gpg")
elif package == 'gnupg2':
find_program("/usr/local/bin/gpg2")
class CachedDistribution(Distribution):
"""For distributions where querying the package database is slow (e.g. requires running
an external command), we cache the results.
@since: 0.39
@deprecated: use Cache instead
"""
def __init__(self, db_status_file):
"""@param db_status_file: update the cache when the timestamp of this file changes"""
self._status_details = os.stat(db_status_file)
self.versions = {}
self.cache_dir = basedir.save_cache_path(namespaces.config_site,
namespaces.config_prog)
try:
self._load_cache()
except Exception as ex:
logger.info(_("Failed to load distribution database cache (%s). Regenerating..."), ex)
try:
self.generate_cache()
self._load_cache()
except Exception as ex:
logger.warning(_("Failed to regenerate distribution database cache: %s"), ex)
def _load_cache(self):
"""Load {cache_leaf} cache file into self.versions if it is available and up-to-date.
Throws an exception if the cache should be (re)created."""
with open(os.path.join(self.cache_dir, self.cache_leaf), 'rt') as stream:
cache_version = None
for line in stream:
if line == '\n':
break
name, value = line.split(': ')
if name == 'mtime' and int(value) != int(self._status_details.st_mtime):
raise Exception(_("Modification time of package database file has changed"))
if name == 'size' and int(value) != self._status_details.st_size:
raise Exception(_("Size of package database file has changed"))
if name == 'version':
cache_version = int(value)
else:
raise Exception(_('Invalid cache format (bad header)'))
if cache_version is None:
raise Exception(_('Old cache format'))
versions = self.versions
for line in stream:
package, version, zi_arch = line[:-1].split('\t')
versionarch = (version, intern(zi_arch))
if package not in versions:
versions[package] = [versionarch]
else:
versions[package].append(versionarch)
def _write_cache(self, cache):
#cache.sort() # Might be useful later; currently we don't care
import tempfile
fd, tmpname = tempfile.mkstemp(prefix = 'zeroinstall-cache-tmp',
dir = self.cache_dir)
try:
stream = os.fdopen(fd, 'wt')
stream.write('version: 2\n')
stream.write('mtime: %d\n' % int(self._status_details.st_mtime))
stream.write('size: %d\n' % self._status_details.st_size)
stream.write('\n')
for line in cache:
stream.write(line + '\n')
stream.close()
portable_rename(tmpname,
os.path.join(self.cache_dir,
self.cache_leaf))
except:
os.unlink(tmpname)
raise
# Maps machine type names used in packages to their Zero Install versions
# (updates to this might require changing the reverse Java mapping)
_canonical_machine = {
'all' : '*',
'any' : '*',
'noarch' : '*',
'(none)' : '*',
'x86_64': 'x86_64',
'amd64': 'x86_64',
'i386': 'i386',
'i486': 'i486',
'i586': 'i586',
'i686': 'i686',
'ppc64': 'ppc64',
'ppc': 'ppc',
}
host_machine = arch.canonicalize_machine(platform.uname()[4])
def canonical_machine(package_machine):
machine = _canonical_machine.get(package_machine.lower(), None)
if machine is None:
# Safe default if we can't understand the arch
return host_machine.lower()
return machine
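# A minimal usage sketch for canonical_machine(); the expected values follow
# directly from the _canonical_machine table and the fallback above (the
# 'armv7l' input is just an example of an unrecognised name):
def _example_canonical_machine_usage():
    assert canonical_machine('amd64') == 'x86_64'
    assert canonical_machine('noarch') == '*'
    # Unknown names fall back to the lower-cased host machine.
    assert canonical_machine('armv7l') == host_machine.lower()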
class DebianDistribution(Distribution):
"""A dpkg-based distribution."""
name = 'Debian'
cache_leaf = 'dpkg-status.cache'
def __init__(self, dpkg_status):
self.dpkg_cache = Cache('dpkg-status.cache', dpkg_status, 2)
self.apt_cache = {}
def _query_installed_package(self, package):
null = os.open(os.devnull, os.O_WRONLY)
child = subprocess.Popen(["dpkg-query", "-W", "--showformat=${Version}\t${Architecture}\t${Status}\n", "--", package],
stdout = subprocess.PIPE, stderr = null,
universal_newlines = True) # Needed for Python 3
os.close(null)
stdout, stderr = child.communicate()
child.wait()
for line in stdout.split('\n'):
if not line: continue
version, debarch, status = line.split('\t', 2)
if not status.endswith(' installed'): continue
clean_version = try_cleanup_distro_version(version)
if debarch.find("-") != -1:
debarch = debarch.split("-")[-1]
if clean_version:
return '%s\t%s' % (clean_version, canonical_machine(debarch.strip()))
else:
logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})
return '-'
def get_package_info(self, package, factory):
# Add any already-installed package...
installed_cached_info = self._get_dpkg_info(package)
if installed_cached_info != '-':
installed_version, machine = installed_cached_info.split('\t')
impl = factory('package:deb:%s:%s:%s' % (package, installed_version, machine))
impl.version = model.parse_version(installed_version)
if machine != '*':
impl.machine = machine
else:
installed_version = None
# Add any uninstalled candidates (note: only one of these two methods will add anything)
# From PackageKit...
self.packagekit.get_candidates(package, factory, 'package:deb')
# From apt-cache...
cached = self.apt_cache.get(package, None)
if cached:
candidate_version = cached['version']
candidate_arch = cached['arch']
if candidate_version and candidate_version != installed_version:
impl = factory('package:deb:%s:%s:%s' % (package, candidate_version, candidate_arch), installed = False)
impl.version = model.parse_version(candidate_version)
if candidate_arch != '*':
impl.machine = candidate_arch
def install(handler):
raise model.SafeException(_("This program depends on '%s', which is a package that is available through your distribution. "
"Please install it manually using your distribution's tools and try again. Or, install 'packagekit' and I can "
"use that to install it.") % package)
impl.download_sources.append(model.DistributionSource(package, cached['size'], install, needs_confirmation = False))
def fixup(self, package, impl):
if impl.id.startswith('package:deb:openjdk-6-jre:') or \
impl.id.startswith('package:deb:openjdk-7-jre:'):
# Debian marks all Java versions as pre-releases
# See: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=685276
impl.version = model.parse_version(impl.get_version().replace('-pre', '.'))
def installed_fixup(self, impl):
# Hack: If we added any Java implementations, find the corresponding JAVA_HOME...
if impl.id.startswith('package:deb:openjdk-6-jre:'):
java_version = '6-openjdk'
elif impl.id.startswith('package:deb:openjdk-7-jre:'):
java_version = '7-openjdk'
else:
return
if impl.machine == 'x86_64':
java_arch = 'amd64'
else:
java_arch = impl.machine
java_bin = '/usr/lib/jvm/java-%s-%s/jre/bin/java' % (java_version, java_arch)
if not os.path.exists(java_bin):
# Try without the arch...
java_bin = '/usr/lib/jvm/java-%s/jre/bin/java' % java_version
if not os.path.exists(java_bin):
logger.info("Java binary not found (%s)", java_bin)
if impl.main is None:
java_bin = '/usr/bin/java'
else:
return
impl.commands["run"] = model.Command(qdom.Element(namespaces.XMLNS_IFACE, 'command',
{'path': java_bin, 'name': 'run'}), None)
def _get_dpkg_info(self, package):
installed_cached_info = self.dpkg_cache.get(package)
if installed_cached_info == None:
installed_cached_info = self._query_installed_package(package)
self.dpkg_cache.put(package, installed_cached_info)
return installed_cached_info
def fetch_candidates(self, master_feed):
package_names = [item.getAttribute("package") for item, item_attrs, depends in master_feed.get_package_impls(self)]
if self.packagekit.available:
return self.packagekit.fetch_candidates(package_names)
# No PackageKit. Use apt-cache directly.
for package in package_names:
# Check to see whether we could get a newer version using apt-get
try:
null = os.open(os.devnull, os.O_WRONLY)
child = subprocess.Popen(['apt-cache', 'show', '--no-all-versions', '--', package], stdout = subprocess.PIPE, stderr = null, universal_newlines = True)
os.close(null)
arch = version = size = None
for line in child.stdout:
line = line.strip()
if line.startswith('Version: '):
version = line[9:]
version = try_cleanup_distro_version(version)
elif line.startswith('Architecture: '):
arch = canonical_machine(line[14:].strip())
elif line.startswith('Size: '):
size = int(line[6:].strip())
if version and arch:
cached = {'version': version, 'arch': arch, 'size': size}
else:
cached = None
child.stdout.close()
child.wait()
except Exception as ex:
logger.warning("'apt-cache show %s' failed: %s", package, ex)
cached = None
# (multi-arch support? can there be multiple candidates?)
self.apt_cache[package] = cached
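# Only three fields of each `apt-cache show` record are consumed above. A
# minimal sketch of that parsing step on its own, for one record held as a
# string (the record text is hypothetical; exact apt-cache output varies
# between systems):
def _example_parse_apt_cache_record(record_text):
    version = zi_arch = size = None
    for line in record_text.splitlines():
        line = line.strip()
        if line.startswith('Version: '):
            version = try_cleanup_distro_version(line[9:])
        elif line.startswith('Architecture: '):
            zi_arch = canonical_machine(line[14:].strip())
        elif line.startswith('Size: '):
            size = int(line[6:].strip())
    if version and zi_arch:
        return {'version': version, 'arch': zi_arch, 'size': size}
    return None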
class RPMDistribution(CachedDistribution):
"""An RPM-based distribution."""
name = 'RPM'
cache_leaf = 'rpm-status.cache'
def generate_cache(self):
cache = []
child = subprocess.Popen(["rpm", "-qa", "--qf=%{NAME}\t%{VERSION}-%{RELEASE}\t%{ARCH}\n"],
stdout = subprocess.PIPE, universal_newlines = True)
for line in child.stdout:
package, version, rpmarch = line.split('\t', 2)
if package == 'gpg-pubkey':
continue
zi_arch = canonical_machine(rpmarch.strip())
clean_version = try_cleanup_distro_version(version)
if clean_version:
cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
else:
logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})
self._write_cache(cache)
child.stdout.close()
child.wait()
def get_package_info(self, package, factory):
# Add installed versions...
versions = self.versions.get(package, [])
for version, machine in versions:
impl = factory('package:rpm:%s:%s:%s' % (package, version, machine))
impl.version = model.parse_version(version)
if machine != '*':
impl.machine = machine
# Add any uninstalled candidates found by PackageKit
self.packagekit.get_candidates(package, factory, 'package:rpm')
def installed_fixup(self, impl):
# OpenSUSE uses _, Fedora uses .
impl_id = impl.id.replace('_', '.')
# Hack: If we added any Java implementations, find the corresponding JAVA_HOME...
if impl_id.startswith('package:rpm:java-1.6.0-openjdk:'):
java_version = '1.6.0-openjdk'
elif impl_id.startswith('package:rpm:java-1.7.0-openjdk:'):
java_version = '1.7.0-openjdk'
else:
return
# On Fedora, unlike Debian, the arch is x86_64, not amd64
java_bin = '/usr/lib/jvm/jre-%s.%s/bin/java' % (java_version, impl.machine)
if not os.path.exists(java_bin):
# Try without the arch...
java_bin = '/usr/lib/jvm/jre-%s/bin/java' % java_version
if not os.path.exists(java_bin):
logger.info("Java binary not found (%s)", java_bin)
if impl.main is None:
java_bin = '/usr/bin/java'
else:
return
impl.commands["run"] = model.Command(qdom.Element(namespaces.XMLNS_IFACE, 'command',
{'path': java_bin, 'name': 'run'}), None)
def fixup(self, package, impl):
# OpenSUSE uses _, Fedora uses .
package = package.replace('_', '.')
if package in ('java-1.6.0-openjdk', 'java-1.7.0-openjdk',
'java-1.6.0-openjdk-devel', 'java-1.7.0-openjdk-devel'):
if impl.version[0][0] == 1:
# OpenSUSE uses 1.6 to mean 6
del impl.version[0][0]
class SlackDistribution(Distribution):
"""A Slack-based distribution."""
name = 'Slack'
def __init__(self, packages_dir):
self._packages_dir = packages_dir
def get_package_info(self, package, factory):
# Add installed versions...
for entry in os.listdir(self._packages_dir):
name, version, arch, build = entry.rsplit('-', 3)
if name == package:
zi_arch = canonical_machine(arch)
clean_version = try_cleanup_distro_version("%s-%s" % (version, build))
if not clean_version:
logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': name})
continue
impl = factory('package:slack:%s:%s:%s' % \
(package, clean_version, zi_arch))
impl.version = model.parse_version(clean_version)
if zi_arch != '*':
impl.machine = zi_arch
# Add any uninstalled candidates found by PackageKit
self.packagekit.get_candidates(package, factory, 'package:slack')
class ArchDistribution(Distribution):
"""An Arch Linux distribution."""
name = 'Arch'
def __init__(self, packages_dir):
self._packages_dir = os.path.join(packages_dir, "local")
def get_package_info(self, package, factory):
# Add installed versions...
for entry in os.listdir(self._packages_dir):
name, version, build = entry.rsplit('-', 2)
if name == package:
gotarch = False
with open(os.path.join(self._packages_dir, entry, "desc"), 'rt') as stream:
for line in stream:
if line == "%ARCH%\n":
gotarch = True
continue
if gotarch:
arch = line.strip()
break
zi_arch = canonical_machine(arch)
clean_version = try_cleanup_distro_version("%s-%s" % (version, build))
if not clean_version:
logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': name})
continue
impl = factory('package:arch:%s:%s:%s' % \
(package, clean_version, zi_arch))
impl.version = model.parse_version(clean_version)
if zi_arch != '*':
impl.machine = zi_arch
# Add any uninstalled candidates found by PackageKit
self.packagekit.get_candidates(package, factory, 'package:arch')
class GentooDistribution(Distribution):
name = 'Gentoo'
def __init__(self, pkgdir):
self._pkgdir = pkgdir
def get_package_info(self, package, factory):
# Add installed versions...
_version_start_reqexp = '-[0-9]'
if package.count('/') != 1: return
category, leafname = package.split('/')
category_dir = os.path.join(self._pkgdir, category)
match_prefix = leafname + '-'
if not os.path.isdir(category_dir): return
for filename in os.listdir(category_dir):
if filename.startswith(match_prefix) and filename[len(match_prefix)].isdigit():
with open(os.path.join(category_dir, filename, 'PF'), 'rt') as stream:
name = stream.readline().strip()
match = re.search(_version_start_reqexp, name)
if match is None:
logger.warning(_('Cannot parse version from Gentoo package named "%(name)s"'), {'name': name})
continue
else:
version = try_cleanup_distro_version(name[match.start() + 1:])
if category == 'app-emulation' and name.startswith('emul-'):
__, __, machine, __ = name.split('-', 3)
else:
with open(os.path.join(category_dir, filename, 'CHOST'), 'rt') as stream:
machine, __ = stream.readline().split('-', 1)
machine = arch.canonicalize_machine(machine)
impl = factory('package:gentoo:%s:%s:%s' % \
(package, version, machine))
impl.version = model.parse_version(version)
impl.machine = machine
# Add any uninstalled candidates found by PackageKit
self.packagekit.get_candidates(package, factory, 'package:gentoo')
class PortsDistribution(Distribution):
name = 'Ports'
def __init__(self, pkgdir):
self._pkgdir = pkgdir
def get_package_info(self, package, factory):
_name_version_regexp = '^(.+)-([^-]+)$'
nameversion = re.compile(_name_version_regexp)
for pkgname in os.listdir(self._pkgdir):
pkgdir = os.path.join(self._pkgdir, pkgname)
if not os.path.isdir(pkgdir): continue
#contents = open(os.path.join(pkgdir, '+CONTENTS')).readline().strip()
match = nameversion.search(pkgname)
if match is None:
logger.warning(_('Cannot parse version from Ports package named "%(pkgname)s"'), {'pkgname': pkgname})
continue
else:
name = match.group(1)
if name != package:
continue
version = try_cleanup_distro_version(match.group(2))
machine = host_machine
impl = factory('package:ports:%s:%s:%s' % \
(package, version, machine))
impl.version = model.parse_version(version)
impl.machine = machine
class MacPortsDistribution(CachedDistribution):
name = 'MacPorts'
def __init__(self, db_status_file):
super(MacPortsDistribution, self).__init__(db_status_file)
self.darwin = DarwinDistribution()
cache_leaf = 'macports-status.cache'
def generate_cache(self):
cache = []
child = subprocess.Popen(["port", "-v", "installed"],
stdout = subprocess.PIPE, universal_newlines = True)
for line in child.stdout:
if not line.startswith(" "):
continue
if line.strip().count(" ") > 1:
package, version, extra = line.split(None, 2)
else:
package, version = line.split()
extra = ""
if not extra.startswith("(active)"):
continue
version = version.lstrip('@')
version = re.sub(r"\+.*", "", version) # strip variants
zi_arch = '*'
clean_version = try_cleanup_distro_version(version)
if clean_version:
match = re.match(r" platform='([^' ]*)( \d+)?' archs='([^']*)'", extra)
if match:
platform, major, archs = match.groups()
for arch in archs.split():
zi_arch = canonical_machine(arch)
cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
else:
cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
else:
logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})
self._write_cache(cache)
child.stdout.close()
child.wait()
def get_package_info(self, package, factory):
self.darwin.get_package_info(package, factory)
# Add installed versions...
versions = self.versions.get(package, [])
for version, machine in versions:
impl = factory('package:macports:%s:%s:%s' % (package, version, machine))
impl.version = model.parse_version(version)
if machine != '*':
impl.machine = machine
def get_score(self, distro_name):
# We support both sources of packages.
# In theory, we should route 'Darwin' package names to DarwinDistribution, and
# Mac Ports names to MacPortsDistribution. But since we only use Darwin for Java,
# having one object handle both is OK.
return int(distro_name in ('Darwin', 'MacPorts'))
class CygwinDistribution(CachedDistribution):
"""A Cygwin-based distribution."""
name = 'Cygwin'
cache_leaf = 'cygcheck-status.cache'
def generate_cache(self):
cache = []
		zi_arch = canonical_machine(host_machine)
for line in os.popen("cygcheck -c -d"):
if line == "Cygwin Package Information\r\n":
continue
if line == "\n":
continue
package, version = line.split()
if package == "Package" and version == "Version":
continue
clean_version = try_cleanup_distro_version(version)
if clean_version:
cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
else:
logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})
self._write_cache(cache)
def get_package_info(self, package, factory):
# Add installed versions...
versions = self.versions.get(package, [])
for version, machine in versions:
impl = factory('package:cygwin:%s:%s:%s' % (package, version, machine))
impl.version = model.parse_version(version)
if machine != '*':
impl.machine = machine
_host_distribution = None
def get_host_distribution():
"""Get a Distribution suitable for the host operating system.
Calling this twice will return the same object.
@rtype: L{Distribution}"""
global _host_distribution
if not _host_distribution:
dpkg_db_status = '/var/lib/dpkg/status'
rpm_db_packages = '/var/lib/rpm/Packages'
_slack_db = '/var/log/packages'
_arch_db = '/var/lib/pacman'
_pkg_db = '/var/db/pkg'
_macports_db = '/opt/local/var/macports/registry/registry.db'
_cygwin_log = '/var/log/setup.log'
if sys.prefix == "/sw":
dpkg_db_status = os.path.join(sys.prefix, dpkg_db_status)
rpm_db_packages = os.path.join(sys.prefix, rpm_db_packages)
if os.name == "nt":
_host_distribution = WindowsDistribution()
elif os.path.isdir(_pkg_db):
if sys.platform.startswith("linux"):
_host_distribution = GentooDistribution(_pkg_db)
elif sys.platform.startswith("freebsd"):
_host_distribution = PortsDistribution(_pkg_db)
elif os.path.isfile(_macports_db):
_host_distribution = MacPortsDistribution(_macports_db)
elif os.path.isfile(_cygwin_log) and sys.platform == "cygwin":
_host_distribution = CygwinDistribution(_cygwin_log)
elif os.access(dpkg_db_status, os.R_OK) \
and os.path.getsize(dpkg_db_status) > 0:
_host_distribution = DebianDistribution(dpkg_db_status)
elif os.path.isfile(rpm_db_packages):
_host_distribution = RPMDistribution(rpm_db_packages)
elif os.path.isdir(_slack_db):
_host_distribution = SlackDistribution(_slack_db)
elif os.path.isdir(_arch_db):
_host_distribution = ArchDistribution(_arch_db)
elif sys.platform == "darwin":
_host_distribution = DarwinDistribution()
else:
_host_distribution = Distribution()
return _host_distribution
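# Minimal usage sketch: because of the module-level _host_distribution cache,
# repeated calls hand back the same Distribution object for this host.
def _example_get_host_distribution_usage():
    first = get_host_distribution()
    second = get_host_distribution()
    assert first is second
    return first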
|
dsqmoore/0install
|
zeroinstall/injector/distro.py
|
Python
|
lgpl-2.1
| 38,221
|
[
"VisIt"
] |
f5a4087099f4f6b61b404382c6588c989209e69ba1e9ccda7be12bb87ad2bc3e
|
import os
import shutil
import os.path as op
import pysam
# from collections import Counter
from bcbio.utils import splitext_plus, file_exists, safe_makedir, chdir
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio import broad
from bcbio.bam import index
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from ichwrapper import log
def _set_quality(in_bam):
"""
    Set the mapping quality of every read in the BAM file to 255.
"""
bam = pysam.AlignmentFile(in_bam, "rb")
out_file = op.splitext(in_bam)[0] + "_normqual.bam"
if file_exists(out_file):
return out_file
with pysam.AlignmentFile(out_file, "wb", template=bam) as out_handle:
for read in bam.fetch():
read.mapping_quality = 255
out_handle.write(read)
return out_file
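# Minimal usage sketch (the path is hypothetical): the rewritten BAM is placed
# next to the input with a "_normqual" suffix and every read's MAPQ forced to
# 255, so downstream tools do not filter bismark/bowtie2 alignments on quality.
#
#   normqual_bam = _set_quality("/data/align/sample1.bam")
#   # -> "/data/align/sample1_normqual.bam"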
def _align(in_fastq, sample, workdir, genome_index, is_directional, bowtie2, reference, config):
""" align with bismark. this is actually not used. the align is in ngsalign.bismark.align """
bismark = do.find_cmd("bismark")
resources = config_utils.get_resources("bismark")
num_cores = 1
if resources and resources.get("bismark_threads"):
num_cores = resources.get("bismark_threads")
else:
num_cores = max(int(config['algorithm'].get('cores', 1) / 2), 1)
bowtie_threads = 1
if resources and resources.get("bowtie_threads"):
bowtie_threads = resources.get("bowtie_threads")
basename = sample
if is_directional:
is_directional = ""
else:
is_directional = "--non_directional"
cmd = "{bismark} -n 1 -o {tx_dir} --basename {sample} --unmapped {is_directional} {genome_index} {in_fastq}"
if bowtie2:
cmd = "{bismark} --bowtie2 --parallel {num_cores} -p {bowtie_threads} -o {tx_dir} --basename {sample} --unmapped {is_directional} {genome_index} {in_fastq}"
out_dir = op.join(workdir, sample)
out_bam = op.join(out_dir, sample + ".bam")
with chdir(workdir):
if not file_exists(out_bam):
with tx_tmpdir() as tx_dir:
cmd = cmd.format(**locals())
log.logger.debug(cmd)
do.run(cmd, "bismark in %s" % in_fastq)
shutil.move(tx_dir, out_dir)
broad_runner = broad.runner_from_config(config)
# out_bam, _ = broad_runner.run_fn("picard_formatconverter", out_sam)
names = {'rg': in_fastq, 'library': 'BS_LIB', 'pl': 'Illumina', 'pu': 'R1', 'sm': in_fastq, 'sample': sample}
out_fix_bam = broad_runner.run_fn("picard_fix_rgs", out_bam, names)
order_bam = splitext_plus(out_fix_bam)[0] + "_order.bam"
broad_runner.run_fn("picard_reorder", out_fix_bam, reference, order_bam)
index(order_bam, config)
if bowtie2:
order_bam = _set_quality(order_bam)
index(order_bam, config)
return order_bam
def create_bam(data, args):
"""
    Run the aligner and convert its output to a BAM file.
"""
workdir = safe_makedir("align")
sample = data['name']
# workdir = op.join("align", sample)
data['final_bam'] = _align(data['trimmed'], sample, op.abspath(workdir),
args.index, args.is_directional, args.bowtie2,
args.reference, data['config'])
data['order_bam'] = data['final_bam']
return data
|
lbeltrame/bcbio-nextgen
|
bcbio/wgbsseq/align.py
|
Python
|
mit
| 3,378
|
[
"pysam"
] |
d7fdea6cf3b9c7311b2b7397323fbbbef6f2a1219007fee29938f69b6209c5c6
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_lazy as _
LINK_TYPES = ['original', 'data', 'image', 'metadata', 'html',
'OGC:WMS', 'OGC:WFS', 'OGC:WCS',]
HIERARCHY_LEVELS = (
('series', _('series')),
('software', _('computer program or routine')),
('featureType', _('feature type')),
('model', _('copy or imitation of an existing or hypothetical object')),
('collectionHardware', _('collection hardware')),
('collectionSession', _('collection session')),
('nonGeographicDataset', _('non-geographic data')),
('propertyType', _('property type')),
('fieldSession', _('field session')),
('dataset', _('dataset')),
('service', _('service interfaces')),
('attribute', _('attribute class')),
('attributeType', _('characteristic of a feature')),
('tile', _('tile or spatial subset of geographic data')),
('feature', _('feature')),
('dimensionGroup', _('dimension group')),
)
UPDATE_FREQUENCIES = (
('unknown', _('frequency of maintenance for the data is not known')),
('continual', _('data is repeatedly and frequently updated')),
('notPlanned', _('there are no plans to update the data')),
('daily', _('data is updated each day')),
('annually', _('data is updated every year')),
('asNeeded', _('data is updated as deemed necessary')),
('monthly', _('data is updated each month')),
('fortnightly', _('data is updated every two weeks')),
('irregular',
_('data is updated in intervals that are uneven in duration')),
('weekly', _('data is updated on a weekly basis')),
('biannually', _('data is updated twice each year')),
('quarterly', _('data is updated every three months')),
)
CONTACT_FIELDS = [
'name',
'organization',
'position',
'voice',
'facsimile',
'delivery_point',
'city',
'administrative_area',
'postal_code',
'country',
'email',
'role'
]
DEFAULT_SUPPLEMENTAL_INFORMATION = _('No information provided')
COUNTRIES = (
('AFG', _('Afghanistan')),
('ALA', _('Aland Islands')),
('ALB', _('Albania')),
('DZA', _('Algeria')),
('ASM', _('American Samoa')),
('AND', _('Andorra')),
('AGO', _('Angola')),
('AIA', _('Anguilla')),
('ATG', _('Antigua and Barbuda')),
('ARG', _('Argentina')),
('ARM', _('Armenia')),
('ABW', _('Aruba')),
('AUS', _('Australia')),
('AUT', _('Austria')),
('AZE', _('Azerbaijan')),
('BHS', _('Bahamas')),
('BHR', _('Bahrain')),
('BGD', _('Bangladesh')),
('BRB', _('Barbados')),
('BLR', _('Belarus')),
('BEL', _('Belgium')),
('BLZ', _('Belize')),
('BEN', _('Benin')),
('BMU', _('Bermuda')),
('BTN', _('Bhutan')),
('BOL', _('Bolivia')),
('BIH', _('Bosnia and Herzegovina')),
('BWA', _('Botswana')),
('BRA', _('Brazil')),
('VGB', _('British Virgin Islands')),
('BRN', _('Brunei Darussalam')),
('BGR', _('Bulgaria')),
('BFA', _('Burkina Faso')),
('BDI', _('Burundi')),
('KHM', _('Cambodia')),
('CMR', _('Cameroon')),
('CAN', _('Canada')),
('CPV', _('Cape Verde')),
('CYM', _('Cayman Islands')),
('CAF', _('Central African Republic')),
('TCD', _('Chad')),
('CIL', _('Channel Islands')),
('CHL', _('Chile')),
('CHN', _('China')),
('HKG', _('China - Hong Kong')),
('MAC', _('China - Macao')),
('COL', _('Colombia')),
('COM', _('Comoros')),
('COG', _('Congo')),
('COK', _('Cook Islands')),
('CRI', _('Costa Rica')),
('CIV', _('Cote d\'Ivoire')),
('HRV', _('Croatia')),
('CUB', _('Cuba')),
('CYP', _('Cyprus')),
('CZE', _('Czech Republic')),
('PRK', _('Democratic People\'s Republic of Korea')),
('COD', _('Democratic Republic of the Congo')),
('DNK', _('Denmark')),
('DJI', _('Djibouti')),
('DMA', _('Dominica')),
('DOM', _('Dominican Republic')),
('ECU', _('Ecuador')),
('EGY', _('Egypt')),
('SLV', _('El Salvador')),
('GNQ', _('Equatorial Guinea')),
('ERI', _('Eritrea')),
('EST', _('Estonia')),
('ETH', _('Ethiopia')),
('FRO', _('Faeroe Islands')),
('FLK', _('Falkland Islands (Malvinas)')),
('FJI', _('Fiji')),
('FIN', _('Finland')),
('FRA', _('France')),
('GUF', _('French Guiana')),
('PYF', _('French Polynesia')),
('GAB', _('Gabon')),
('GMB', _('Gambia')),
('GEO', _('Georgia')),
('DEU', _('Germany')),
('GHA', _('Ghana')),
('GIB', _('Gibraltar')),
('GRC', _('Greece')),
('GRL', _('Greenland')),
('GRD', _('Grenada')),
('GLP', _('Guadeloupe')),
('GUM', _('Guam')),
('GTM', _('Guatemala')),
('GGY', _('Guernsey')),
('GIN', _('Guinea')),
('GNB', _('Guinea-Bissau')),
('GUY', _('Guyana')),
('HTI', _('Haiti')),
('VAT', _('Holy See (Vatican City)')),
('HND', _('Honduras')),
('HUN', _('Hungary')),
('ISL', _('Iceland')),
('IND', _('India')),
('IDN', _('Indonesia')),
('IRN', _('Iran')),
('IRQ', _('Iraq')),
('IRL', _('Ireland')),
('IMN', _('Isle of Man')),
('ISR', _('Israel')),
('ITA', _('Italy')),
('JAM', _('Jamaica')),
('JPN', _('Japan')),
('JEY', _('Jersey')),
('JOR', _('Jordan')),
('KAZ', _('Kazakhstan')),
('KEN', _('Kenya')),
('KIR', _('Kiribati')),
('KWT', _('Kuwait')),
('KGZ', _('Kyrgyzstan')),
('LAO', _('Lao People\'s Democratic Republic')),
('LVA', _('Latvia')),
('LBN', _('Lebanon')),
('LSO', _('Lesotho')),
('LBR', _('Liberia')),
('LBY', _('Libyan Arab Jamahiriya')),
('LIE', _('Liechtenstein')),
('LTU', _('Lithuania')),
('LUX', _('Luxembourg')),
('MKD', _('Macedonia')),
('MDG', _('Madagascar')),
('MWI', _('Malawi')),
('MYS', _('Malaysia')),
('MDV', _('Maldives')),
('MLI', _('Mali')),
('MLT', _('Malta')),
('MHL', _('Marshall Islands')),
('MTQ', _('Martinique')),
('MRT', _('Mauritania')),
('MUS', _('Mauritius')),
('MYT', _('Mayotte')),
('MEX', _('Mexico')),
('FSM', _('Micronesia, Federated States of')),
('MCO', _('Monaco')),
('MNG', _('Mongolia')),
('MNE', _('Montenegro')),
('MSR', _('Montserrat')),
('MAR', _('Morocco')),
('MOZ', _('Mozambique')),
('MMR', _('Myanmar')),
('NAM', _('Namibia')),
('NRU', _('Nauru')),
('NPL', _('Nepal')),
('NLD', _('Netherlands')),
('ANT', _('Netherlands Antilles')),
('NCL', _('New Caledonia')),
('NZL', _('New Zealand')),
('NIC', _('Nicaragua')),
('NER', _('Niger')),
('NGA', _('Nigeria')),
('NIU', _('Niue')),
('NFK', _('Norfolk Island')),
('MNP', _('Northern Mariana Islands')),
('NOR', _('Norway')),
('PSE', _('Occupied Palestinian Territory')),
('OMN', _('Oman')),
('PAK', _('Pakistan')),
('PLW', _('Palau')),
('PAN', _('Panama')),
('PNG', _('Papua New Guinea')),
('PRY', _('Paraguay')),
('PER', _('Peru')),
('PHL', _('Philippines')),
('PCN', _('Pitcairn')),
('POL', _('Poland')),
('PRT', _('Portugal')),
('PRI', _('Puerto Rico')),
('QAT', _('Qatar')),
('KOR', _('Republic of Korea')),
('MDA', _('Republic of Moldova')),
('REU', _('Reunion')),
('ROU', _('Romania')),
('RUS', _('Russian Federation')),
('RWA', _('Rwanda')),
('BLM', _('Saint-Barthelemy')),
('SHN', _('Saint Helena')),
('KNA', _('Saint Kitts and Nevis')),
('LCA', _('Saint Lucia')),
('MAF', _('Saint-Martin (French part)')),
('SPM', _('Saint Pierre and Miquelon')),
('VCT', _('Saint Vincent and the Grenadines')),
('WSM', _('Samoa')),
('SMR', _('San Marino')),
('STP', _('Sao Tome and Principe')),
('SAU', _('Saudi Arabia')),
('SEN', _('Senegal')),
('SRB', _('Serbia')),
('SYC', _('Seychelles')),
('SLE', _('Sierra Leone')),
('SGP', _('Singapore')),
('SVK', _('Slovakia')),
('SVN', _('Slovenia')),
('SLB', _('Solomon Islands')),
('SOM', _('Somalia')),
('ZAF', _('South Africa')),
('SSD', _('South Sudan')),
('ESP', _('Spain')),
('LKA', _('Sri Lanka')),
('SDN', _('Sudan')),
('SUR', _('Suriname')),
('SJM', _('Svalbard and Jan Mayen Islands')),
('SWZ', _('Swaziland')),
('SWE', _('Sweden')),
('CHE', _('Switzerland')),
('SYR', _('Syrian Arab Republic')),
('TJK', _('Tajikistan')),
('THA', _('Thailand')),
('TLS', _('Timor-Leste')),
('TGO', _('Togo')),
('TKL', _('Tokelau')),
('TON', _('Tonga')),
('TTO', _('Trinidad and Tobago')),
('TUN', _('Tunisia')),
('TUR', _('Turkey')),
('TKM', _('Turkmenistan')),
('TCA', _('Turks and Caicos Islands')),
('TUV', _('Tuvalu')),
('UGA', _('Uganda')),
('UKR', _('Ukraine')),
('ARE', _('United Arab Emirates')),
('GBR', _('United Kingdom')),
('TZA', _('United Republic of Tanzania')),
('USA', _('United States of America')),
('VIR', _('United States Virgin Islands')),
('URY', _('Uruguay')),
('UZB', _('Uzbekistan')),
('VUT', _('Vanuatu')),
('VEN', _('Venezuela (Bolivarian Republic of)')),
('VNM', _('Viet Nam')),
('WLF', _('Wallis and Futuna Islands')),
('ESH', _('Western Sahara')),
('YEM', _('Yemen')),
('ZMB', _('Zambia')),
('ZWE', _('Zimbabwe')),
)
# Taken from http://www.w3.org/WAI/ER/IG/ert/iso639.htm
ALL_LANGUAGES = (
('abk', 'Abkhazian'),
('aar', 'Afar'),
('afr', 'Afrikaans'),
('amh', 'Amharic'),
('ara', 'Arabic'),
('asm', 'Assamese'),
('aym', 'Aymara'),
('aze', 'Azerbaijani'),
('bak', 'Bashkir'),
('ben', 'Bengali'),
('bih', 'Bihari'),
('bis', 'Bislama'),
('bre', 'Breton'),
('bul', 'Bulgarian'),
('bel', 'Byelorussian'),
('cat', 'Catalan'),
('cos', 'Corsican'),
('dan', 'Danish'),
('dzo', 'Dzongkha'),
('eng', 'English'),
('fra', 'French'),
('epo', 'Esperanto'),
('est', 'Estonian'),
('fao', 'Faroese'),
('fij', 'Fijian'),
('fin', 'Finnish'),
('fry', 'Frisian'),
('glg', 'Gallegan'),
('kal', 'Greenlandic'),
('grn', 'Guarani'),
('guj', 'Gujarati'),
('hau', 'Hausa'),
('heb', 'Hebrew'),
('hin', 'Hindi'),
('hun', 'Hungarian'),
('ind', 'Indonesian'),
('ina', 'Interlingua (International Auxiliary language Association)'),
('iku', 'Inuktitut'),
('ipk', 'Inupiak'),
('ita', 'Italian'),
('jpn', 'Japanese'),
('kan', 'Kannada'),
('kas', 'Kashmiri'),
('kaz', 'Kazakh'),
('khm', 'Khmer'),
('kin', 'Kinyarwanda'),
('kir', 'Kirghiz'),
('kor', 'Korean'),
('kur', 'Kurdish'),
    ('oci', "Langue d'Oc (post 1500)"),
('lao', 'Lao'),
('lat', 'Latin'),
('lav', 'Latvian'),
('lin', 'Lingala'),
('lit', 'Lithuanian'),
('mlg', 'Malagasy'),
('mlt', 'Maltese'),
('mar', 'Marathi'),
('mol', 'Moldavian'),
('mon', 'Mongolian'),
('nau', 'Nauru'),
('nep', 'Nepali'),
('nor', 'Norwegian'),
('ori', 'Oriya'),
('orm', 'Oromo'),
('pan', 'Panjabi'),
('pol', 'Polish'),
('por', 'Portuguese'),
('pus', 'Pushto'),
('que', 'Quechua'),
('roh', 'Rhaeto-Romance'),
('run', 'Rundi'),
('rus', 'Russian'),
('smo', 'Samoan'),
('sag', 'Sango'),
('san', 'Sanskrit'),
('scr', 'Serbo-Croatian'),
('sna', 'Shona'),
('snd', 'Sindhi'),
('sin', 'Singhalese'),
('ssw', 'Siswant'),
('slv', 'Slovenian'),
('som', 'Somali'),
('sot', 'Sotho'),
('spa', 'Spanish'),
    ('sun', 'Sundanese'),
('swa', 'Swahili'),
('tgl', 'Tagalog'),
('tgk', 'Tajik'),
('tam', 'Tamil'),
('tat', 'Tatar'),
('tel', 'Telugu'),
('tha', 'Thai'),
('tir', 'Tigrinya'),
('tog', 'Tonga (Nyasa)'),
('tso', 'Tsonga'),
('tsn', 'Tswana'),
('tur', 'Turkish'),
('tuk', 'Turkmen'),
('twi', 'Twi'),
('uig', 'Uighur'),
('ukr', 'Ukrainian'),
('urd', 'Urdu'),
('uzb', 'Uzbek'),
('vie', 'Vietnamese'),
('vol', 'Volapük'),
('wol', 'Wolof'),
('xho', 'Xhosa'),
('yid', 'Yiddish'),
('yor', 'Yoruba'),
('zha', 'Zhuang'),
('zul', 'Zulu'),
)
CHARSETS = (
('', 'None/Unknown'),
('UTF-8', 'UTF-8/Unicode'),
('ISO-8859-1', 'Latin1/ISO-8859-1'),
('ISO-8859-2', 'Latin2/ISO-8859-2'),
('ISO-8859-3', 'Latin3/ISO-8859-3'),
('ISO-8859-4', 'Latin4/ISO-8859-4'),
('ISO-8859-5', 'Latin5/ISO-8859-5'),
('ISO-8859-6', 'Latin6/ISO-8859-6'),
('ISO-8859-7', 'Latin7/ISO-8859-7'),
('ISO-8859-8', 'Latin8/ISO-8859-8'),
('ISO-8859-9', 'Latin9/ISO-8859-9'),
    ('ISO-8859-10', 'Latin10/ISO-8859-10'),
    ('ISO-8859-13', 'Latin13/ISO-8859-13'),
    ('ISO-8859-14', 'Latin14/ISO-8859-14'),
    ('ISO-8859-15', 'Latin15/ISO-8859-15'),
('Big5', 'BIG5'),
('EUC-JP','EUC-JP'),
('EUC-KR','EUC-KR'),
('GBK','GBK'),
('GB18030','GB18030'),
('Shift_JIS','Shift_JIS'),
('KOI8-R','KOI8-R'),
('KOI8-U','KOI8-U'),
('windows-874', 'Windows CP874'),
('windows-1250', 'Windows CP1250'),
('windows-1251', 'Windows CP1251'),
('windows-1252', 'Windows CP1252'),
('windows-1253', 'Windows CP1253'),
('windows-1254', 'Windows CP1254'),
('windows-1255', 'Windows CP1255'),
('windows-1256', 'Windows CP1256'),
('windows-1257', 'Windows CP1257'),
('windows-1258', 'Windows CP1258')
)
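# These tuples follow Django's (value, human-readable label) "choices"
# convention. A minimal sketch of how a model field might consume them; the
# model and field names below are hypothetical, not part of this module:
#
#   from django.db import models
#
#   class ExampleMetadata(models.Model):
#       language = models.CharField(max_length=3, choices=ALL_LANGUAGES,
#                                   default='eng')
#       charset = models.CharField(max_length=255, choices=CHARSETS,
#                                  default='UTF-8')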
|
GISPPU/GrenadaLandInformation
|
geonode/base/enumerations.py
|
Python
|
gpl-3.0
| 14,305
|
[
"BWA"
] |
13974b6c1099c6cdc0d59b3ac411d0c97ed5855f16bbc0166ce05ec61c38d916
|
#!/usr/bin/env python
"""
SPAdes Contig Graph
This is a tool to combine the assembly graph and contigs made by the SPAdes
assembler. For more information, go to:
https://github.com/rrwick/spades_contig_graph
Author: Ryan Wick
email: [email protected]
"""
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import shutil
import subprocess
from distutils import spawn
def main():
args = get_arguments()
links = load_graph_links(args.graph)
contigs = load_contigs(args.contigs)
paths = load_paths(args.paths, links)
build_graph(contigs, paths, links)
if args.connection_priority:
check_for_blast()
segment_sequences, segment_depths = load_graph_sequences(args.graph)
graph_overlap = get_graph_overlap(links, segment_sequences)
contigs = split_contigs(contigs, links, segment_sequences, graph_overlap)
contigs = merge_contigs(contigs, graph_overlap)
recalculate_contig_depths(contigs, segment_sequences, segment_depths, graph_overlap)
renumber_contigs(contigs)
save_graph_to_file(contigs, args.output)
if args.paths_out:
save_paths_to_file(contigs, args.paths_out)
def get_arguments():
parser = argparse.ArgumentParser(description='SPAdes Contig Graph: a tool for creating FASTG contig graphs from SPAdes assemblies')
parser.add_argument('graph', help='The assembly_graph.fastg file made by SPAdes')
parser.add_argument('contigs', help='A contigs or scaffolds fasta file made by SPAdes')
parser.add_argument('paths', help='The paths file which corresponds to the contigs or scaffolds file')
parser.add_argument('output', help='The graph file made by this program')
parser.add_argument('-p', '--paths_out', action='store', help='Save the paths to this file (default: do not save paths)', default='')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-c', '--connection_priority', action='store_true', help='Prioritise graph connections over segment length')
group.add_argument('-l', '--length_priority', action='store_true', help='Prioritise segment length over graph connections')
return parser.parse_args()
def save_graph_to_file(contigs, graph_filename):
print('Saving graph.......... ', end='')
sys.stdout.flush()
output_file = open(graph_filename, 'w')
for contig in contigs:
output_file.write(contig.get_header_with_links())
output_file.write(contig.get_sequence_with_line_breaks())
print('done')
def save_paths_to_file(contigs, paths_filename):
print('Saving paths.......... ', end='')
sys.stdout.flush()
output_paths_file = open(paths_filename, 'w')
for contig in contigs:
output_paths_file.write(contig.fullname + '\n')
output_paths_file.write(contig.path.get_paths_with_line_breaks())
print('done')
def check_for_blast():
makeblastdb_path = spawn.find_executable('makeblastdb')
blastn_path = spawn.find_executable('blastn')
blast_installed = (makeblastdb_path != None and blastn_path != None)
if not blast_installed:
print('Error: could not find BLAST program', file=sys.stderr)
quit()
def load_contigs(contig_filename):
print('Loading contigs....... ', end='')
sys.stdout.flush()
try:
contigs = load_contigs_2(contig_filename)
except Exception:
print('\nError: could not load ' + contig_filename, file=sys.stderr)
quit()
print('done')
return contigs
def load_contigs_2(contig_filename):
"""
This function takes a contig filename and returns a list of Contig objects.
    It expects a file of just forward contigs, but it creates both forward and
reverse complement Contig objects.
"""
contigs = []
contig_file = open(contig_filename, 'r')
name = ''
sequence = ''
for line in contig_file:
line = line.strip()
if not line:
continue
if line[0] == '>': # Header line = start of new contig
if len(name) > 0: # If a contig is currently in memory...
contig = Contig(name, sequence)
contigs.append(contig)
contigs.append(make_reverse_complement_contig(contig))
name = ''
sequence = ''
name = line[1:]
else: # Not a header line = sequence
sequence += line
if len(name) > 0: # Save the last contig
contig = Contig(name, sequence)
contigs.append(contig)
contigs.append(make_reverse_complement_contig(contig))
return contigs
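# A minimal worked example of the expected FASTA input (the header and
# sequence are made up):
#
#   >NODE_1_length_8_cov_5.0
#   ACCGTTAA
#
# yields two Contig objects: "NODE_1_length_8_cov_5.0" holding ACCGTTAA and
# "NODE_1_length_8_cov_5.0'" holding its reverse complement, TTAACGGT.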
def load_graph_links(graph_filename):
print('Loading graph......... ', end='')
sys.stdout.flush()
try:
links = load_graph_links_2(graph_filename)
except Exception:
print('\nError: could not load ' + graph_filename, file=sys.stderr)
quit()
print('done')
return links
def load_graph_links_2(graph_filename):
"""
This function takes a graph filename and returns a dictionary of the graph
links.
The dictionary key is the starting graph segment.
The dictionary value is a list of the ending graph segments.
"""
links = {}
graph_file = open(graph_filename, 'r')
for line in graph_file:
line = line.strip()
if not line:
continue
if line[0] != '>':
continue
if line[-1] == ';':
line = line[:-1]
line_parts = line.split(':')
start = get_number_from_sequence_name(line_parts[0])
if start not in links:
links[start] = []
start_rev_comp = get_opposite_sequence_number(start)
if start_rev_comp not in links:
            links[start_rev_comp] = []
if len(line_parts) < 2:
continue
ending_segments = line_parts[1].split(',')
ends = []
for ending_segment in ending_segments:
ends.append(get_number_from_sequence_name(ending_segment))
# Add the links to the dictionary in the forward direction.
for end in ends:
if end not in links[start]:
links[start].append(end)
# Add the links to the dictionary in the reverse direction.
for end in ends:
end_rev_comp = get_opposite_sequence_number(end)
if end_rev_comp not in links:
links[end_rev_comp] = []
if start_rev_comp not in links[end_rev_comp]:
links[end_rev_comp].append(start_rev_comp)
return links
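# A minimal worked example (the header is made up): a FASTG header line such as
#
#   >EDGE_3_length_100_cov_2.5:EDGE_5_length_50_cov_1.0';
#
# produces links['3+'] == ['5-'] in the forward direction and, from the
# reverse-complement pass, links['5+'] == ['3-'].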
def build_graph(contigs, paths, links):
"""
Add the paths to each contig object, so each contig knows its graph path,
and add the links to each contig object, turning the contigs into a
graph.
"""
print('Building graph........ ', end='')
sys.stdout.flush()
add_paths_to_contigs(contigs, paths)
add_links_to_contigs(contigs, links, True, False)
print('done')
def load_graph_sequences(graph_filename):
try:
segment_sequences, segment_depths = load_graph_sequences_2(graph_filename)
except Exception:
print('\nError: could not determine graph sequences and depths', file=sys.stderr)
quit()
return segment_sequences, segment_depths
def load_graph_sequences_2(graph_filename):
"""
This function takes a graph filename and returns two dictionaries:
Graph sequences:
The dictionary key is the graph segment names.
The dictionary value is the graph segment sequence string.
Graph depth:
The dictionary key is the graph segment names.
The dictionary value is the graph segment read depth (cov).
"""
sequences = {}
depths = {}
graph_file = open(graph_filename, 'r')
name = ''
sequence = ''
depth = 1.0
for line in graph_file:
line = line.strip()
if not line:
continue
# Header lines indicate the start of a new contig.
if line[0] == '>':
# If a sequence is currently stored, save it now.
if len(name) > 0:
sequences[name] = sequence
depths[name] = depth
name = ''
sequence = ''
depth = 1.0
if line[-1] == ';':
line = line[:-1]
line_parts = line.split(':')
name, depth = get_number_and_depth_from_sequence_name(line_parts[0])
# If not a header line, we assume this is a sequence line.
else:
sequence += line
# Save the last contig.
if len(name) > 0:
sequences[name] = sequence
depths[name] = depth
return sequences, depths
def load_paths(path_filename, links):
print('Loading paths......... ', end='')
sys.stdout.flush()
try:
paths = load_paths_2(path_filename, links)
except Exception:
print('\nError: could not load ' + path_filename, file=sys.stderr)
quit()
print('done')
return paths
def load_paths_2(path_filename, links):
"""
This function takes a path filename and returns a dictionary.
The dictionary key is the contig name.
The dictionary value is a Path object.
"""
paths = {}
path_file = open(path_filename, 'r')
contig_number = ''
path_segments = []
for line in path_file:
line = line.strip()
if not line:
continue
# Lines starting with 'NODE' are the start of a new path
if len(line) > 3 and line[0:4] == 'NODE':
# If a path is currently stored, save it now.
if len(contig_number) > 0:
paths[contig_number] = Path(path_segments)
contig_number = ''
path_segments = []
positive = line[-1] != "'"
line_parts = line.split('_')
contig_number = line_parts[1]
if positive:
contig_number += '+'
else:
contig_number += '-'
# If not a node name line, we assume this is a path line.
else:
path_line = line
# Replace a semicolon at the end of a line with a placeholder
# segment called 'gap_SEG' where SEG will be the name of the
# preceding segment.
if path_line[-1] == ';':
path_line = path_line[0:-1]
if contig_number.endswith('+'):
path_line += ',gap_POS'
else:
path_line += ',gap_NEG'
# Add the path to the path list
if len(path_line) > 0:
path_segments.extend(path_line.split(','))
# Save the last contig.
if len(contig_number) > 0:
paths[contig_number] = Path(path_segments)
# Now we go through the paths and rename our gap segments. Anything named
# gap_POS will get the name of the preceding path segment. Anything named
# gap_NEG will get the name of the following path segment.
for path in paths.itervalues():
for i in range(len(path.segment_list)):
segment = path.segment_list[i]
if segment == 'gap_POS':
path.segment_list[i] = 'gap_' + path.segment_list[i-1]
elif segment == 'gap_NEG':
path.segment_list[i] = 'gap_' + path.segment_list[i+1]
# Now we must go through all of the paths we just made and add any links
# for new gap segments.
for path in paths.itervalues():
for i in range(len(path.segment_list) - 1):
j = i + 1
s_1 = path.segment_list[i]
s_2 = path.segment_list[j]
if s_1.startswith('gap') or s_2.startswith('gap'):
if s_1 in links:
links[s_1].append(s_2)
else:
links[s_1] = [s_2]
return paths
def get_number_from_sequence_name(sequence_name):
name_parts = sequence_name.split('_')
number = name_parts[1]
if sequence_name[-1] == "'":
number += '-'
else:
number += '+'
return number
def get_number_and_depth_from_sequence_name(sequence_name):
name_parts = sequence_name.split('_')
number = name_parts[1]
if sequence_name[-1] == "'":
number += '-'
else:
number += '+'
depth_string = name_parts[5]
if depth_string[-1] == "'":
depth_string = depth_string[:-1]
depth = float(depth_string)
return number, depth
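# For example, "EDGE_3_length_100_cov_2.5" gives ('3+', 2.5) while
# "EDGE_3_length_100_cov_2.5'" gives ('3-', 2.5); the trailing apostrophe is
# the FASTG marker for a reverse-complement segment.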
def get_opposite_sequence_number(number):
if number[-1] == '+':
return number[0:-1] + '-'
else:
return number[0:-1] + '+'
def add_paths_to_contigs(contigs, paths):
"""
This function adds the path information to the contig objects, so each contig
knows its graph path.
"""
for contig in contigs:
contig.path = paths[contig.get_number_with_sign()]
def add_links_to_contigs(contigs, links, clear, dead_ends_only):
"""
This function uses the contents of the contig paths to add link
information to the contigs.
"""
if clear:
for contig in contigs:
contig.outgoing_linked_contigs = []
contig.incoming_linked_contigs = []
# If we are only adding links for dead ends, then we make sets to easily
# tell if a contig has no connections.
if dead_ends_only:
no_outgoing_links = set()
no_incoming_links = set()
for contig in contigs:
if not contig.outgoing_linked_contigs:
no_outgoing_links.add(contig)
if not contig.incoming_linked_contigs:
no_incoming_links.add(contig)
# For each contig, we take the last graph segment, find the segments that
# it leads to, and then find the contigs which start with that next
# segment. These make up the links to the current contig.
for contig_1 in contigs:
ending_segment = contig_1.path.get_last_segment()
following_segments = links[ending_segment]
for following_segment in following_segments:
for contig_2 in contigs:
starting_segment = contig_2.path.get_first_segment()
if following_segment == starting_segment:
if dead_ends_only and contig_1 not in no_outgoing_links and contig_2 not in no_incoming_links:
continue
contig_1.outgoing_linked_contigs.append(contig_2)
contig_2.incoming_linked_contigs.append(contig_1)
# This process can create duplicate linked contigs, so we now go through
# them to remove the duplicates.
for contig in contigs:
contig.outgoing_linked_contigs = list(set(contig.outgoing_linked_contigs))
contig.incoming_linked_contigs = list(set(contig.incoming_linked_contigs))
def make_reverse_complement_contig(contig):
"""
This function takes a contig and returns its reverse complement contig.
"""
rev_comp_contig_fullname = contig.fullname
# Add or remove the ' as necessary
if rev_comp_contig_fullname[-1] == "'":
rev_comp_contig_fullname = rev_comp_contig_fullname[0:-1]
else:
rev_comp_contig_fullname = rev_comp_contig_fullname + "'"
rev_comp_sequence = make_reverse_complement_sequence(contig.sequence)
rev_comp_contig = Contig(rev_comp_contig_fullname, rev_comp_sequence)
rev_comp_contig.path = make_reverse_complement_path(contig.path)
return rev_comp_contig
def make_reverse_complement_path(path):
rev_segment_list = []
for segment in reversed(path.segment_list):
rev_segment_list.append(get_opposite_sequence_number(segment))
return Path(rev_segment_list)
def make_reverse_complement_sequence(forward_sequence):
rev_comp = ''
for i in reversed(range(len(forward_sequence))):
base = forward_sequence[i]
if base == 'A': rev_comp += 'T'
elif base == 'T': rev_comp += 'A'
elif base == 'G': rev_comp += 'C'
elif base == 'C': rev_comp += 'G'
elif base == 'a': rev_comp += 't'
elif base == 't': rev_comp += 'a'
elif base == 'g': rev_comp += 'c'
elif base == 'c': rev_comp += 'g'
elif base == 'R': rev_comp += 'Y'
elif base == 'Y': rev_comp += 'R'
elif base == 'S': rev_comp += 'S'
elif base == 'W': rev_comp += 'W'
elif base == 'K': rev_comp += 'M'
elif base == 'M': rev_comp += 'K'
elif base == 'r': rev_comp += 'y'
elif base == 'y': rev_comp += 'r'
elif base == 's': rev_comp += 's'
elif base == 'w': rev_comp += 'w'
elif base == 'k': rev_comp += 'm'
elif base == 'm': rev_comp += 'k'
elif base == 'B': rev_comp += 'V'
elif base == 'D': rev_comp += 'H'
elif base == 'H': rev_comp += 'D'
elif base == 'V': rev_comp += 'B'
elif base == 'b': rev_comp += 'v'
elif base == 'd': rev_comp += 'h'
elif base == 'h': rev_comp += 'd'
elif base == 'v': rev_comp += 'b'
elif base == 'N': rev_comp += 'N'
elif base == 'n': rev_comp += 'n'
elif base == '.': rev_comp += '.'
elif base == '-': rev_comp += '-'
elif base == '?': rev_comp += '?'
else: rev_comp += 'N'
return rev_comp
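# For example, make_reverse_complement_sequence('ACCGTTAa') returns 'tTAACGGT';
# IUPAC ambiguity codes are complemented too (R <-> Y, K <-> M, B <-> V, ...),
# and any unrecognised character becomes 'N'.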
def split_contigs(contigs, links, segment_sequences, graph_overlap):
print('Splitting contigs..... ', end='')
sys.stdout.flush()
contigs = split_contigs_2(contigs, links, segment_sequences, graph_overlap)
add_links_to_contigs(contigs, links, False, True)
print('done')
return contigs
def split_contigs_2(contigs, links, segment_sequences, graph_overlap):
"""
This function splits contigs as necessary to maintain all graph connections.
Specifically, it looks for graph segments which are connected to the end of
one contig and occur in the middle of a second contig. In such cases, the
second contig is split to allow for the connection.
"""
# Find all missing links. A missing link is defined as a link in the
# assembly graph which is not represented somewhere in the contig graph.
# 'Represented somewhere' includes both within a contig and between two
# linked contigs.
links_in_contigs = set()
for contig in contigs:
links_in_contig = contig.get_links_in_this_contig_and_to_other_contigs()
for link in links_in_contig:
links_in_contigs.add(link)
all_graph_links = set()
for start, ends in links.iteritems():
for end in ends:
all_graph_links.add((start, end))
missing_links = []
for graph_link in all_graph_links:
if graph_link not in links_in_contigs:
missing_links.append(graph_link)
# In order for these links to be present in the graph, we need to split
# contigs such that the start segments of missing links are on the ends
# of contigs and the end segments of missing links are on the starts of
# contigs.
segments_which_must_be_on_contig_ends = []
segments_which_must_be_on_contig_starts = []
for missing_link in missing_links:
segments_which_must_be_on_contig_ends.append(missing_link[0])
segments_which_must_be_on_contig_starts.append(missing_link[1])
# Create a reverse links dictionary.
reverse_links = {}
for start, ends in links.iteritems():
for end in ends:
if end not in reverse_links:
reverse_links[end] = []
reverse_links[end].append(start)
# Compile lists of all segments which reside on contigs dead ends.
dead_end_end_segments = []
dead_end_start_segments = []
for contig in contigs:
if not contig.outgoing_linked_contigs:
dead_end_end = contig.path.get_last_segment()
dead_end_end_segments.append(dead_end_end)
dead_end_start_segments.append(get_opposite_sequence_number(dead_end_end))
# Find all graph segments which are connected to these dead end segments.
# These will need to be on contig ends, to allow for these connections.
segments_which_must_be_on_contig_ends = []
for segment in dead_end_start_segments:
if segment in reverse_links:
segments_which_must_be_on_contig_ends.extend(reverse_links[segment])
segments_which_must_be_on_contig_starts = []
for segment in dead_end_end_segments:
if segment in links:
segments_which_must_be_on_contig_starts.extend(links[segment])
# Remove any duplicates from the segments lists just made.
segments_which_must_be_on_contig_starts = list(set(segments_which_must_be_on_contig_starts))
segments_which_must_be_on_contig_ends = list(set(segments_which_must_be_on_contig_ends))
# Before we split the contigs, we need to remember all of the links present
# so they can be remade.
links_before_splits = {}
for contig in contigs:
start = contig.get_number_with_sign()
ends = []
for outgoing_linked_contig in contig.outgoing_linked_contigs:
ends.append(outgoing_linked_contig.get_number_with_sign())
links_before_splits[start] = ends
# Now split contigs, as necessary.
new_positive_contigs = []
next_contig_number = 1
old_nums_to_new_nums = {}
for contig in contigs:
# We only split positive contigs and will make the negative complement
# after we are done.
if not contig.is_positive():
continue
# We need to keep track of the mapping from old contig numbers to new
# numbers, as this will be needed for reconnecting contigs.
contig_number = contig.number
old_nums_to_new_nums[contig_number] = []
# This will contain the locations at which the contig must be split.
# It is a list of integers which are indices for path segments that
# must be at the start of the contig.
split_points = []
for segment in segments_which_must_be_on_contig_starts:
split_points.extend(contig.find_segment_locations(segment))
for segment in segments_which_must_be_on_contig_ends:
split_points.extend(contig.find_segment_locations_plus_one(segment))
# Remove duplicates and sort split points.
split_points = sorted(list(set(split_points)))
# If the first split point is zero, then remove it, as there is no need
# to split a contig at its start.
if split_points and split_points[0] == 0:
split_points = split_points[1:]
# If the last split point is one past the end, then remove it.
if split_points and split_points[-1] == contig.path.get_segment_count():
split_points = split_points[:-1]
# If there are splits to be done, then we make the new contigs!
if split_points:
contig.determine_all_segment_locations(segment_sequences, graph_overlap)
for split_point in reversed(split_points):
contig_part_1, contig_part_2 = split_contig(contig, split_point, next_contig_number)
# If the split was successful, then both contig_part_1 and
# contig_part_2 are new Contig objects. If unsuccessful, then
# they are None.
if contig_part_1 is not None:
new_positive_contigs.append(contig_part_2)
old_nums_to_new_nums[contig_number] = [contig_part_2.number] + old_nums_to_new_nums[contig_number]
contig = contig_part_1
next_contig_number += 1
new_positive_contigs.append(contig)
# If there weren't any split points, then we don't have to split the
# contig.
else:
new_positive_contigs.append(contig)
# At this point, the last contig added will have a number of 0, so we
# need to renumber it.
new_positive_contigs[-1].renumber(next_contig_number)
next_contig_number += 1
old_nums_to_new_nums[contig_number] = [new_positive_contigs[-1].number] + old_nums_to_new_nums[contig_number]
# Now we make the reverse complements for all of our new contigs.
new_contigs = []
for contig in new_positive_contigs:
new_contigs.append(contig)
new_contigs.append(make_reverse_complement_contig(contig))
# Now we have to put together the links in new contig numbers. First we
# Create the internal links between parts of a split contig.
links_after_splits = {}
for new_num in old_nums_to_new_nums.itervalues():
for i in range(len(new_num) - 1):
start = str(new_num[i]) + '+'
end = str(new_num[i+1]) + '+'
links_after_splits[start] = [end]
start = str(new_num[i+1]) + '-'
end = str(new_num[i]) + '-'
links_after_splits[start] = [end]
# Add the external links between contigs. To do this we need to
# translate old contig numbers to new contig numbers.
for start, ends in links_before_splits.iteritems():
start_sign = start[-1]
start_num = int(start[:-1])
if start_sign == '+':
new_start_num = old_nums_to_new_nums[start_num][-1]
else:
new_start_num = old_nums_to_new_nums[start_num][0]
new_start = str(new_start_num) + start_sign
new_ends = []
for end in ends:
end_sign = end[-1]
end_num = int(end[:-1])
if end_sign == '+':
new_end_num = old_nums_to_new_nums[end_num][0]
else:
new_end_num = old_nums_to_new_nums[end_num][-1]
new_end = str(new_end_num) + end_sign
new_ends.append(new_end)
links_after_splits[new_start] = new_ends
# Also make the links in reverse direction.
reverse_links_after_splits = {}
for start, ends in links_after_splits.iteritems():
for end in ends:
if end in reverse_links_after_splits:
reverse_links_after_splits[end].append(start)
else:
reverse_links_after_splits[end] = [start]
# Now we add the links back to our new contigs.
new_contig_dict = {}
for contig in new_contigs:
new_contig_dict[contig.get_number_with_sign()] = contig
for contig in new_contigs:
contig.incoming_linked_contigs = []
contig.outgoing_linked_contigs = []
contig_num = contig.get_number_with_sign()
if contig_num in links_after_splits:
for outgoing_num in links_after_splits[contig_num]:
contig.outgoing_linked_contigs.append(new_contig_dict[outgoing_num])
if contig_num in reverse_links_after_splits:
for incoming_num in reverse_links_after_splits[contig_num]:
contig.incoming_linked_contigs.append(new_contig_dict[incoming_num])
return new_contigs
def split_contig(contig, split_point, next_contig_number):
"""
    This function takes a contig and returns two contigs, split at the split
    point. The split point is the index of the path segment that will become
    the first segment of the second contig.
"""
# Determine the new contig paths.
new_contig_1_path = Path(contig.path.segment_list[:split_point])
new_contig_2_path = Path(contig.path.segment_list[split_point:])
# Get the coordinates for the new contig paths.
new_contig_1_path_coordinates = contig.path.contig_coordinates[:split_point]
new_contig_2_path_coordinates = contig.path.contig_coordinates[split_point:]
new_contig_1_first_segment_coordinates = new_contig_1_path_coordinates[0]
new_contig_1_last_segment_coordinates = new_contig_1_path_coordinates[-1]
new_contig_2_first_segment_coordinates = new_contig_2_path_coordinates[0]
new_contig_2_last_segment_coordinates = new_contig_2_path_coordinates[-1]
# Check to see if any of the important coordinates are absent, as will be
# the case if this program was unable to find the segment in the contig.
# In this case, the split fails.
if new_contig_1_first_segment_coordinates[0] is None or \
new_contig_1_last_segment_coordinates[1] is None or \
new_contig_2_first_segment_coordinates[0] is None or \
new_contig_2_last_segment_coordinates[1] is None:
return None, None
    # Determine the exact sequence coordinates for the new contigs.
    # The indices are a bit confusing here, as the contig coordinates are 1-based
# with an inclusive end (because that's how BLAST does it). To get to
# 0-based and exclusive end (for Python), we subtract one from the start.
new_contig_1_seq_start = new_contig_1_first_segment_coordinates[0] - 1
new_contig_1_seq_end = new_contig_1_last_segment_coordinates[1]
new_contig_1_sequence = contig.sequence[new_contig_1_seq_start:new_contig_1_seq_end]
new_contig_2_seq_start = new_contig_2_first_segment_coordinates[0] - 1
new_contig_2_seq_end = new_contig_2_last_segment_coordinates[1]
new_contig_2_sequence = contig.sequence[new_contig_2_seq_start:new_contig_2_seq_end]
# Give the next contig number to the second piece, as the first one may
# have to be split further. It will be renumbered, if necessary, later.
new_contig_1_name = 'NODE_0_length_' + str(len(new_contig_1_sequence)) + '_cov_' + str(contig.cov)
new_contig_2_name = 'NODE_' + str(next_contig_number) + '_length_' + str(len(new_contig_2_sequence)) + '_cov_' + str(contig.cov)
# The first contig is the one that will potentially be split again, so it
# still needs to have contig coordinates in its path.
new_contig_1 = Contig(new_contig_1_name, new_contig_1_sequence)
new_contig_1.path = new_contig_1_path
new_contig_1.path.contig_coordinates = new_contig_1_path_coordinates
new_contig_2 = Contig(new_contig_2_name, new_contig_2_sequence)
new_contig_2.path = new_contig_2_path
return new_contig_1, new_contig_2
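# Usage sketch (hypothetical split point and contig; not part of the original
# script): split a contig at segment index 2, falling back to the unsplit
# contig when the segment coordinates needed for the split are unavailable.
def _example_split_contig(contig, next_contig_number):
    piece_1, piece_2 = split_contig(contig, 2, next_contig_number)
    if piece_1 is None:
        return [contig]  # split failed, keep the original contig
    return [piece_1, piece_2]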
def does_overlap_work(s_1, s_2, overlap):
"""
Test a single overlap between two sequences.
"""
return s_1[-overlap:] == s_2[:overlap]
def get_possible_overlaps(s_1, s_2, possible_overlaps):
"""
Try the possible overlaps in the given list and return those that work.
"""
new_possible_overlaps = []
for possible_overlap in possible_overlaps:
if does_overlap_work(s_1, s_2, possible_overlap):
new_possible_overlaps.append(possible_overlap)
return new_possible_overlaps
def get_graph_overlap(links, segment_sequences):
"""
Figure out the graph overlap size.
"""
if not links:
return 0
# Determine the shortest segment in the graph, as this will be the maximum
# possible overlap.
segment_lengths = []
for sequence in segment_sequences.itervalues():
segment_lengths.append(len(sequence))
shortest_segment_sequence = min(segment_lengths)
    # Now we loop through the linked segment pairs, narrowing the possible overlaps.
possible_overlaps = range(1, shortest_segment_sequence + 1)
for start, ends in links.iteritems():
if start.startswith('gap'):
continue
s_1 = segment_sequences[start]
for ending_segment in ends:
if ending_segment.startswith('gap'):
continue
s_2 = segment_sequences[ending_segment]
possible_overlaps = get_possible_overlaps(s_1, s_2, possible_overlaps)
# If no overlaps work, then we return 0.
# This shouldn't happen, as every SPAdes graph should have overlaps.
if len(possible_overlaps) == 0:
return 0
# If only one overlap works, then that's our answer!
if len(possible_overlaps) == 1:
return possible_overlaps[0]
# If the code gets here, that means we have tried every segment pair and
# there are still multiple possible overlaps. This shouldn't happen in
# anything but tiny graphs or seriously pathological cases.
print('Error: failed to correctly determine graph overlap', file=sys.stderr)
return 0
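# Illustrative sketch (toy data, not part of the original script): with one
# link '1+' -> '2+' and sequences sharing a unique 3-base overlap ('ACG'),
# get_graph_overlap() narrows the candidate overlaps down to 3.
def _example_graph_overlap():
    toy_links = {'1+': ['2+']}
    toy_sequences = {'1+': 'TTTTACG', '2+': 'ACGAAAA'}
    return get_graph_overlap(toy_links, toy_sequences)  # returns 3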
def save_sequence_to_fasta_file(sequence, sequence_name, filename):
    # Use a context manager so the file handle is always closed.
    with open(filename, 'w') as fasta_file:
        fasta_file.write('>' + sequence_name)
        fasta_file.write('\n')
        fasta_file.write(sequence)
        fasta_file.write('\n')
def merge_contigs(contigs, graph_overlap):
print('Merging contigs....... ', end='')
sys.stdout.flush()
contigs = merge_identical_contigs(contigs)
contigs = merge_linear_runs(contigs, graph_overlap)
print('done')
return contigs
def merge_identical_contigs(contigs):
"""
Find contigs which are made of the exact same graph segments as each other
and merge them.
"""
contig_groups = []
# Group contigs into collections with the exact same segment list.
for contig in contigs:
for contig_group in contig_groups:
if contig.path.segment_list == contig_group[0].path.segment_list:
contig_group.append(contig)
break
else:
contig_groups.append([contig])
# For the first contig in each group, give it the linked contigs of its
# entire group.
old_num_to_new_contig = {}
new_contigs = []
for contig_group in contig_groups:
all_incoming_linked_contig_numbers = set()
all_outgoing_linked_contig_numbers = set()
first_contig_in_group = contig_group[0]
for contig in contig_group:
for incoming_linked_contig in contig.incoming_linked_contigs:
all_incoming_linked_contig_numbers.add(incoming_linked_contig.get_number_with_sign())
for outgoing_linked_contig in contig.outgoing_linked_contigs:
all_outgoing_linked_contig_numbers.add(outgoing_linked_contig.get_number_with_sign())
old_num_to_new_contig[contig.get_number_with_sign()] = first_contig_in_group
first_contig_in_group.incoming_linked_contigs = list(all_incoming_linked_contig_numbers)
first_contig_in_group.outgoing_linked_contigs = list(all_outgoing_linked_contig_numbers)
new_contigs.append(first_contig_in_group)
# Now for each of the new contigs, we must convert the linked contig lists
# from numbers to actual contigs.
for contig in new_contigs:
new_incoming_linked_contigs = set()
new_outgoing_linked_contigs = set()
for incoming_linked_contig_old_number in contig.incoming_linked_contigs:
new_incoming_linked_contigs.add(old_num_to_new_contig[incoming_linked_contig_old_number])
for outgoing_linked_contig_old_number in contig.outgoing_linked_contigs:
new_outgoing_linked_contigs.add(old_num_to_new_contig[outgoing_linked_contig_old_number])
contig.incoming_linked_contigs = list(new_incoming_linked_contigs)
contig.outgoing_linked_contigs = list(new_outgoing_linked_contigs)
return new_contigs
def merge_linear_runs(contigs, graph_overlap):
"""
Find and merge simple linear runs of contigs.
"""
merge_happened = True
while merge_happened:
contig_dict = {}
for contig in contigs:
contig_dict[contig.get_number_with_sign()] = contig
for contig in contigs:
            # Make sure that this contig has exactly one downstream contig and
            # that the downstream contig's only upstream contig is this one.
if len(contig.outgoing_linked_contigs) != 1:
continue
next_contig = contig.outgoing_linked_contigs[0]
if len(next_contig.incoming_linked_contigs) != 1 or \
next_contig.incoming_linked_contigs[0] != contig:
continue
# Make sure this contig isn't just looping back to itself.
if contig == next_contig:
continue
# Make sure that the reverse complement contigs are also properly
# simple and linear.
rev_comp_contig = contig_dict[get_opposite_sequence_number(contig.get_number_with_sign())]
rev_comp_next_contig = contig_dict[get_opposite_sequence_number(next_contig.get_number_with_sign())]
if len(rev_comp_next_contig.outgoing_linked_contigs) != 1 or \
len(rev_comp_contig.incoming_linked_contigs) != 1 or \
rev_comp_next_contig.outgoing_linked_contigs[0] != rev_comp_contig or \
rev_comp_contig.incoming_linked_contigs[0] != rev_comp_next_contig:
continue
# Make sure that the contig is not simply looping back onto its own
# reverse complement.
if next_contig == rev_comp_contig or contig == rev_comp_next_contig:
continue
# If the code got here, then we can merge contig and next_contig
# (and their reverse complements).
contigs = merge_two_contigs(contigs, graph_overlap, contig, next_contig, rev_comp_contig, rev_comp_next_contig)
break
else:
merge_happened = False
return contigs
def merge_two_contigs(all_contigs, graph_overlap, contig_1, contig_2, contig_1_rev_comp, contig_2_rev_comp):
largest_contig_number = 0
for contig in all_contigs:
contig_num = contig.number
if contig_num > largest_contig_number:
largest_contig_number = contig_num
merged_contig_num = largest_contig_number + 1
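    # The merged sequence keeps the overlapping bases only once. For example
    # (toy values), 'ACGTAC' + 'TACGGG' with a graph overlap of 3 gives
    # 'ACGTACGGG'.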
merged_contig_sequence = contig_1.sequence[:-graph_overlap] + contig_2.sequence
merged_contig_sequence_rev_comp = contig_2_rev_comp.sequence + contig_1_rev_comp.sequence[graph_overlap:]
merged_contig_name = "NODE_" + str(merged_contig_num) + "_length_" + str(len(merged_contig_sequence)) + "_cov_1.0"
merged_contig_name_rev_comp = "NODE_" + str(merged_contig_num) + "_length_" + str(len(merged_contig_sequence_rev_comp)) + "_cov_1.0'"
merged_contig = Contig(merged_contig_name, merged_contig_sequence)
merged_contig_rev_comp = Contig(merged_contig_name_rev_comp, merged_contig_sequence_rev_comp)
# Copy the connections over to the new merged contigs.
merged_contig.incoming_linked_contigs = contig_1.incoming_linked_contigs
merged_contig.outgoing_linked_contigs = contig_2.outgoing_linked_contigs
merged_contig_rev_comp.incoming_linked_contigs = contig_2_rev_comp.incoming_linked_contigs
merged_contig_rev_comp.outgoing_linked_contigs = contig_1_rev_comp.outgoing_linked_contigs
# Copy the path segments over to the new merged contigs.
merged_contig.path.segment_list = contig_1.path.segment_list + contig_2.path.segment_list
merged_contig_rev_comp.path.segment_list = contig_2_rev_comp.path.segment_list + contig_1_rev_comp.path.segment_list
# Build a new list of contigs, and while we're looping through, we can fix
# up any links to the merged contig.
new_contigs = []
for contig in all_contigs:
if contig == contig_1 or contig == contig_2 or contig == contig_1_rev_comp or contig == contig_2_rev_comp:
continue
new_incoming_linked_contigs = []
for incoming_linked_contig in contig.incoming_linked_contigs:
if incoming_linked_contig == contig_2:
new_incoming_linked_contigs.append(merged_contig)
elif incoming_linked_contig == contig_1_rev_comp:
new_incoming_linked_contigs.append(merged_contig_rev_comp)
else:
new_incoming_linked_contigs.append(incoming_linked_contig)
contig.incoming_linked_contigs = new_incoming_linked_contigs
new_outgoing_linked_contigs = []
for outgoing_linked_contig in contig.outgoing_linked_contigs:
if outgoing_linked_contig == contig_1:
new_outgoing_linked_contigs.append(merged_contig)
elif outgoing_linked_contig == contig_2_rev_comp:
new_outgoing_linked_contigs.append(merged_contig_rev_comp)
else:
new_outgoing_linked_contigs.append(outgoing_linked_contig)
contig.outgoing_linked_contigs = new_outgoing_linked_contigs
new_contigs.append(contig)
new_contigs.append(merged_contig)
new_contigs.append(merged_contig_rev_comp)
return new_contigs
def recalculate_contig_depths(contigs, sequences, depths, graph_overlap):
"""
This function recalculates contig depths using the depths of the graph
segments which make up the contigs.
    For this function I tried to copy what SPAdes does: it seems to define a
    contig's depth as the weighted average of the depths of the graph segments
    in the contig.
The weight for the average is the segment length minus the overlap.
Notably, SPAdes does not seem to divide up the available depth for a
segment which appears in multiple places in the contigs, so I don't do that
either.
"""
print('Calculating depth..... ', end='')
sys.stdout.flush()
for contig in contigs:
total_length = 0
total_depth_times_length = 0.0
for segment in contig.path.segment_list:
if segment.startswith('gap'):
continue
# Get the segment depth and length. In some odd cases, SPAdes does
# not save both segments in a complementary pair, so we may have to
# look for the complementary segment.
if segment in depths:
depth = depths[segment]
else:
depth = depths[get_opposite_sequence_number(segment)]
adjusted_depth = depth
if segment in sequences:
length = len(sequences[segment])
else:
length = len(sequences[get_opposite_sequence_number(segment)])
adjusted_length = length - graph_overlap
total_length += adjusted_length
total_depth_times_length += adjusted_depth * adjusted_length
if total_length > 0:
final_depth = total_depth_times_length / total_length
contig.cov = final_depth
contig.rebuild_full_name()
print('done')
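# Worked sketch of the weighted average above (made-up numbers, not part of
# the original script): two segments of lengths 150 and 90 with depths 10.0
# and 20.0 and a graph overlap of 50 get weights 100 and 40, so the contig
# depth is (10.0*100 + 20.0*40) / (100 + 40), roughly 12.86.
def _example_weighted_depth(graph_overlap=50):
    segments = [(150, 10.0), (90, 20.0)]  # hypothetical (length, depth) pairs
    total_length = sum(length - graph_overlap for length, depth in segments)
    total_depth_times_length = sum((length - graph_overlap) * depth for length, depth in segments)
    return total_depth_times_length / total_length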
def renumber_contigs(contigs):
print('Renumbering contigs... ', end='')
sys.stdout.flush()
    # Sort contigs from big to small, so contig 1 is the largest.
positive_contigs = []
for contig in contigs:
if contig.is_positive():
positive_contigs.append(contig)
sorted_contigs = sorted(positive_contigs, key=lambda contig: len(contig.sequence), reverse=True)
# Create the new number mapping.
old_nums_to_new_nums = {}
i = 1
for contig in sorted_contigs:
old_nums_to_new_nums[contig.number] = i
i += 1
# Assign new numbers and sort using them.
for contig in contigs:
contig.renumber(old_nums_to_new_nums[contig.number])
contigs.sort(key=lambda contig: (contig.number, contig.get_sign()))
print('done')
def get_best_blast_alignment(blast_alignments, segment_length, expected_reference_start):
"""
    Find and return the best of the given BLAST alignments. 'Best' is defined
    as covering the entire query, having high identity and having an
    appropriate start position.
"""
if not blast_alignments:
return None
# Sort by alignment length and identity.
sorted_alignments = sorted(blast_alignments, key=lambda alignment: (alignment.get_query_length(), alignment.percent_identity), reverse=True)
# If we have an expected reference start to work with, then we find the
# first alignment in the list which occurs very near the expected reference
# start.
if expected_reference_start is not None:
for alignment in sorted_alignments:
discrepancy = abs(alignment.get_query_start_in_reference() - expected_reference_start)
if discrepancy < 5:
return alignment
# If we don't have an expected reference start to work with, then we are
# more limited. We can only give a result if there is a single full length
# alignment.
else:
full_length_alignments = []
for alignment in sorted_alignments:
fraction_present = alignment.get_query_length() / segment_length
if fraction_present == 1.0:
full_length_alignments.append(alignment)
if len(full_length_alignments) == 1:
return full_length_alignments[0]
# If the code got here, then no good match was found.
return None
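# Sketch with made-up alignments (not part of the original script): of two
# full-length hits, the one starting near the expected reference position
# (500) is chosen.
def _example_best_alignment():
    raw_hits = ['100\t99.0\t1\t100\t1\t100', '100\t98.0\t501\t600\t1\t100']
    alignments = [BlastAlignment(hit, 100) for hit in raw_hits]
    return get_best_blast_alignment(alignments, 100, 500)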
# This class holds a contig: its name, sequence, depth and links to other contigs.
class Contig:
def __init__(self, name, sequence):
self.fullname = name
name_parts = name.split('_')
self.number = int(name_parts[1])
cov_string = name_parts[5]
if cov_string[-1] == "'":
cov_string = cov_string[:-1]
self.cov = float(cov_string)
self.sequence = sequence
self.outgoing_linked_contigs = []
self.incoming_linked_contigs = []
self.path = Path()
def __str__(self):
return self.fullname
def __repr__(self):
return self.fullname
def renumber(self, new_number):
self.number = new_number
self.rebuild_full_name()
def rebuild_full_name(self):
"""
Remake the contig's full name using its current number, sequence and
depth.
"""
positive = self.is_positive()
self.fullname = 'NODE_' + str(self.number) + '_length_' + str(len(self.sequence)) + '_cov_' + str(self.cov)
if not positive:
self.fullname += "'"
def get_number_with_sign(self):
"""
Return the contig number in string form with a + or - at the end.
"""
num = str(self.number)
if self.is_positive():
num += '+'
else:
num += '-'
return num
def get_sign(self):
if self.is_positive():
return '+'
else:
return '-'
def is_positive(self):
return self.fullname[-1] != "'"
def get_header_with_links(self):
"""
Produce a FASTG header for the contig, with links and a line break at
the end.
"""
header_with_edges = '>' + self.fullname
if len(self.outgoing_linked_contigs) > 0:
header_with_edges += ':'
for linked_contig in self.outgoing_linked_contigs:
header_with_edges += linked_contig.fullname + ','
header_with_edges = header_with_edges[0:-1]
header_with_edges += ';\n'
return header_with_edges
def get_sequence_with_line_breaks(self):
sequence_remaining = self.sequence
sequence_with_line_breaks = ''
while len(sequence_remaining) > 60:
sequence_with_line_breaks += sequence_remaining[0:60] + '\n'
sequence_remaining = sequence_remaining[60:]
sequence_with_line_breaks += sequence_remaining + '\n'
return sequence_with_line_breaks
def find_segment_locations(self, segment):
return self.path.find_segment_locations(segment)
def find_segment_locations_plus_one(self, segment):
segment_locations = self.path.find_segment_locations(segment)
return [x+1 for x in segment_locations]
def determine_all_segment_locations(self, segment_sequences, graph_overlap):
"""
Determine the start and end coordinates of each segment in the contig.
This information is necessary before the contig can be split.
"""
# Create a temporary directory for doing BLAST searches.
if not os.path.exists('spades_contig_graph-temp'):
os.makedirs('spades_contig_graph-temp')
save_sequence_to_fasta_file(self.sequence, self.fullname, 'spades_contig_graph-temp/contig.fasta')
# Create a BLAST database for the contig.
makeblastdb_command = ['makeblastdb', '-dbtype', 'nucl', '-in', 'spades_contig_graph-temp/contig.fasta']
process = subprocess.Popen(makeblastdb_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
# Check that makeblastdb ran okay
if len(err) > 0:
print('\nmakeblastdb encountered an error:', file=sys.stderr)
print(err, file=sys.stderr)
quit()
# This value tracks where we expect the next segment to start, in
# contig coordinates. When it is set to None, that means we don't
# know.
expected_segment_start_in_contig = 1
for i in range(len(self.path.segment_list)):
segment = self.path.segment_list[i]
# Don't deal with assembly gaps just yet - we'll give them contig
# start/end coordinates after we've finished with the real
# segments.
if segment.startswith('gap'):
expected_segment_start_in_contig = None
continue
segment_sequence = segment_sequences[segment]
segment_length = len(segment_sequence)
save_sequence_to_fasta_file(segment_sequence, segment, 'spades_contig_graph-temp/segment.fasta')
# BLAST for the segment in the contig
sys.stdout.flush()
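            # The custom output format requests, in order: alignment length,
            # percent identity, contig (subject) start/end and segment (query)
            # start/end.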
blastn_command = ['blastn', '-task', 'blastn', '-db', 'spades_contig_graph-temp/contig.fasta', '-query', 'spades_contig_graph-temp/segment.fasta', '-outfmt', '6 length pident sstart send qstart qend']
process = subprocess.Popen(blastn_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
# Check that blastn ran okay
if len(err) > 0:
print('\nblastn encountered an error:', file=sys.stderr)
print(err, file=sys.stderr)
quit()
# Save the alignments in Python objects.
alignment_strings = out.splitlines()
blast_alignments = []
for alignment_string in alignment_strings:
alignment = BlastAlignment(alignment_string, segment_length)
blast_alignments.append(alignment)
# Get the alignment (if any) that best matches the segment sequence
# and position.
best_alignment = get_best_blast_alignment(blast_alignments, segment_length, expected_segment_start_in_contig)
# If we found an alignment, use it to determine the query's start
# and end coordinates in the contig.
if best_alignment is not None:
segment_start_in_contig = best_alignment.get_query_start_in_reference()
segment_end_in_contig = best_alignment.get_query_end_in_reference()
# If we failed to find an alignment, then we don't have segment
# coordinates and will be unable to split the contig at this point.
else:
segment_start_in_contig = None
segment_end_in_contig = None
self.path.contig_coordinates[i] = (segment_start_in_contig, segment_end_in_contig)
# Update the expected segment start for the next segment.
# Hopefully we can use the alignment to do this precisely.
if best_alignment is not None:
expected_segment_start_in_contig = (best_alignment.reference_end - graph_overlap + 1)
# If there is no alignment with which we can predict the next
# segment start, we can guess using the segment length.
elif expected_segment_start_in_contig is not None:
expected_segment_start_in_contig += (segment_length - graph_overlap)
os.remove('spades_contig_graph-temp/segment.fasta')
shutil.rmtree('spades_contig_graph-temp')
# Now we have to go back and assign contig start/end positions for any
# gap segments.
segment_count = len(self.path.segment_list)
for i in range(segment_count):
segment = self.path.segment_list[i]
if not segment.startswith('gap'):
continue
gap_start_in_contig = 1
gap_end_in_contig = len(self.sequence)
if i > 0:
previous_segment_end = self.path.contig_coordinates[i - 1][1]
if previous_segment_end is not None:
gap_start_in_contig = previous_segment_end - graph_overlap + 1
if gap_start_in_contig < 1:
gap_start_in_contig = 1
else:
gap_start_in_contig = None
if i < segment_count - 1:
next_segment_start = self.path.contig_coordinates[i + 1][0]
if next_segment_start is not None:
gap_end_in_contig = next_segment_start + graph_overlap - 1
if gap_end_in_contig > len(self.sequence):
gap_end_in_contig = len(self.sequence)
else:
gap_end_in_contig = None
self.path.contig_coordinates[i] = (gap_start_in_contig, gap_end_in_contig)
def get_links_to_other_contigs(self):
"""
Return a list of the links from this contig to any other contig.
"""
links_to_other_contigs = []
for outgoing_linked_contig in self.outgoing_linked_contigs:
links_to_other_contigs.append((self.path.get_last_segment(), outgoing_linked_contig.path.get_first_segment()))
for incoming_linked_contig in self.incoming_linked_contigs:
links_to_other_contigs.append((incoming_linked_contig.path.get_last_segment(), self.path.get_first_segment()))
return list(set(links_to_other_contigs))
def get_links_in_this_contig_and_to_other_contigs(self):
links = self.path.get_all_links()
links.extend(self.get_links_to_other_contigs())
return list(set(links))
# This class holds a path: the list of graph segments making up a contig.
class Path:
    def __init__(self, segment_list=None):
        # Avoid a shared mutable default argument between Path instances.
        self.segment_list = segment_list if segment_list is not None else []
        self.contig_coordinates = [(0, 0) for i in range(len(self.segment_list))]
def get_first_segment(self):
return self.segment_list[0]
def get_last_segment(self):
return self.segment_list[-1]
def __str__(self):
return str(self.segment_list) + ', ' + str(self.contig_coordinates)
def __repr__(self):
return str(self.segment_list) + ', ' + str(self.contig_coordinates)
def get_segment_count(self):
return len(self.segment_list)
def find_segment_locations(self, segment):
locations = []
for i in range(len(self.segment_list)):
if segment == self.segment_list[i]:
locations.append(i)
return locations
def get_paths_with_line_breaks(self):
output = ''
for segment in self.segment_list:
if segment.startswith('gap'):
output += ';\n'
else:
output += segment + ','
return output[:-1] + '\n'
    # This function returns a list of tuples, one for each link in the path.
def get_all_links(self):
links = []
for i in range(len(self.segment_list) - 1):
s_1 = self.segment_list[i]
s_2 = self.segment_list[i + 1]
links.append((s_1, s_2))
return links
class BlastAlignment:
def __init__(self, blast_string, query_length):
blast_string_parts = blast_string.split('\t')
self.percent_identity = float(blast_string_parts[1])
self.reference_start = int(blast_string_parts[2])
self.reference_end = int(blast_string_parts[3])
self.query_start = int(blast_string_parts[4])
self.query_end = int(blast_string_parts[5])
self.query_length = query_length
def get_query_length(self):
return self.query_end - self.query_start + 1
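    # The next two methods extrapolate where the full query would start and
    # end in the reference, even when the alignment covers only part of the
    # query.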
def get_query_start_in_reference(self):
query_missing_bases_at_start = self.query_start - 1
return self.reference_start - query_missing_bases_at_start
def get_query_end_in_reference(self):
query_missing_bases_at_end = self.query_length - self.query_end
return self.reference_end + query_missing_bases_at_end
def __lt__(self, other):
return self.get_query_start_in_reference() < other.get_query_start_in_reference()
def __str__(self):
return 'reference: ' + str(self.reference_start) + ' to ' + str(self.reference_end) + \
', query: ' + str(self.query_start) + ' to ' + str(self.query_end) + \
', identity: ' + str(self.percent_identity) + '%'
def __repr__(self):
return 'reference: ' + str(self.reference_start) + ' to ' + str(self.reference_end) + \
', query: ' + str(self.query_start) + ' to ' + str(self.query_end) + \
', identity: ' + str(self.percent_identity) + '%'
# Standard boilerplate to call the main() function to begin the program.
if __name__ == '__main__':
main()
|
rrwick/SPAdes-Contig-Graph
|
spades_contig_graph.py
|
Python
|
gpl-3.0
| 56,862
|
[
"BLAST"
] |
a85d537680c7214d07508164fe044abdb2f6d8cd7b0b67285690b8bf983b7826
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'GeoMapping'
db.delete_table('profiles_geomapping')
# Removing M2M table for field to_record on 'GeoMapping'
db.delete_table('profiles_geomapping_to_record')
# Adding M2M table for field mappings on 'GeoRecord'
db.create_table('profiles_georecord_mappings', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_georecord', models.ForeignKey(orm['profiles.georecord'], null=False)),
('to_georecord', models.ForeignKey(orm['profiles.georecord'], null=False))
))
db.create_unique('profiles_georecord_mappings', ['from_georecord_id', 'to_georecord_id'])
def backwards(self, orm):
# Adding model 'GeoMapping'
db.create_table('profiles_geomapping', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('from_record', self.gf('django.db.models.fields.related.ForeignKey')(related_name='mappings_as_from', to=orm['profiles.GeoRecord'])),
))
db.send_create_signal('profiles', ['GeoMapping'])
# Adding M2M table for field to_record on 'GeoMapping'
db.create_table('profiles_geomapping_to_record', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('geomapping', models.ForeignKey(orm['profiles.geomapping'], null=False)),
('georecord', models.ForeignKey(orm['profiles.georecord'], null=False))
))
db.create_unique('profiles_geomapping_to_record', ['geomapping_id', 'georecord_id'])
# Removing M2M table for field mappings on 'GeoRecord'
db.delete_table('profiles_georecord_mappings')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.datadomain': {
'Meta': {'object_name': 'DataDomain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
},
'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'profiles.georecord': {
'Meta': {'unique_together': "(('level', 'geo_id', 'custom_name', 'owner'),)", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '100', 'blank': 'True'})
},
'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'profiles.indicatordata': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'IndicatorData'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
},
'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
},
'profiles.time': {
'Meta': {'object_name': 'Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
}
}
complete_apps = ['profiles']
|
ProvidencePlan/Profiles
|
communityprofiles/profiles/oldmigrations/0007_auto__del_geomapping.py
|
Python
|
mit
| 11,245
|
[
"MOE"
] |
180ce7b876ed648a7abb416bd3858d927aed80ae3539618e4039e2e7897cc7f7
|
#!/usr/bin/env python
from __future__ import absolute_import
#----------------------------------------------------------------------
# Copyright (c) 2013-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''Main stitching workhorse. Handles calling the stitching service, orchestrating
parsing RSpecs and creating objects. See doStitching().'''
import copy
import datetime
import json
import logging
import os
import string
import sys
import time
from .. import oscript as omni
from .util import OmniError, naiveUTC
from .util import credparsing as credutils
from .util.files import readFile
from .util import handler_utils
from .util.json_encoding import DateTimeAwareJSONEncoder
from . import stitch
from .stitch import defs
from .stitch.ManifestRSpecCombiner import combineManifestRSpecs
from .stitch.objects import Aggregate, Link, Node, LinkProperty
from .stitch.RSpecParser import RSpecParser
from .stitch import scs
from .stitch.workflow import WorkflowParser
from .stitch.utils import StitchingError, StitchingCircuitFailedError, stripBlankLines, isRSpecStitchingSchemaV2, prependFilePrefix, StitchingStoppedError
from .stitch.VLANRange import *
from ..geni.util import rspec_schema
from ..geni.util.rspec_util import is_rspec_string, is_rspec_of_type, rspeclint_exists, validate_rspec
from ..geni.util.urn_util import URN, urn_to_string_format
from ..sfa.trust import gid
from ..sfa.util.xrn import urn_to_hrn, get_leaf
DCN_AM_TYPE = 'dcn' # geni_am_type value from AMs that use the DCN codebase
ORCA_AM_TYPE = 'orca' # geni_am_type value from AMs that use the Orca codebase
PG_AM_TYPE = 'protogeni' # geni_am_type / am_type from ProtoGENI based AMs
GRAM_AM_TYPE = 'gram' # geni_am_type value from AMs that use the GRAM codebase
FOAM_AM_TYPE = 'foam' # geni_am_type value from some AMs that use the FOAM codebase
OESS_AM_TYPE = 'oess' # geni_am_type value from AMs that use the OESS codebase
# Max # of times to call the stitching service
MAX_SCS_CALLS = 5
# File in which we save the slice cred so omni calls don't have to keep re-fetching it
# Valid substitutions: %username, %slicename, %slicehrn
SLICECRED_FILENAME = 'slice-%slicehrn-for-%username-cred.xml'
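# For example (hypothetical names), user 'jdoe' with slice hrn 'myslicehrn'
# would cache the credential as 'slice-myslicehrn-for-jdoe-cred.xml'.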
def urn_to_clean_hrn( urn ):
hrn, type = urn_to_hrn( urn )
hrn = handler_utils.remove_bad_characters( hrn )
return hrn, type
# The main stitching class. Holds all the state about our attempt at doing stitching.
class StitchingHandler(object):
'''Workhorse class to do stitching. See doStitching().'''
def __init__(self, opts, config, logger):
self.logger = logger
config['logger'] = logger
self.omni_config = config['omni']
self.config = config
self.parsedSCSRSpec = None
self.lastException = None
self.ams_to_process = []
self.opts = opts # command line options as parsed
self.slicecred = None # Cached slice credential to avoid re-fetching
self.savedSliceCred = None # path to file with slice cred if any
self.parsedURNNewAggs = [] # Aggs added from parsed URNs
# Get the framework
if not self.opts.debug:
# First, suppress all but WARN+ messages on console
lvl = logging.INFO
handlers = logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
lvl = handler.level
handler.setLevel(logging.WARN)
break
self.framework = omni.load_framework(self.config, self.opts)
if not self.opts.debug:
handlers = logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(lvl)
break
# FIXME: How many times is right to go back to the SCS
self.maxSCSCalls = MAX_SCS_CALLS
# Remember we got the extra info for this AM
self.amURNsAddedInfo = []
if self.opts.timeout == 0:
self.config['timeoutTime'] = datetime.datetime.max
self.logger.debug("Requested no timeout for stitcher.")
else:
self.config['timeoutTime'] = datetime.datetime.utcnow() + datetime.timedelta(minutes=self.opts.timeout)
self.logger.debug("Stitcher run will timeout at %s UTC.", self.config['timeoutTime'])
def doStitching(self, args):
'''Main stitching function.'''
# Parse the commandline args
# Hand off to omni if this is not a command stitcher handles
# Parse the request rspec
# Check if this request is bound, multiAM, uses GRE links, includes stitched links
# If the request is not a bound multi-AM RSpec, hand off to Omni
# - ensure the -a args are set to match the RSpec
# Check this stitching request is safe, and we have a valid slice
# Create the SCS instance if needed
# Then call mainStitchingLoop() to do the real work of calling the SCS and then
# getting each aggregate to make a reservation.
        # On keyboard interrupt, delete any partial reservation
# On success, create and save the combined manifest RSpec, and
# pull out summary resource expiration information and a summary of the run,
# and return (pretty string, combined manifest rspec)
# On error, log something appropriate and exit
# Always be sure to clean up temporary files
# Parse the commandline args
# Hand off to omni if this is not a command stitcher handles
# Get request RSpec
request = None
command = None
self.slicename = None
if len(args) > 0:
command = args[0]
if len(args) > 1:
self.slicename = args[1]
if command and command.strip().lower() in ('describe', 'listresources', 'delete', 'deletesliver') and self.slicename:
if (not self.opts.aggregate or len(self.opts.aggregate) == 0) and not self.opts.useSliceAggregates:
self.addAggregateOptions(args)
if not self.opts.aggregate or len(self.opts.aggregate) == 0:
# Call the CH to get AMs in this slice
oldUSA = self.opts.useSliceAggregates
self.opts.useSliceAggregates = True
self.opts.sliceName = self.slicename
(aggs, message) = handler_utils._listaggregates(self)
self.opts.useSliceAggregates = oldUSA
if len(aggs) > 0:
self.opts.aggregate = []
for agg in aggs.values():
self.logger.debug("Adding AM %s retrieved from CH", agg)
self.opts.aggregate.append(agg)
else:
self.logger.debug("No AMs from CH: %s", message)
if not self.opts.aggregate or len(self.opts.aggregate) == 0:
# No resources known to be in any AMs. Try again specifying explicit -a arguments.
msg = "No known reservations at any aggregates. Try again with explicit -a arguments."
self.logger.info(msg)
return (msg, None)
if self.opts.aggregate and len(self.opts.aggregate) == 1:
# Omni can handle this
self.logger.debug("Passing call to Omni...")
return self.passToOmni(args)
self.opts.useSliceAggregates = False
if command.strip().lower() in ('describe', 'listresources'):
# This is a case of multiple AMs whose manifests should be combined
return self.rebuildManifest()
# elif command.strip().lower() in ('delete', 'deletesliver'):
else:
# Lets someone use stitcher to delete at multiple AMs when the API version is mixed
return self.doDelete()
if not command or command.strip().lower() not in ('createsliver', 'allocate'):
# Stitcher only handles createsliver or allocate. Hand off to Omni.
if self.opts.fakeModeDir:
msg = "In fake mode. Otherwise would call Omni with args %r" % args
self.logger.info(msg)
return (msg, None)
else:
self.logger.debug("Passing call to Omni...")
# Add -a options from the saved file, if none already supplied
self.addAggregateOptions(args)
return self.passToOmni(args)
# End of block to check the command
if len(args) > 2:
request = args[2]
if len(args) > 3:
self.logger.warn("Arguments %s ignored", args[3:])
#self.logger.debug("Command=%s, slice=%s, rspec=%s", command, self.slicename, request)
# Parse the RSpec
requestString = ""
if request:
self.rspecParser = RSpecParser(self.logger)
self.parsedUserRequest = None
try:
# read the rspec into a string, and add it to the rspecs dict
requestString = handler_utils._derefRSpecNick(self, request)
except Exception, exc:
msg = "Unable to read rspec file '%s': %s" % (request, str(exc))
if self.opts.devmode:
self.logger.warn(msg)
else:
raise OmniError(msg)
# # Test if the rspec is really json containing an RSpec, and pull out the right thing
# requestString = amhandler.self._maybeGetRSpecFromStruct(requestString)
# confirmGoodRequest
self.confirmGoodRSpec(requestString)
self.logger.debug("Valid GENI v3 request RSpec")
# parseRequest
self.parsedUserRequest = self.rspecParser.parse(requestString)
else:
raise OmniError("No request RSpec found, or slice name missing!")
# Examine the RSpec to see what kind of request it is
self.isStitching = self.mustCallSCS(self.parsedUserRequest)
self.isGRE = self.hasGRELink(self.parsedUserRequest)
self.isMultiAM = False
# If any node is unbound, then all AMs will try to allocate it. So bail
unboundNode = self.getUnboundNode()
self.isBound = (unboundNode is None)
if self.isBound:
self.logger.debug("Request appears to be fully bound")
if (self.isGRE or self.isStitching) and not self.isMultiAM:
self.logger.debug("Nodes seemed to list <2 AMs, but rspec appears GRE or stitching, so it is multi AM")
self.isMultiAM = True
# FIXME:
# If it is bound, make sure all the implied AMs are known (have a URL)
# FIXME:
# If any node is unbound: Check that there is exactly 1 -a AM that is not one of the AMs a node is bound to, and then
# edit the request to bind the nodes to that AM.
if self.isBound and not self.isMultiAM and self.opts.fixedEndpoint:
self.logger.debug("Got --fixedEndpoint, so pretend this is multi AM")
self.isMultiAM = True
# If this is not a bound multi AM RSpec, just let Omni handle this.
if not self.isBound or not self.isMultiAM:
self.logger.info("Not a bound multi-aggregate request - let Omni handle this.")
# Check the -a arguments and compare with the AMs inferred from the request RSpec
# Log on problems and try to set the -a arguments appropriately
self.cleanDashAArgs(unboundNode)
if self.opts.noReservation:
self.logger.info("Not reserving resources")
sys.exit()
# Try to force a call that falls through to omni to log at info level,
# or whatever level the main stitcher is using on the console
ologger = logging.getLogger("omni")
myLevel = logging.INFO
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
myLevel = handler.level
break
for handler in ologger.handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(myLevel)
break
# Warning: If this is createsliver and you specified multiple aggregates,
# then omni only contacts 1 aggregate. That is likely not what you wanted.
return omni.call(args, self.opts)
# End of block to let Omni handle unbound or single AM requests
# self.logger.debug("Edited request RSpec: %s", self.parsedUserRequest.getLinkEditedDom().toprettyxml())
if self.opts.explicitRSpecVersion:
self.logger.info("All manifest RSpecs will be in GENI v3 format")
self.opts.explicitRSpecVersion = False
self.opts.rspectype = ["GENI", '3']
# FIXME: Confirm request is not asking for any loops
self.confirmSafeRequest()
# Remove any -a arguments from the opts so that when we later call omni
# the right thing happens
self.opts.aggregate = []
# FIXME: Maybe use threading to parallelize confirmSliceOK and the 1st SCS call?
# Get username for slicecred filename
self.username = get_leaf(handler_utils._get_user_urn(self.logger, self.framework.config))
if not self.username:
raise OmniError("Failed to find your username to name your slice credential")
# Ensure the slice is valid before all those Omni calls use it
(sliceurn, sliceexp) = self.confirmSliceOK()
# Here is where we used to add the expires attribute. No
# longer necessary (nor a good idea).
# Create the SCS instance if it will be needed
if self.isStitching and not self.opts.noSCS:
if not "geni-scs.net.internet2.edu:8443" in self.opts.scsURL:
self.logger.info("Using SCS at %s", self.opts.scsURL)
self.scsService = scs.Service(self.opts.scsURL, key=self.framework.key, cert=self.framework.cert, timeout=self.opts.ssltimeout, verbose=self.opts.verbosessl)
self.scsCalls = 0
if self.isStitching and self.opts.noSCS:
self.logger.info("Not calling SCS on stitched topology per commandline option.")
# Create singleton that knows about default sliver expirations by AM type
defs.DefaultSliverExpirations.getInstance(self.config, self.logger)
# Compare the list of AMs in the request with AMs known
# to the SCS. Any that the SCS does not know means the request
# cannot succeed if those are AMs in a stitched link
# self.checkSCSAMs()
# Call SCS and then do reservations at AMs, deleting or retrying SCS as needed
# Note that it does this with mainStitchingLoop which recurses if needed.
# Catch Ctrl-C, deleting partial reservations.
lvl = None
try:
# Passing in the request as a DOM - after allowing edits as necessary. OK?
lastAM = self.mainStitchingLoop(sliceurn, self.parsedUserRequest.getLinkEditedDom())
# Construct and save out a combined manifest
combinedManifest, filename, retVal = self.getAndSaveCombinedManifest(lastAM)
# If some AMs used APIv3+, then we only did an allocation. Print something
msg = self.getProvisionMessage()
if msg:
self.logger.info(msg)
retVal += msg + "\n"
# Print something about sliver expiration times
msg = self.getExpirationMessage()
if msg:
self.logger.info(msg)
retVal += msg + "\n"
if filename:
msg = "Saved combined reservation RSpec at %d AM(s) to file '%s'" % (len(self.ams_to_process), os.path.abspath(filename))
self.logger.info(msg)
retVal += msg
except KeyboardInterrupt, kbi:
if lvl:
self.logger.setLevel(lvl)
msg = 'Stitching interrupted!'
if self.lastException:
msg += ' ' + str(self.lastException)
self.logger.error(msg)
import traceback
self.logger.debug("%s", traceback.format_exc())
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
self.logger.warn("Per command-line option, not deleting existing reservations.")
msg = self.endPartiallyReserved(kbi, aggs=self.ams_to_process)
# Here this method need not exit or raise. But should log something.
# sys.exit is called later.
self.logger.warn(msg)
elif self.ams_to_process is not None:
class DumbLauncher():
def __init__(self, agglist):
self.aggs = agglist
(delretText, delretStruct) = self.deleteAllReservations(DumbLauncher(self.ams_to_process))
for am in self.ams_to_process:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
sys.exit(-1)
except StitchingError, se:
if lvl:
self.logger.setLevel(lvl)
# FIXME: Return anything different for stitching error?
# Do we want to return a geni triple struct?
if self.lastException:
msg = "Stitching Failed. %s" % str(se)
if str(self.lastException) not in str(se):
msg += ". Root cause error: %s" % str(self.lastException)
self.logger.error(msg)
newError = StitchingError(msg)
se = newError
if "Requested no reservation" in str(se) or isinstance(se, StitchingStoppedError):
print str(se)
self.logger.debug(se)
sys.exit(0)
else:
raise se
finally:
# Save a file with the aggregates used in this slice
self.saveAggregateList(sliceurn)
# Clean up temporary files
self.cleanup()
self.dump_objects(self.parsedSCSRSpec, self.ams_to_process)
# Construct return message
retMsg = self.buildRetMsg()
# FIXME: What do we want to return?
# Make it something like createsliver / allocate, with the code/value/output triple plus a string
# On success
# Request from SCS that worked? Merged request as I modified?
# Merged manifest
# List of AMs, the URLs, and their API versions?
# Some indication of the slivers and their status and expiration at each AM?
# In particular, which AMs need a provision and poa geni_start
# ?? Stuff parsed from manifest?? EG some representation of each path with node list/count at each AM and VLAN tag for each link?, maybe list of the AMs added by the SCS?
#On error
# Error code / message (standard GENI triple)
# If the error was after SCS, include the expanded request from the SCS
# If particular AMs had errors, ID those AMs and the errors
self.logger.debug(retMsg)
return (retMsg, combinedManifest)
# End of doStitching()
def prepObjectsForNonCreate(self):
        # Initialize variables and data structures when they won't be created by doing createsliver,
        # e.g. to do a describe/listresources/delete/deletesliver. See rebuildManifest().
# Get username for slicecred filename
self.username = get_leaf(handler_utils._get_user_urn(self.logger, self.framework.config))
if not self.username:
raise OmniError("Failed to find your username to name your slice credential")
# Ensure the slice is valid before all those Omni calls use it
(sliceurn, sliceexp) = self.confirmSliceOK()
# We don't have any RSpecs
self.parsedUserRequest = None
self.parsedSCSRSpec = None
# Ensure all AM URNs in the commandline are Aggregate objects in ams_to_process
self.createObjectsFromOptArgs()
# Remove any -a arguments from the opts so that when we later call omni
# the right thing happens
self.opts.aggregate = []
# Add extra info about the aggregates to the AM objects
self.add_am_info(self.ams_to_process)
# If requesting from >1 ExoGENI AM, then use ExoSM. And use ExoSM only once.
# FIXME!!
# Will this correctly query the ExoSM vs the individual rack?
# Or should I always query both the individual rack and the ExoSM (once)?
self.ensureOneExoSM()
# Save slice cred and timeoutTime on each AM
for am in self.ams_to_process:
if self.slicecred:
# Hand each AM the slice credential, so we only read it once
am.slicecred = self.slicecred
# Also hand the timeout time
am.timeoutTime = self.config['timeoutTime']
am.userRequested = True
self.rspecParser = RSpecParser(self.logger)
def doDelete(self):
# Do delete at APIv3 AMs and deletesliver at v2 only AMs and combine the results
self.prepObjectsForNonCreate()
#self.logger.debug("Done with prep for delete. AMs: %s", self.ams_to_process)
# Fake mark that each AM had a reservation so we try the delete
for am in self.ams_to_process:
am.manifestDom = True
# Let deleteAllReservations call delete on each aggregate instance individually, and combine the results
# Could have instead produced 2 omni calls of course....
# Note that results are combined in a kind of odd way:
# All results are keyed by am.url. For v2 AMs, we try to make it True or False
# v2 return used to be (successURLs, failedURLs)
# But that's hard to preserve
# So instead, the v2 return is True if the AM was found in the success list, False if found in Failed list,
# and otherwise the return under the am.url is whatever the AM originally returned.
# Note that failing to find the AM url may mean it's a variant of the URL
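        # Sketch of the combined structure (hypothetical URLs):
        #   {'https://am1.example.net/am/2': True,   # v2 AM found in the success list
        #    'https://am2.example.net/am/3': {...}}  # v3 AM's raw delete return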
class DumbLauncher():
def __init__(self, agglist):
self.aggs = agglist
(text, struct) = self.deleteAllReservations(DumbLauncher(self.ams_to_process))
self.logger.debug("Result from deleteAll: %s", text)
# deletesliver is (successList of AM URLs, failList)
# delete is a dictionary by AM URL of the raw APIv3 return
# This is text, dictionary by AM URL of [APIv3 return or
return (text, struct)
# End of doDelete()
def rebuildManifest(self):
# Process a listresources or describe call on a slice
# by fetching all the manifests and combining those into a new combined manifest
# Save off the various RSpecs to files.
# Return is consistent with Omni: (string, object)
# Describe return should be by URL with the full return triple
# Put the combined manifest under 'combined'
# ListResources return should be dict by URN,URL of RSpecs
# Put the combined manifest under ('combined','combined')
self.prepObjectsForNonCreate()
# Init some data structures
lastAM = None
workflow_parser = WorkflowParser(self.logger)
retStruct = dict()
# Now actually get the manifest for each AM
for am in self.ams_to_process:
opts_copy = copy.deepcopy(self.opts)
opts_copy.aggregate = [(am.nick if am.nick else am.url)]
self.logger.info("Gathering current reservations at %s...", am)
rspec = None
try:
rspec = am.listResources(opts_copy, self.slicename)
except StitchingError, se:
self.logger.debug("Failed to list current reservation: %s", se)
if am.api_version == 2:
retStruct[(am.urn,am.url)] = rspec
else:
retStruct[am.url] = {'code':dict(),'value':rspec,'output':None}
if am.isPG:
retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'protogeni', 'am_code':0}
elif am.dcn:
retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'dcn', 'am_code':0}
elif am.isEG:
retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'orca', 'am_code':0}
elif am.isGRAM:
retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'gram', 'am_code':0}
else:
retStruct[am.url]['code'] = {'geni_code':0, 'am_code':0}
if rspec is None:
continue
# Look for and save any sliver expiration
am.setSliverExpirations(handler_utils.expires_from_rspec(rspec, self.logger))
# Fill in more data structures using this RSpec to the extent it helps
parsedMan = self.rspecParser.parse(rspec)
if self.parsedUserRequest is None:
self.parsedUserRequest = parsedMan
if self.parsedSCSRSpec is None:
self.parsedSCSRSpec = parsedMan
# This next, if I had a workflow, would create the hops
            # on the aggregates. As is, it does very little
# Without the hops on the aggregates, we don't merge hops in the stitching extension
workflow_parser.parse({}, parsedMan)
# Make sure the ExoSM lists URN synonyms for all the EG component managers
# that don't have their own Agg instance
# FIXME: Anything similar I need to do for other AMs like gram?
if am.isExoSM:
for urn in parsedMan.amURNs:
# self.logger.debug("Man from %s had AM URN %s", am, urn)
if urn in Aggregate.aggs:
# self.logger.debug("Already is an AM")
continue
syns = Aggregate.urn_syns(urn)
found = False
for urn2 in syns:
if urn2 in Aggregate.aggs:
found = True
urn = urn2
# self.logger.debug(".. which is an AM under syn %s", urn)
break
if not found:
if not (urn.strip().lower().endswith("+cm") or urn.strip().lower().endswith("+am")):
# Doesn't look like an AM URN. Skip it.
self.logger.debug("URN parsed from man doesn't look like an AM URN: %s", urn)
continue
# self.logger.debug("... is not any existing AM")
urnO = URN(urn=urn)
urnAuth = urnO.getAuthority()
if urnAuth.startswith("exogeni.net"):
# self.logger.debug("Is an ExoGENI URN. Since this is the exoSM, add it as a urn syn")
am.urn_syns.append(urn)
# end of loop over AM URNs
# End of block to handle ExoSM
# Try to use the info I do have to construct hops on aggregates
# Note this has to be redone on the combined manifest later.
# May need to tell it to not swap hops?
self.fixHopRefs(parsedMan, am)
self.logger.debug("%s has %d hops", am, len(am.hops))
# Parse the manifest and fill in the manifest suggested/range values
try:
from xml.dom.minidom import parseString
am.manifestDom = parseString(rspec)
am.requestDom = am.manifestDom
# Fill in the manifest values on hops
for hop in am.hops:
self.logger.debug("Updating hop %s", hop)
# 7/12/13: FIXME: EG Manifests reset the Hop ID. So you have to look for the link URN
if am.isEG:
self.logger.debug("Parsing EG manifest with special method")
range_suggested = am.getEGVLANRangeSuggested(am.manifestDom, hop._hop_link.urn, hop.path.id)
else:
range_suggested = am.getVLANRangeSuggested(am.manifestDom, hop._id, hop.path.id)
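# Note (inferred from the index accesses below): range_suggested is treated as a 3-element
# sequence of [pathGlobalId, vlanRangeAvailability, suggestedVLANRange].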
pathGlobalId = None
if range_suggested and len(range_suggested) > 0:
if range_suggested[0] is not None:
pathGlobalId = str(range_suggested[0]).strip()
if pathGlobalId and pathGlobalId is not None and pathGlobalId != "None" and pathGlobalId != '':
if hop.globalId and hop.globalId is not None and hop.globalId != "None" and hop.globalId != pathGlobalId:
self.logger.warn("Changing Hop %s global ID from %s to %s", hop, hop.globalId, pathGlobalId)
hop.globalId = pathGlobalId
else:
self.logger.debug("Got no global id")
else:
#self.logger.debug("Got nothing in range_suggested first slot")
pass
if len(range_suggested) > 1 and range_suggested[1] is not None:
rangeValue = str(range_suggested[1]).strip()
if not rangeValue or rangeValue in ('null', 'any', 'None'):
self.logger.debug("Got no valid vlan range on %s: %s", hop, rangeValue)
else:
rangeObject = VLANRange.fromString(rangeValue)
hop._hop_link.vlan_range_manifest = rangeObject
self.logger.debug("Set range manifest: %s", rangeObject)
else:
self.logger.debug("Got no spot for a range value")
if len(range_suggested) > 2 and range_suggested[2] is not None:
suggestedValue = str(range_suggested[2]).strip()
if not suggestedValue or suggestedValue in ('null', 'any', 'None'):
self.logger.debug("Got no valid vlan suggestion on %s: %s", hop, suggestedValue)
else:
suggestedObject = VLANRange.fromString(suggestedValue)
hop._hop_link.vlan_suggested_manifest = suggestedObject
self.logger.debug("Set suggested manifest: %s", hop._hop_link.vlan_suggested_manifest)
else:
self.logger.debug("Got no spot for a suggested value")
else:
self.logger.debug("Got no range_suggested at all")
# End block for found the range and suggested from the RSpec for this hop
# end of loop over hops
except Exception, e:
self.logger.debug("Failed to parse rspec: %s", e)
continue
if am.manifestDom is not None:
lastAM = am
self.logger.debug("Setting lastAM to %s", lastAM)
# Done looping over AMs
if lastAM is None:
# Failed to get any manifests, so bail
raise StitchingError("Failed to retrieve resource listing - see logs")
# Construct and save out a combined manifest
combinedManifest, filename, retVal = self.getAndSaveCombinedManifest(lastAM)
if self.opts.api_version == 2:
retStruct[('combined','combined')] = combinedManifest
else:
retStruct['combined'] = {'code':{'geni_code':0},'value':combinedManifest,'output':None}
parsedCombined = self.rspecParser.parse(combinedManifest)
# Fix up the parsed combined RSpec to ensure we use the proper
# hop instances and all the objects point to each other
self.fixHopRefs(parsedCombined)
self.dump_objects(parsedCombined, self.ams_to_process)
# Print something about sliver expiration times
msg = self.getExpirationMessage()
if msg:
self.logger.info(msg)
retVal += msg + "\n"
if filename:
msg = "Saved combined reservation RSpec at %d AM(s) to file '%s'" % (len(self.ams_to_process), os.path.abspath(filename))
self.logger.info(msg)
retVal += msg
# Construct return message
retMsg = self.buildRetMsg()
self.logger.debug(retMsg)
# # Simplest return: just the combined rspec
# return (retMsg, combinedManifest)
# API method compliant returns
# Describe return should be by URL with the full return triple
# Put the combined manifest under 'combined'
# ListResources return should be dict by URN,URL of RSpecs
# Put the combined manifest under ('combined','combined')
return (retMsg, retStruct)
# End of rebuildManifest()
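# Illustrative shape of the retStruct built above (keys and URLs are placeholders):
#   APIv2:  {(am_urn, am_url): "<manifest rspec>", ('combined','combined'): "<combined rspec>"}
#   APIv3+: {am_url: {'code': {'geni_code': 0, ...}, 'value': "<manifest rspec>", 'output': None},
#            'combined': {'code': {'geni_code': 0}, 'value': "<combined rspec>", 'output': None}}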
def fixHopRefs(self, parsedManifest, thisAM=None):
# Use a parsed RSpec to fix up the Hop and Aggregate objects that would otherwise
# be fixed up using the workflow.
# Used by rebuildManifest()
if not parsedManifest or not parsedManifest.stitching:
return
for path in parsedManifest.stitching.paths:
for hop in path.hops:
if hop.path != path:
hop.path = path
# Fill in the Aggregate instance on the hop
if not hop.aggregate:
self.logger.debug("%s missing aggregate", hop)
urn = hop.urn
if not urn or not '+' in urn:
self.logger.debug("%s had invalid urn", hop)
continue
spl = urn.split('+')
if len(spl) < 4:
self.logger.debug("%s URN malformed", hop)
continue
urnAuth = urn_to_string_format(spl[1])
urnC = URN(authority=urnAuth, type='authority', name='am')
hopAgg = Aggregate.find(urnC.urn)
hop.aggregate = hopAgg
self.logger.debug("Found %s", hopAgg)
if thisAM and hop.aggregate != thisAM:
# self.logger.debug("%s not for this am (%s) - continue", hop, thisAM)
continue
if not hop.aggregate in hop.path.aggregates:
self.logger.debug("%s's AM not on its path - adding", hop)
hop.path.aggregates.add(hop.aggregate)
# Find the AM for this hop
if not thisAM:
anAM = None
for am in self.ams_to_process:
if hop.aggregate == am:
anAM = am
break
if not anAM:
return
am = anAM
else:
am = thisAM
# Now ensure we have the right objects
found=False
for hop2 in am.hops:
# Ensure use right version of the Hop object
if hop2.urn == hop.urn and hop2.path.id == hop.path.id:
self.logger.debug("%s already listed by its AM", hop)
if hop != hop2:
self.logger.debug("... but the 2 hop instances are different!")
# Do I need to swap instances?
if hop2._hop_link.vlan_suggested_manifest != hop._hop_link.vlan_suggested_manifest:
self.logger.debug("Swapping out the path version of the hop to use the AM version instead, which has sug man: %s", hop2._hop_link.vlan_suggested_manifest)
# use hop2 not hop
# edit path.hops
newHops = []
for hop3 in path.hops:
if hop3 == hop:
newHops.append(hop2)
else:
newHops.append(hop3)
path.hops = newHops
else:
# both hops have same manifest value, shouldn't matter
self.logger.debug(" ... but have same suggested manifest, so leave it alone")
found = True
break
# AM didn't know the hop, so add it
if not found:
self.logger.debug("%s not listed on it's AM's hops - adding", hop)
am.add_hop(hop)
found = False
# And make sure the AM has the Path too
for path2 in am.paths:
if hop.path.id == path2.id:
found = True
self.logger.debug("%s 's path already listed by its aggregate %s", hop, hop.aggregate)
if hop.path != path2:
self.logger.debug("... but FIXME the 2 path instances are different!!")
# FIXME: Do I need to swap instances?
break
if not found:
self.logger.debug("%s 's path not listed on the AM's paths, adding", hop)
am.add_path(hop.path)
# End of block to ensure the AM has the hop
# End of loop over hops
# End of loop over paths
# End of method fixHopRefs
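# Note: the instance swapping above matters because the per-AM Hop objects carry the manifest
# VLAN values (vlan_suggested_manifest); keeping path.hops pointed at those same instances
# presumably lets later processing (e.g. combining manifests) see one consistent set of tags.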
def passToOmni(self, args):
# Pass the call on to Omni, using the given args. Reset logging appropriately
# Return is the omni.call return
# Try to force a call that falls through to omni to log at info level,
# or whatever level the main stitcher is using on the console
ologger = logging.getLogger("omni")
myLevel = logging.INFO
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
myLevel = handler.level
break
for handler in ologger.handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(myLevel)
break
return omni.call(args, self.opts)
# End of passToOmni
def buildRetMsg(self):
# Build the return message from this handler on success
# Typically counting links and aggregates.
amcnt = len(self.ams_to_process)
scs_added_amcnt = 0
pathcnt = 0
grecnt = 0
if self.parsedSCSRSpec and self.parsedSCSRSpec.stitching:
pathcnt = len(self.parsedSCSRSpec.stitching.paths)
if self.parsedSCSRSpec and self.parsedSCSRSpec.links:
for link in self.parsedSCSRSpec.links:
if link.typeName in (link.GRE_LINK_TYPE, link.EGRE_LINK_TYPE):
grecnt += 1
for am in self.ams_to_process:
if not am.userRequested:
scs_added_amcnt = scs_added_amcnt + 1
greStr = ""
if grecnt > 0:
greStr = ", creating %d GRE link(s)" % grecnt
stitchStr = ""
if pathcnt > 0:
stitchStr = ", creating %d stitched link(s)" % pathcnt
if scs_added_amcnt > 0:
retMsg = "Success: Reserved resources in slice %s at %d Aggregates (including %d intermediate aggregate(s) not in the original request)%s%s." % (self.slicename, amcnt, scs_added_amcnt, greStr, stitchStr)
else:
retMsg = "Success: Reserved resources in slice %s at %d Aggregates%s%s." % (self.slicename, amcnt, greStr, stitchStr)
return retMsg
# End of buildRetMsg
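# Example of a message built above (slice name and counts are illustrative):
#   "Success: Reserved resources in slice myslice at 3 Aggregates (including 1 intermediate
#    aggregate(s) not in the original request), creating 2 stitched link(s)."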
def cleanDashAArgs(self, unboundNode):
# Check and clean the -a args relative to the request RSpec
# logging on issues found
# Used in doStitching
if unboundNode is not None:
self.logger.info("Node '%s' is unbound in request - all nodes must be bound for stitcher, as all aggregates get the same request RSpec" % unboundNode)
if self.isBound:
if self.opts.aggregate is None or len(self.opts.aggregate) == 0:
# A bound non multi AM RSpec but no AM specified. Fill in the -a appropriately
if self.parsedUserRequest.amURNs and len(self.parsedUserRequest.amURNs) > 0:
amURN = self.parsedUserRequest.amURNs.pop()
(nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, amURN)
if url and url.strip() != '':
self.logger.debug("Setting -a argument for Omni: Found RSpec AM %s in omni_config AM nicknames: %s", amURN, nick)
self.opts.aggregate = [nick]
else:
self.logger.debug("Could not find AM from RSpec for URN %s - Omni will have no -a argument", amURN)
#else:
# weird and really shouldn't happen
elif len(self.opts.aggregate) == 1:
# If the AM specified is not what it is bound to, then what? complain? fix it? do it anyhow?
# else this is good
if self.parsedUserRequest.amURNs and len(self.parsedUserRequest.amURNs) > 0:
amURN = self.parsedUserRequest.amURNs.pop()
(nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, amURN)
amNick = None
amURL = None
if url and url.strip() != '':
self.logger.debug("Found RSpec AM %s in omni_config AM nicknames: %s", amURN, nick)
amNick = nick
amURL = url
if not self.opts.debug:
# Suppress most log messages on the console for doing the nickname lookup
lvl = logging.INFO
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
lvl = handler.level
handler.setLevel(logging.WARN)
break
url1,urn1 = handler_utils._derefAggNick(self, self.opts.aggregate[0])
if not self.opts.debug:
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(lvl)
break
if (amNick and amNick == self.opts.aggregate[0]) or (amURL and amURL == url1) or (amURN == urn1):
self.logger.debug("Supplied -a matches the AM found in the RSpec: %s=%s", amURN, self.opts.aggregate[0])
elif amNick and url1:
# A valid comparison that didn't find anything
self.logger.warn("RSpec appears bound to a different AM than you are submitting it to. RSpec specifies AM %s (%s) but -a argument specifies %s (%s)! Continuing anyway....", amURN, amNick, self.opts.aggregate[0], url1)
# FIXME: Correct it? Bail?
# else:
# Didn't get all the values for a proper comparison
# else:
# No AMs parsed out of the RSpec. I don't think this should happen
else:
# the RSpec appeared to be single AM but multiple AMs specified.
# Perhaps check if the bound AM is at least one of them?
# Complain? Bail? Fix it? Continue?
self.logger.debug("RSpec appeared bound to a single AM but multiple -a arguments specified?")
if self.parsedUserRequest.amURNs and len(self.parsedUserRequest.amURNs) > 0:
amURN = self.parsedUserRequest.amURNs.pop()
(nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, amURN)
amNick = None
amURL = None
if url and url.strip() != '':
self.logger.debug("Found RSpec AM %s URL from omni_config AM nicknames: %s", amURN, nick)
amNick = nick
amURL = url
# Get the urn,urn for each -a and see if it is in the RSpec
found = False
for dasha in self.opts.aggregate:
if not self.opts.debug:
# Suppress most log messages on the console for doing the nickname lookup
lvl = logging.INFO
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
lvl = handler.level
handler.setLevel(logging.WARN)
break
url1,urn1 = handler_utils._derefAggNick(self, dasha)
if not self.opts.debug:
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(lvl)
break
if (amNick and amNick == dasha) or (amURL and amURL == url1) or (amURN == urn1):
self.logger.debug("1 of the supplied -a args matches the AM found in the RSpec: %s", amURN)
found = True
break
# End of loop over -a args
if not found:
self.logger.warn("RSpec appears bound to a different AM than the multiple AMs you are submitting it to. RSpec specifies AM %s (%s) but -a argument specifies %s! Continuing anyway....", amURN, amNick, self.opts.aggregate)
else:
self.logger.warn("RSpec appeared bound to a single AM (%s) but multiple -a arguments specified? %s", amURN, self.opts.aggregate)
self.logger.info("... continuing anyway")
# FIXME: Correct it? Bail?
# end of multiple AMs found in parsed RSpec
# end of multi AMs specified with -a
# end of if self.isBound
# End of cleanDashAArgs
def getAndSaveCombinedManifest(self, lastAM):
# Construct a unified manifest and save it to a file
# Used in doStitching
# Return combinedManifest, name of file where saved (or None), retVal string partially constructed to return
# include AMs, URLs, API versions
# Avoid EG manifests - they are incomplete
# Avoid DCN manifests - they do funny things with namespaces (ticket #549)
# GRAM AMs seems to also miss nodes. Avoid if possible.
if lastAM is None and len(self.ams_to_process) > 0:
lastAM = self.ams_to_process[-1]
if lastAM is not None and (lastAM.isEG or lastAM.dcn or lastAM.isGRAM or lastAM.manifestDom is None):
self.logger.debug("Last AM was an EG or DCN or GRAM AM. Find another for the template.")
i = 1
while (lastAM.isEG or lastAM.dcn or lastAM.isGRAM or lastAM.manifestDom is None) and i <= len(self.ams_to_process):
# This has lost some hops and messed up hop IDs. Don't use it as the template
# I'd like to find another AM we did recently
lastAM = self.ams_to_process[-i]
i = i + 1
if lastAM.isEG or lastAM.dcn or lastAM.isGRAM or lastAM.manifestDom is None:
self.logger.debug("Still had an EG or DCN or GRAM template AM - use the raw SCS request")
lastAM = None
# I have a slight preference for a PG AM. See if we have one
if lastAM is not None and not lastAM.isPG and len(self.ams_to_process) > 1:
for am in self.ams_to_process:
if am != lastAM and am.isPG and am.manifestDom is not None:
lastAM = am
break
combinedManifest = self.combineManifests(self.ams_to_process, lastAM)
# FIXME: Handle errors. Maybe make return use code/value/output struct
# If error and have an expanded request from SCS, include that in output.
# Or if particular AM had errors, ID the AMs and errors
# FIXME: This prepends a header on an RSpec that might already have a header
# -- maybe replace any existing header
# FIXME: We force -o here and keep it from logging the
# RSpec. Do we need an option to not write the RSpec to a file?
ot = self.opts.output
if not self.opts.tostdout:
self.opts.output = True
if not self.opts.debug:
# Suppress all but WARN on console here
lvl = self.logger.getEffectiveLevel()
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
lvl = handler.level
handler.setLevel(logging.WARN)
break
retVal, filename = handler_utils._writeRSpec(self.opts, self.logger, combinedManifest, self.slicename, 'multiam-combined', '', None)
if not self.opts.debug:
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(lvl)
break
self.opts.output = ot
return combinedManifest, filename, retVal
# End of getAndSaveCombinedManifest
def getExpirationMessage(self):
# Return a message to return/print about the expiration of reservations at aggregates.
# Used in doStitching
# FIXME: 15min? 30min?
# FIXME: Old code printed per agg exp at debug level
sortedAggs = Aggregate.sortAggsByExpirations(15) # 15min apart counts as same
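# Note (inferred from the usage below): sortedAggs is a list of 'slots', each slot a list of
# Aggregates whose sliver expirations fall within 15 minutes of each other, ordered
# soonest-expiring first; a slot of AMs that reported no expiration may appear first.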
firstTime = None
firstCount = 0
firstLabel = ""
secondTime = None
secondCount = 0
secondLabel = ""
noPrint = False
msgAdd = ''
msg = None
if len(sortedAggs) == 0:
msg = "No aggregates"
self.logger.debug("Got no aggregates?")
noPrint = True
else:
self.logger.debug("AMs expire at %d time(s).", len(sortedAggs))
firstSlotTimes = sortedAggs[0][0].sliverExpirations
skipFirst = False
if firstSlotTimes is None or len(firstSlotTimes) == 0:
skipFirst = True
if len(sortedAggs) == 1:
msg = "Aggregates did not report sliver expiration"
self.logger.debug("Only expiration timeslot has an agg with no expirations")
noPrint = True
else:
msgAdd = "Resource expiration unknown at %d aggregate(s)" % len(sortedAggs[0])
self.logger.debug("First slot had no times, but there are other slots")
ind = -1
for slot in sortedAggs:
ind += 1
if skipFirst and ind == 0:
continue
if firstTime is None:
firstTime = slot[0].sliverExpirations[0]
firstCount = len(slot)
firstLabel = str(slot[0])
if len(sortedAggs) > 1:
self.logger.debug("First expiration is at %s UTC at %s, at %d total AM(s).", firstTime.isoformat(), firstLabel, firstCount)
else:
self.logger.debug("Resource expiration is at %s UTC, at %d total AM(s).", firstTime.isoformat(), firstCount)
if firstCount == 1:
continue
elif firstCount == 2:
firstLabel += " and " + str(slot[1])
else:
firstLabel += " and %d other AM(s)" % (firstCount - 1)
continue
elif secondTime is None:
secondTime = slot[0].sliverExpirations[0]
secondCount = len(slot)
secondLabel = str(slot[0])
self.logger.debug("Second expiration at %s UTC at %s, at %d total AM(s)", secondTime.isoformat(), secondLabel, secondCount)
if secondCount == 1:
break
elif secondCount == 2:
secondLabel += " and " + str(slot[1])
else:
secondLabel += " and %d other AM(s)" % (secondCount - 1)
break
# Done looping over agg exp times in sortedAggs
# Done handling sortedAggs
if not noPrint:
if len(sortedAggs) == 1 or secondTime is None:
msg = "Your resources expire at %s (UTC). %s" % (firstTime.isoformat(), msgAdd)
else:
msg = "Your resources expire at %d different times. The first resources expire at %s (UTC) at %s. The second expiration time is %s (UTC) at %s. %s" % (len(sortedAggs), firstTime.isoformat(), firstLabel, secondTime.isoformat(), secondLabel, msgAdd)
return msg
# end getExpirationMessage
def getProvisionMessage(self):
# Get a message warning the experimenter to do provision and poa at AMs that are only allocated
msg = None
for agg in self.ams_to_process:
if agg.manifestDom and agg.api_version > 2:
if msg is None:
msg = ""
aggnick = agg.nick
if aggnick is None:
aggnick = agg.url
msg += " Reservation at %s is temporary! \nYou must manually call `omni -a %s -V3 provision %s` and then `omni -a %s -V3 poa %s geni_start`.\n" % (aggnick, aggnick, self.slicename, aggnick, self.slicename)
return msg
# Compare the list of AMs in the request with AMs known
# to the SCS. Any that the SCS does not know means the request
# cannot succeed if those are AMs in a stitched link
# This would be in the doStitching() method but is currently commented out.
def checkSCSAMs(self):
# FIXME: This takes time. If this can't block a more expensive later operation, why bother?
scsAggs = {}
try:
scsAggs = self.scsService.ListAggregates(False, self.opts.ssltimeout)
except Exception, e:
self.logger.debug("SCS ListAggregates failed: %s", e)
if scsAggs and isinstance(scsAggs, dict) and len(scsAggs.keys()) > 0:
if scsAggs.has_key('value') and scsAggs['value'].has_key('geni_aggregate_list'):
scsAggs = scsAggs['value']['geni_aggregate_list']
# self.logger.debug("Got geni_agg_list from scs: %s", scsAggs)
# Now sanity check AMs requested
# Note that this includes AMs that the user does not
# want to stitch - so we cannot error out early
# FIXME: Can we ID from the request which are AMs that need a stitch?
for reqAMURN in self.parsedUserRequest.amURNs:
found = False
for sa in scsAggs.keys():
if scsAggs[sa]['urn'] == reqAMURN:
self.logger.debug("Requested AM URN %s is listed by SCS with URL %s", reqAMURN, scsAggs[sa]['url'])
found = True
break
if not found:
self.logger.warn("Your request RSpec specifies the aggregate (component manager) '%s' for which there are no stitching paths configured. If you requested a stitched link to this aggregate, it will fail.", reqAMURN)
def cleanup(self):
'''Remove temporary files if not in debug mode'''
if self.opts.debug:
return
scsres = prependFilePrefix(self.opts.fileDir, Aggregate.FAKEMODESCSFILENAME)
if os.path.exists(scsres):
os.unlink(scsres)
if self.savedSliceCred and os.path.exists(self.opts.slicecredfile):
os.unlink(self.opts.slicecredfile)
if not self.ams_to_process:
return
for am in self.ams_to_process:
# Remove getversion files
# Note the AM URN here may not be right, so we might miss a file
filename = handler_utils._construct_output_filename(self.opts, None, am.url, am.urn, "getversion", ".json", 1)
# self.logger.debug("Deleting AM getversion: %s", filename)
if os.path.exists(filename):
os.unlink(filename)
# Remove any per AM request RSpecs
if am.rspecfileName and not self.opts.output:
# self.logger.debug("Deleting AM request: %s", am.rspecfileName)
if os.path.exists(am.rspecfileName):
os.unlink(am.rspecfileName)
# v2.5 left these manifest & status files there. Leave them still? Remove them?
# Now delete the per AM saved manifest rspec file
if not self.opts.output:
manfile = handler_utils._construct_output_filename(self.opts, self.slicename, am.url, am.urn, "manifest-rspec", ".xml", 1)
# self.logger.debug("Deleting AM manifest: %s", manfile)
if os.path.exists(manfile):
os.unlink(manfile)
# Now delete per AM saved status files
statusfilename = handler_utils._construct_output_filename(self.opts, self.slicename, am.url, am.urn, "sliverstatus", ".json", 1)
# self.logger.debug("Deleting AM status: %s", statusfilename)
if os.path.exists(statusfilename):
os.unlink(statusfilename)
# The main loop that does the work of getting all aggregates objects to make reservations.
# This method recurses on itself when an attempt fails.
# - Handle timeout
# - Call the SCS as needed
# - pause to let AMs free resources from earlier attempts
# - parse the SCS response, constructing aggregate objects and dependencies
# - save aggregate state from any previous time through this loop
# - gather extra info on aggregates
# - ensure we use only 1 ExoSM instance, handle various request oddities
# - request 'any' at AMs where we can
# - handle requests to exit early
# - update the available range in the request based on current availability where appropriate
# - spawn the Launcher to loop over aggregates until all aggregates have a reservation, or raise an error
# - On error, delete partial reservations, and recurse for recoverable errors
def mainStitchingLoop(self, sliceurn, requestDOM, existingAggs=None):
# existingAggs are Aggregate objects
# Time out stitcher call if needed
if datetime.datetime.utcnow() >= self.config['timeoutTime']:
msg = "Reservation attempt timed out after %d minutes." % self.opts.timeout
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("%s Per command-line option, not deleting existing reservations.", msg)
msg2 = self.endPartiallyReserved(aggs=existingAggs, timeout=True)
msg = "%s %s" % (msg, msg2)
# Allow later code to raise this as an error
else:
self.logger.warn("%s Deleting any reservations...", msg)
class DumbLauncher():
def __init__(self, agglist):
self.aggs = agglist
try:
(delretText, delretStruct) = self.deleteAllReservations(DumbLauncher(existingAggs))
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
raise StitchingError(msg)
# Call SCS if needed
self.scsCalls = self.scsCalls + 1
if self.isStitching and not self.opts.noSCS:
if self.scsCalls == 1:
self.logger.info("Calling SCS...")
else:
thStr = 'th'
if self.scsCalls == 2:
thStr = 'nd'
elif self.scsCalls == 3:
thStr = 'rd'
if self.scsCalls == self.maxSCSCalls:
self.logger.info("Calling SCS for the %d%s and last time...", self.scsCalls, thStr)
else:
self.logger.info("Calling SCS for the %d%s time...", self.scsCalls, thStr)
scsResponse = self.callSCS(sliceurn, requestDOM, existingAggs)
self.lastException = None # Clear any last exception from the last run through
# If needed, pause to let AMs free up resources; recheck the timeout if needed
if self.scsCalls > 1 and existingAggs:
# We are doing another call.
# Let AMs recover. Is this long enough?
# If one of the AMs is a DCN AM, use that sleep time instead - longer
sTime = Aggregate.PAUSE_FOR_V3_AM_TO_FREE_RESOURCES_SECS
for agg in existingAggs:
if agg.dcn and agg.triedRes:
# Only need to sleep this much longer time
# if this is a DCN AM that we tried a reservation on (whether it worked or failed)
if sTime < Aggregate.PAUSE_FOR_DCN_AM_TO_FREE_RESOURCES_SECS:
self.logger.debug("Must sleep longer cause had a previous reservation attempt at a DCN AM: %s", agg)
sTime = Aggregate.PAUSE_FOR_DCN_AM_TO_FREE_RESOURCES_SECS
elif agg.api_version == 2 and agg.triedRes and sTime < Aggregate.PAUSE_FOR_AM_TO_FREE_RESOURCES_SECS:
self.logger.debug("Must sleep longer cause had a previous v2 reservation attempt at %s", agg)
sTime = Aggregate.PAUSE_FOR_AM_TO_FREE_RESOURCES_SECS
# Reset whether we've tried this AM this time through
agg.triedRes = False
if datetime.datetime.utcnow() + datetime.timedelta(seconds=sTime) >= self.config['timeoutTime']:
# We'll time out. So quit now.
self.logger.debug("After planned sleep for %d seconds we will time out", sTime)
msg = "Reservation attempt timing out after %d minutes." % self.opts.timeout
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("%s Per command-line option, not deleting existing reservations.", msg)
msg2 = self.endPartiallyReserved(aggs=existingAggs, timeout=True)
msg = "%s %s" % (msg, msg2)
# Allow later code to raise this as an error
else:
self.logger.warn("%s Deleting any reservations...", msg)
class DumbLauncher():
def __init__(self, agglist):
self.aggs = agglist
try:
(delretText, delretStruct) = self.deleteAllReservations(DumbLauncher(existingAggs))
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
raise StitchingError(msg)
self.logger.info("Pausing for %d seconds for Aggregates to free up resources...\n\n", sTime)
time.sleep(sTime)
# Done pausing to let AMs free resources
# Parse SCS Response, constructing objects and dependencies, validating return
if self.isStitching and not self.opts.noSCS:
self.parsedSCSRSpec, workflow_parser = self.parseSCSResponse(scsResponse)
scsResponse = None # Just to note we are done with this here (keep no state)
else:
# Fake out the data structures using the original user request RSpec
try:
xmlreq = requestDOM.toxml()
except Exception, xe:
self.logger.debug("Failed to XMLify requestDOM for parsing: %s", xe)
self._raise_omni_error("Malformed request RSpec: %s" % xe)
self.parsedSCSRSpec = self.rspecParser.parse(xmlreq)
workflow_parser = WorkflowParser(self.logger)
# Parse the workflow, creating Path/Hop/etc objects
# In the process, fill in a tree of which hops depend on which,
# and which AMs depend on which
# Also mark each hop with what hop it imports VLANs from,
# And check for AM dependency loops
workflow_parser.parse({}, self.parsedSCSRSpec)
# self.logger.debug("Did fake workflow parsing")
# Save off existing Aggregate object state
parsedURNExistingAggs = [] # Existing aggs that came from a parsed URN, not in workflow
self.parsedURNNewAggs = [] # New aggs created not from workflow
if existingAggs:
# Copy existingAggs.hops.vlans_unavailable to workflow_parser.aggs.hops.vlans_unavailable? Other state?
self.saveAggregateState(existingAggs, workflow_parser.aggs)
# An AM added only from parsed AM URNs will have state lost. Ticket #781
if self.parsedSCSRSpec:
# Look for existing aggs that came from parsed URN and aren't in workflow
for agg in existingAggs:
self.logger.debug("Looking at existing AM %s", agg)
isWorkflow = False
for agg2 in workflow_parser.aggs:
if agg.urn == agg2.urn or agg.urn in agg2.urn_syns:
self.logger.debug("Is a workflow AM; found AM's URN %s in workflow's AMs", agg.urn)
isWorkflow = True
break
else:
for urn2 in agg.urn_syns:
if urn2 == agg2.urn or urn2 in agg2.urn_syns:
self.logger.debug("Is a workflow AM based on urn_syn; found AM's urn_syn %s in workflow AM", urn2)
isWorkflow = True
break
if isWorkflow:
break
if isWorkflow:
continue
isParsed = False
if agg.urn in self.parsedSCSRSpec.amURNs:
self.logger.debug("isParsed from main URN %s", agg.urn)
isParsed = True
else:
for urn2 in agg.urn_syns:
if urn2 in self.parsedSCSRSpec.amURNs:
self.logger.debug("isParsed from urn syn %s", urn2)
isParsed = True
break
if not isParsed:
continue
# Have an AM that came from parsed URN and is not in the workflow.
# So this agg needs its data copied over.
# this agg won't be in ams_to_process
# need to do self.saveAggregateState(otherExistingAggs, newAggsFromURNs)
self.logger.debug("%s was not in workflow and came from parsed URN", agg)
parsedURNExistingAggs.append(agg)
# end loop over existing aggs
# End block to handle parsed URNs not in workflow
existingAggs = None # Now done
# FIXME: if notScript, print AM dependency tree?
# Ensure we are processing all the workflow aggs plus any aggs in the RSpec not in
# the workflow
self.ams_to_process = copy.copy(workflow_parser.aggs)
if self.isStitching and not self.opts.noSCS:
self.logger.debug("SCS workflow said to include resources from these aggregates:")
for am in self.ams_to_process:
self.logger.debug("\t%s", am)
# Ensure all AM URNs we found in the RSpec are Aggregate objects in ams_to_process
self.createObjectsFromParsedAMURNs()
# If we saved off some existing aggs that were from parsed URNs and not in the workflow earlier,
# and we also just created some new aggs, then see if those need to have existing data copied over
# Ticket #781
if len(parsedURNExistingAggs) > 0 and len(self.parsedURNNewAggs) > 0:
self.saveAggregateState(parsedURNExistingAggs, self.parsedURNNewAggs)
parsedURNExistingAggs = []
self.parsedURNNewAggs = []
# Add extra info about the aggregates to the AM objects
self.add_am_info(self.ams_to_process)
# FIXME: check each AM reachable, and we know the URL/API version to use
# If requesting from >1 ExoGENI AM, then use ExoSM. And use ExoSM only once.
self.ensureOneExoSM()
self.dump_objects(self.parsedSCSRSpec, self.ams_to_process)
self.logger.info("Multi-AM reservation will include resources from these aggregates:")
for am in self.ams_to_process:
self.logger.info("\t%s", am)
# If we said this rspec needs a fixed / fake endpoint, add it here - so the SCS and other stuff
# doesn't try to do anything with it
if self.opts.fixedEndpoint:
self.addFakeNode()
# DCN AMs seem to require there be at least one sliver_type specified
self.ensureSliverType()
# Change the requested VLAN tag to 'any' where we can, allowing
# The AM to pick from the currently available tags
self.changeRequestsToAny()
# Save slice cred and timeoutTime on each AM
for am in self.ams_to_process:
if self.slicecred:
# Hand each AM the slice credential, so we only read it once
am.slicecred = self.slicecred
# Also hand the timeout time
am.timeoutTime = self.config['timeoutTime']
# Exit if user specified --noReservation, saving expanded request RSpec
self.handleNoReservation()
# Check current VLAN tag availability before doing allocations
ret = self.updateAvailRanges(sliceurn, requestDOM)
if ret is not None:
return ret
# Exit if user specified --genRequest, saving more fully expanded request RSpec
self.handleGenRequest()
# The launcher handles calling the aggregates to do their allocation
# Create a launcher and run it. That in turn calls the Aggregates to do the allocations,
# where all the work happens.
# A StitchingCircuitFailedError is a transient or recoverable error. On such errors,
# recurse and call this main method again, re-calling the SCS and retrying reservations at AMs.
# A StitchingError is a permanent failure.
# On any error, delete any partial reservations.
launcher = stitch.Launcher(self.opts, self.slicename, self.ams_to_process, self.config['timeoutTime'])
try:
# Spin up the main loop
lastAM = launcher.launch(self.parsedSCSRSpec, self.scsCalls)
# for testing calling the SCS only many times
# raise StitchingCircuitFailedError("testing")
except StitchingCircuitFailedError, se:
# A StitchingCircuitFailedError is a transient or recoverable error. On such errors,
# recurse and call this main method again, re-calling the SCS and retrying reservations at AMs.
# On any error, delete any partial reservations.
# Do not recurse if we've hit the maxSCSCalls or if there's an error deleting
# previous reservations.
self.lastException = se
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("Stitching failed. Would retry but commandline option specified not to. Last error: %s", se)
msg = self.endPartiallyReserved(se, aggs=self.ams_to_process)
# Exit by raising an error
raise StitchingError("Stitching failed due to: %s. %s" % (se, msg))
else:
if self.scsCalls == self.maxSCSCalls:
self.logger.error("Stitching max circuit failures reached - will delete and exit.")
try:
(delretText, delretStruct) = self.deleteAllReservations(launcher)
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
raise StitchingError("Stitching reservation failed %d times. Last error: %s" % (self.scsCalls, se))
self.logger.warn("Stitching failed but will retry: %s", se)
success = False
try:
(delRetText, delRetStruct) = self.deleteAllReservations(launcher)
hadFail = False
for url in delRetStruct.keys():
if not delRetStruct[url]:
hadFail = True
break
if isinstance(delRetStruct[url], dict) and delRetStruct[url].has_key('code') and isinstance(delRetStruct[url]['code'], dict) and delRetStruct[url]['code'].has_key('geni_code') and delRetStruct[url]['code']['geni_code'] not in (0, 12, 15):
hadFail = True
break
if isinstance(delRetStruct[url], dict) and delRetStruct[url].has_key('code') and isinstance(delRetStruct[url]['code'], dict) and delRetStruct[url]['code'].has_key('geni_code') and delRetStruct[url]['code']['geni_code'] == 0 and delRetStruct[url].has_key('value') and isinstance(delRetStruct[url]['value'], list) and len(delRetStruct[url]['value']) > 0:
try:
for sliver in delRetStruct[url]["value"]:
status = sliver["geni_allocation_status"]
if status != 'geni_unallocated':
hadFail = True
break
if hadFail:
break
except:
# Malformed return I think
hadFail = True
# FIXME: Handle other cases...
if not hadFail:
success = True
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
if not success:
raise StitchingError("Stitching failed. Would retry but delete had errors. Last Stitching error: %s" % se)
# Flush the cache of aggregates. Loses all state. Avoids
# double adding hops to aggregates, etc. But we lose the vlans_unavailable. And ?
aggs = copy.copy(self.ams_to_process)
self.ams_to_process = None # Clear local memory of AMs to avoid issues
Aggregate.clearCache()
# construct new SCS args
# redo SCS call et al
# FIXME: aggs.hops have loose tag: mark the hops in the request as explicitly loose
# FIXME: Here we pass in the request to give to the SCS. I'd like this
# to be modified (different VLAN range? Some hops marked loose?) in future
lastAM = self.mainStitchingLoop(sliceurn, requestDOM, aggs)
except StitchingError, se:
# A StitchingError is a permanent failure.
# On any error, delete any partial reservations.
if not isinstance(se, StitchingStoppedError):
self.logger.error("Stitching failed with an error: %s", se)
if self.lastException:
self.logger.error("Root cause error: %s", self.lastException)
newError = StitchingError("%s which caused %s" % (str(self.lastException), str(se)))
se = newError
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("Per commandline option, not deleting existing reservations.")
msg = self.endPartiallyReserved(se, aggs=self.ams_to_process)
# Create a new error with a new return msg and raise that
raise StitchingStoppedError("Stitching stopped. %s. %s" % (se, msg))
else:
try:
(delRetText, delRetStruct) = self.deleteAllReservations(launcher)
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
#raise
raise se
return lastAM
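# Note: on recoverable failures (StitchingCircuitFailedError) mainStitchingLoop clears the
# Aggregate cache and calls itself again with the surviving Aggregate objects; the retries
# are bounded by maxSCSCalls via the scsCalls counter.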
def writeExpandedRequest(self, ams, requestDom):
# Write the fully expanded/updated request RSpec to a file
self.logger.debug("Generating updated combined request RSpec")
combinedRequestDom = combineManifestRSpecs(ams, requestDom, useReqs=True)
try:
reqString = combinedRequestDom.toprettyxml(encoding="utf-8")
except Exception, xe:
self.logger.debug("Failed to XMLify combined Request RSpec: %s", xe)
self._raise_omni_error("Malformed combined request RSpec: %s" % xe)
reqString = stripBlankLines(reqString)
# set rspec to be UTF-8
if isinstance(reqString, unicode):
reqString = reqString.encode('utf-8')
self.logger.debug("Combined request RSpec was unicode")
# FIXME: Handle errors. Maybe make return use code/value/output struct
# If error and have an expanded request from SCS, include that in output.
# Or if particular AM had errors, ID the AMs and errors
# FIXME: This prepends a header on an RSpec that might already have a header
# -- maybe replace any existing header
# FIXME: We force -o here and keep it from logging the
# RSpec. Do we need an option to not write the RSpec to a file?
ot = self.opts.output
if not self.opts.tostdout:
self.opts.output = True
if not self.opts.debug:
# Suppress all but WARN on console here
lvl = self.logger.getEffectiveLevel()
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
lvl = handler.level
handler.setLevel(logging.WARN)
break
retVal, filename = handler_utils._writeRSpec(self.opts, self.logger, reqString, None, '%s-expanded-request'%self.slicename, '', None)
if not self.opts.debug:
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(lvl)
break
self.opts.output = ot
if filename:
msg = "Saved expanded request RSpec at %d AM(s) to file '%s'" % (len(ams), os.path.abspath(filename))
else:
msg = "Generated expanded request RSpec"
return msg
def handleGenRequest(self):
# Exit if user specified --genRequest, saving more fully expanded request RSpec
# Used in mainStitchingLoop
if self.opts.genRequest:
msg = self.writeExpandedRequest(self.ams_to_process, self.parsedSCSRSpec.dom)
self.logger.info(msg)
raise StitchingError("Requested to only generate and save the expanded request")
# End of block to save the expanded request and exit
def handleNoReservation(self):
# Exit if user specified --noReservation, saving expanded request RSpec
# Used in mainStitchingLoop
if self.opts.noReservation:
self.logger.info("Not reserving resources")
# Write the request rspec to a string that we save to a file
try:
requestString = self.parsedSCSRSpec.dom.toxml(encoding="utf-8")
except Exception, xe:
self.logger.debug("Failed to XMLify parsed SCS request RSpec for saving: %s", xe)
self._raise_omni_error("Malformed SCS expanded request RSpec: %s" % xe)
header = "<!-- Expanded Resource request for:\n\tSlice: %s -->" % (self.slicename)
if requestString is not None:
content = stripBlankLines(string.replace(requestString, "\\n", '\n'))
else:
self.logger.debug("None expanded request RSpec?")
content = ""
filename = None
ot = self.opts.output
if not self.opts.tostdout:
self.opts.output = True
if self.opts.output:
filename = handler_utils._construct_output_filename(self.opts, self.slicename, '', None, "expanded-request-rspec", ".xml", 1)
if filename:
self.logger.info("Saving expanded request RSpec to file: %s", os.path.abspath(filename))
else:
self.logger.info("Expanded request RSpec:")
if not self.opts.debug:
# Suppress all but WARN on console here
lvl = self.logger.getEffectiveLevel()
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
lvl = handler.level
handler.setLevel(logging.WARN)
break
# Create FILE
# This prints or logs results, depending on whether filename is None
handler_utils._printResults(self.opts, self.logger, header, content, filename)
if not self.opts.debug:
handlers = self.logger.handlers
if len(handlers) == 0:
handlers = logging.getLogger().handlers
for handler in handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(lvl)
break
self.opts.output = ot
raise StitchingError("Requested no reservation")
# Done handling --noReservation
def createObjectFromOneURN(self, amURN):
# Create an Aggregate class instance from the URN of the aggregate,
# avoiding duplicates.
# If the AM URN we parsed from the RSpec is already in the list of aggregates to process,
# skip to the next parsed URN
found = False
for agg in self.ams_to_process:
if agg.urn == amURN:
found = True
# self.logger.debug(" .. was already in ams_to_process")
break
# For EG there are multiple URNs that are really the same
# If find one, found them all
for urn2 in agg.urn_syns:
if urn2 == amURN:
# self.logger.debug(" .. was in ams_to_process under synonym. Ams_to_process had %s", agg.urn)
found = True
break
if found:
return
# AM URN was not in the workflow from the SCS
# # If this URN was on a stitching link, then this isn't going to work
# for link in self.parsedSCSRSpec.links:
# if len(link.aggregates) > 1 and not link.hasSharedVlan and link.typeName == link.VLAN_LINK_TYPE:
# # This is a link that needs stitching
# for linkagg in link.aggregates:
# if linkagg.urn == amURN or amURN in linkagg.urn_syns:
# self.logger.debug("Found AM %s on stitching link %s that is not in SCS Workflow. URL: %s", amURN, link.id, linkagg.url)
# stitching = self.parsedSCSRSpec.stitching
# slink = None
# if stitching:
# slink = stitching.find_path(link.id)
# if not slink:
# self.logger.debug("No path in stitching section of rspec for link %s that seems to need stitching", link.id)
# raise StitchingError("SCS did not handle link %s - perhaps AM %s is unknown?", link.id, amURN)
am = Aggregate.find(amURN)
# Fill in a URL for this AM
# First, find it in the agg_nick_cache
if not am.url:
# FIXME: Avoid apparent v1 URLs
for urn in am.urn_syns:
(nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, urn)
if url and url.strip() != '':
self.logger.debug("Found AM %s URL using URN %s from omni_config AM nicknames: %s", amURN, urn, nick)
am.url = url
am.nick = nick
break
# If that failed, try asking the CH
if not am.url:
# Try asking our CH for AMs to get the URL for the
# given URN
fw_ams = dict()
try:
fw_ams = self.framework.list_aggregates()
for fw_am_urn in fw_ams.keys():
if fw_am_urn and fw_am_urn.strip() in am.urn_syns and fw_ams[fw_am_urn].strip() != '':
am.url = fw_ams[fw_am_urn]
self.logger.debug("Found AM %s URL from CH ListAggs: %s", amURN, am.url)
break
except:
pass
if not am.url:
raise StitchingError("RSpec requires AM '%s' which is not in workflow and URL is unknown!" % amURN)
else:
self.logger.debug("Adding am to ams_to_process from URN %s, with url %s", amURN, am.url)
self.ams_to_process.append(am)
self.parsedURNNewAggs.append(am) # Save off the new agg as something we just added
return
# End of createObjectFromOneURN
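# Note: the URL lookup above tries the omni_config aggregate nicknames first and then falls
# back to the clearinghouse's list_aggregates(); if neither knows the URN, a StitchingError
# is raised since the AM's URL is unknown.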
def createObjectsFromOptArgs(self):
# For use when merging manifests
for amNick in self.opts.aggregate:
url1,urn1 = handler_utils._derefAggNick(self, amNick)
self.createObjectFromOneURN(urn1)
def createObjectsFromParsedAMURNs(self):
# Ensure all AM URNs we found in the RSpec are Aggregate objects in ams_to_process
if self.parsedSCSRSpec is None:
return
for amURN in self.parsedSCSRSpec.amURNs:
# self.logger.debug("Looking at SCS returned amURN %s", amURN)
self.createObjectFromOneURN(amURN)
# Done adding user requested non linked AMs to list of AMs to process
def updateAvailRanges(self, sliceurn, requestDOM):
# Check current VLAN tag availability before doing allocations
# Loop over AMs. If I update an AM, then go to AMs that depend on it and intersect there (but don't redo avail query), and recurse.
for am in self.ams_to_process:
# If doing the avail query at this AM doesn't work or wouldn't help or we did it recently, move on
if not am.doAvail(self.opts):
self.logger.debug("Not checking VLAN availability at %s", am)
continue
self.logger.debug("Checking current availabilty at %s", am)
madeChange = False
try:
madeChange = am.updateWithAvail(self.opts)
if madeChange:
# Must intersect the new ranges with others in the chain
# We have already updated avail and checked request at this AM
for hop in am.hops:
self.logger.debug("Applying updated availability up the chain for %s", hop)
while hop.import_vlans:
newHop = hop.import_vlans_from
oldRange = newHop._hop_link.vlan_range_request
newHop._hop_link.vlan_range_request = newHop._hop_link.vlan_range_request.intersection(hop._hop_link.vlan_range_request)
if oldRange != newHop._hop_link.vlan_range_request:
self.logger.debug("Reset range of %s to '%s' from %s", newHop, newHop._hop_link.vlan_range_request, oldRange)
else:
self.logger.debug("Availability unchanged at %s", newHop)
if len(newHop._hop_link.vlan_range_request) <= 0:
self.logger.debug("New available range is empty!")
raise StitchingCircuitFailedError("No VLANs possible at %s based on latest availability; Try again from the SCS" % newHop.aggregate)
if newHop._hop_link.vlan_suggested_request != VLANRange.fromString("any") and not newHop._hop_link.vlan_suggested_request <= newHop._hop_link.vlan_range_request:
self.logger.debug("Suggested (%s) is not in reset available range - mark it unavailable and raise an error!", newHop._hop_link.vlan_suggested_request)
newHop.vlans_unavailable = newHop.vlans_unavailable.union(newHop._hop_link.vlan_suggested_request)
raise StitchingCircuitFailedError("Requested VLAN unavailable at %s based on latest availability; Try again from the SCS" % newHop)
else:
self.logger.debug("Suggested (%s) still in reset available range", newHop._hop_link.vlan_suggested_request)
hop = newHop
# End of loop up the imports chain for this hop
# End of loop over all hops on this AM where we just updated availability
self.logger.debug("Done applying updated availabilities from %s", am)
else:
self.logger.debug("%s VLAN availabilities did not change. Done with this AM", am)
# End of block to only update avails up the chain if we updated availability on this AM
except StitchingCircuitFailedError, se:
self.lastException = se
if self.scsCalls == self.maxSCSCalls:
self.logger.error("Stitching max circuit failures reached")
raise StitchingError("Stitching reservation failed %d times. Last error: %s" % (self.scsCalls, se))
# FIXME: If we aren't doing stitching so won't be calling the SCS, then does it ever make sense
# to try this again here? For example, EG Embedding workflow ERROR?
# if not self.isStitching:
# self.logger.error("Reservation failed and not reasonable to retry - not a stitching request.")
# raise StitchingError("Multi AM reservation failed. Not stitching so cannot retry with new path. %s" % se)
self.logger.warn("Stitching failed but will retry: %s", se)
# Flush the cache of aggregates. Loses all state. Avoids
# double adding hops to aggregates, etc. But we lose the vlans_unavailable. And ?
aggs = copy.copy(self.ams_to_process)
self.ams_to_process = None # Clear local memory of AMs to avoid issues
Aggregate.clearCache()
# construct new SCS args
# redo SCS call et al
return self.mainStitchingLoop(sliceurn, requestDOM, aggs)
# End of exception handling block
# End of loop over AMs getting current availability
return None # Not an AM return so don't return it in the main block
def changeRequestsToAny(self):
# Change requested VLAN tags to 'any' where appropriate
# Check the AMs: For each hop that says it is a VLAN producer / imports no VLANs, lets change the suggested request to "any".
# That should ensure that that hop succeeds the first time through. Hopefully the SCS has set up the avail ranges to work throughout
# the path, so everything else will just work as well.
# In APIv3, a failure later is just a negotiation case (we'll get a new tag to try). In APIv2, a later failure is a pseudo negotiation case.
# That is, we can go back to the 'any' hop and exclude the failed tag, deleting that reservation, and try again.
# FIXME: In schema v2, the logic for where to figure out if it is a consumer or producer is more complex. But for now, the hoplink says,
# and the hop indicates if it imports vlans.
# While doing this, make sure the tells for whether we can tell the hop to pick the tag are consistent.
if self.opts.useSCSSugg:
self.logger.info("Per option, requesting SCS suggested VLAN tags")
return
for am in self.ams_to_process:
if self.opts.useSCSSugg:
#self.logger.info("Per option, requesting SCS suggested VLAN tags")
continue
if not am.supportsAny():
self.logger.debug("%s doesn't support requesting 'any' VLAN tag - move on", am)
continue
# Could a complex topology have some hops producing VLANs and some accepting VLANs at the same AM?
# if len(am.dependsOn) == 0:
# self.logger.debug("%s says it depends on no other AMs", am)
for hop in am.hops:
# Init requestAny so we never request 'any' when option says not or it is one of the non-supported AMs
requestAny = not self.opts.useSCSSugg and am.supportsAny()
if not requestAny:
continue
isConsumer = False
isProducer = False
imports = False
if hop._hop_link.vlan_consumer:
# self.logger.debug("%s says it is a vlan consumer. In itself, that is OK", hop)
isConsumer = True
if hop._import_vlans:
if hop.import_vlans_from._aggregate != hop._aggregate:
imports = True
self.logger.debug("%s imports VLANs from another AM, %s. Don't request 'any'.", hop, hop.import_vlans_from)
if len(am.dependsOn) == 0:
self.logger.warn("%s imports VLANs from %s but the AM says it depends on no AMs?!", hop, hop.import_vlans_from)
requestAny = False
else:
# This hop imports tags from another hop on the same AM.
# So we want this hop to do what that other hop does. So if that other hop is changing to any, then this
# hop should change to any.
hop2 = hop.import_vlans_from
if hop2._import_vlans and hop2.import_vlans_from._aggregate != hop2._aggregate:
imports = True
requestAny = False
self.logger.debug("%s imports VLANs from %s which imports VLANs from a different AM (%s) so don't request 'any'.", hop, hop2, hop2._import_vlans_from)
elif not hop2._hop_link.vlan_producer:
self.logger.debug("%s imports VLANs from %s which does not say it is a vlan producer. Don't request 'any'.", hop, hop2)
requestAny = False
else:
self.logger.debug("%s imports VLANs from %s which is OK to request 'any', so this hop should request 'any'.", hop, hop2)
if not hop._hop_link.vlan_producer:
if not imports and not isConsumer:
# See http://groups.geni.net/geni/ticket/1263 and http://groups.geni.net/geni/ticket/1262
if not am.supportsAny():
self.logger.debug("%s doesn't import VLANs and not marked as either a VLAN producer or consumer. But it is an EG or GRAM or OESS or DCN AM, where we cannot assume 'any' works.", hop)
requestAny = False
else:
# If this hop doesn't import and isn't explicitly marked as either a consumer or a producer, then
# assume it is willing to produce a VLAN tag
self.logger.debug("%s doesn't import and not marked as either a VLAN producer or consumer. Assuming 'any' is OK.", hop)
requestAny = True
else:
if requestAny:
self.logger.debug("%s does not say it is a vlan producer. Don't request 'any'.", hop)
requestAny = False
else:
self.logger.debug("%s does not say it is a vlan producer. Still not requesting 'any'.", hop)
else:
isProducer = True
self.logger.debug("%s marked as a VLAN producer", hop)
if not requestAny and not imports and not isConsumer and not isProducer:
if not am.supportsAny():
self.logger.debug("%s doesn't import VLANs and not marked as either a VLAN producer or consumer. But it is an EG or GRAM or OESS or DCN AM, where we cannot assume 'any' works.", hop)
else:
# If this hop doesn't import and isn't explicitly marked as either a consumer or a producer, then
# assume it is willing to produce a VLAN tag
self.logger.debug("%s doesn't import VLANs and not marked as either a VLAN producer or consumer. Assuming 'any' is OK.", hop)
requestAny = True
if self.opts.useSCSSugg and requestAny:
self.logger.info("Would request 'any', but user requested to stick to SCS suggestions.")
elif requestAny:
if len(am.dependsOn) != 0:
self.logger.debug("%s appears OK to request tag 'any', but the AM says it depends on other AMs?", hop)
if hop._hop_link.vlan_suggested_request != VLANRange.fromString("any"):
self.logger.debug("Changing suggested request tag from %s to 'any' on %s", hop._hop_link.vlan_suggested_request, hop)
hop._hop_link.vlan_suggested_request = VLANRange.fromString("any")
# else:
# self.logger.debug("%s suggested request was already 'any'.", hop)
# End of loop over hops in AM
# End of loop over AMs to process
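# Rough summary of the logic above: a hop's suggested tag is switched to 'any' only when the
# AM supports 'any', the user did not force SCS suggestions, and the hop either produces
# VLANs (or is unmarked and imports nothing) or imports from a same-AM hop that itself
# qualifies; hops importing tags from a different AM keep the SCS-suggested value.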
def deleteAllReservations(self, launcher):
'''On error exit, ensure all outstanding reservations are deleted.'''
# Try to combine v2 and v3 results together
# Text is just appended
# all results in struct are keyed by am.url
# For v3, this is therefore same as before
# v2 return used to be (successURLs, failedURLs)
# But that's hard to preserve
# So instead, the v2 return is True if the AM was found in the success list, False if found in Failed list,
# and otherwise the return under the am.url is whatever the AM originally returned.
# Note that failing to find the AM url may mean it's a variant of the URL
loggedDeleting = False
retText = ""
retStruct = {}
if len(launcher.aggs) == 0:
self.logger.debug("0 aggregates from which to delete")
for am in launcher.aggs:
if am.manifestDom:
if not loggedDeleting:
loggedDeleting = True
self.logger.info("Deleting existing reservations...")
self.logger.debug("Had reservation at %s", am)
try:
(text, result) = am.deleteReservation(self.opts, self.slicename)
self.logger.info("Deleted reservation at %s.", am)
if text is not None and text.strip() != "":
if retText != "":
retText += "\n %s" % text
else:
retText = text
if am.api_version < 3 or not isinstance(result, dict):
if not (isinstance(result, tuple) and isinstance(result[0], list)):
if result is None and text.startswith("Success"):
retStruct[am.url] = True
else:
# Some kind of error
self.logger.debug("Struct result from delete or deletesliver unknown from %s: %s", am, result)
retStruct[am.url] = result
else:
(succ, fail) = result
# FIXME: Do the handler_utils tricks for comparing URLs?
if am.url in succ or am.alt_url in succ:
retStruct[am.url] = True
elif am.url in fail or am.alt_url in fail:
retStruct[am.url] = False
else:
self.logger.debug("Failed to find AM URL in v2 deletesliver return struct. AM %s, return %s", am, result)
retStruct[am.url] = result
else:
retCopy = retStruct.copy()
retCopy.update(result)
retStruct = retCopy
except StitchingError, se2:
msg = "Failed to delete reservation at %s: %s" % (am, se2)
self.logger.warn(msg)
retStruct[am.url] = False
if retText != "":
retText += "\n %s" % msg
else:
retText = msg
if retText == "":
retText = "No aggregates with reservations from which to delete"
return (retText, retStruct)
def confirmGoodRSpec(self, requestString, rspecType=rspec_schema.REQUEST, doRSpecLint=True):
'''Ensure an rspec is valid'''
typeStr = 'Request'
if rspecType == rspec_schema.MANIFEST:
typeStr = 'Manifest'
# Confirm the string is a request rspec, valid
if requestString is None or str(requestString).strip() == '':
raise OmniError("Empty %s rspec" % typeStr)
if not is_rspec_string(requestString, None, None, logger=self.logger):
raise OmniError("%s RSpec file did not contain an RSpec" % typeStr)
# if not is_rspec_of_type(requestString, rspecType):
# if not is_rspec_of_type(requestString, rspecType, "GENI 3", False, logger=self.logger):
# FIXME: ION does not support PGv2 schema RSpecs. Stitcher doesn't mind, and PG AMs don't mind, but
# if the request is PGv2 and crosses ION, this may cause trouble.
if not (is_rspec_of_type(requestString, rspecType, "GENI 3", False) or is_rspec_of_type(requestString, rspecType, "ProtoGENI 2", False)):
if self.opts.devmode:
self.logger.info("RSpec of wrong type or schema, but continuing...")
else:
raise OmniError("%s RSpec file did not contain a %s RSpec (wrong type or schema)" % (typeStr, typeStr))
# Run rspeclint
if doRSpecLint:
try:
rspeclint_exists()
except:
self.logger.debug("No rspeclint found")
return
# FIXME: Make this support GENIv4+? PGv2?
schema = rspec_schema.GENI_3_REQ_SCHEMA
if rspecType == rspec_schema.MANIFEST:
schema = rspec_schema.GENI_3_MAN_SCHEMA
if not validate_rspec(requestString, rspec_schema.GENI_3_NAMESPACE, schema):
raise OmniError("%s RSpec does not validate against its schemas" % typeStr)
def confirmSliceOK(self):
'''Ensure the given slice name corresponds to a current valid slice,
and return the Slice URN and expiration datetime.'''
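# For reference, a successful return is a (sliceurn, sliceexp) pair, e.g. (values illustrative):
#   ("urn:publicid:IDN+ch.example.org:proj+slice+myslice", datetime.datetime(2016, 1, 31, 23, 0))
# where sliceexp is a naive UTC datetime.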
self.logger.info("Reading slice %s credential...", self.slicename)
# Get slice URN from name
try:
sliceurn = self.framework.slice_name_to_urn(self.slicename)
except Exception, e:
self.logger.error("Could not determine slice URN from name %s: %s", self.slicename, e)
raise StitchingError(e)
self.slicehrn = urn_to_clean_hrn(sliceurn)[0]
if self.opts.fakeModeDir:
self.logger.info("Fake mode: not checking slice credential")
return (sliceurn, naiveUTC(datetime.datetime.max))
if self.opts.noReservation:
self.logger.info("Requested noReservation: not checking slice credential")
return (sliceurn, naiveUTC(datetime.datetime.max))
if self.opts.genRequest:
self.logger.info("Requested to only generate the request: not checking slice credential")
return (sliceurn, naiveUTC(datetime.datetime.max))
# Get slice cred
(slicecred, message) = handler_utils._get_slice_cred(self, sliceurn)
if not slicecred:
# FIXME: Maybe if the slice doesn't exist, create it?
# omniargs = ["createslice", self.slicename]
# try:
# (slicename, message) = omni.call(omniargs, self.opts)
# except:
# pass
raise StitchingError("Could not get a slice credential for slice %s: %s" % (sliceurn, message))
self.slicecred = slicecred
self.savedSliceCred = False
# Force the slice cred to be from a saved file if not already set
if not self.opts.slicecredfile:
self.opts.slicecredfile = os.path.join(os.getenv("TMPDIR", os.getenv("TMP", "/tmp")), SLICECRED_FILENAME)
if "%username" in self.opts.slicecredfile:
self.opts.slicecredfile = string.replace(self.opts.slicecredfile, "%username", self.username)
if "%slicename" in self.opts.slicecredfile:
self.opts.slicecredfile = string.replace(self.opts.slicecredfile, "%slicename", self.slicename)
if "%slicehrn" in self.opts.slicecredfile:
self.opts.slicecredfile = string.replace(self.opts.slicecredfile, "%slicehrn", self.slicehrn)
self.opts.slicecredfile = os.path.normpath(self.opts.slicecredfile)
if self.opts.fileDir:
self.opts.slicecredfile = prependFilePrefix(self.opts.fileDir, self.opts.slicecredfile)
trim = -4
if self.opts.slicecredfile.endswith("json"):
trim = -5
# -4 is to cut off .xml. It would be -5 if the cred is json
#self.logger.debug("Saving slice cred %s... to %s", str(slicecred)[:15], self.opts.slicecredfile[:trim])
self.opts.slicecredfile = handler_utils._save_cred(self, self.opts.slicecredfile[:trim], slicecred)
self.savedSliceCred = True
# Ensure slice not expired
sliceexp = credutils.get_cred_exp(self.logger, slicecred)
sliceexp = naiveUTC(sliceexp)
now = datetime.datetime.utcnow()
shorthours = 3
middays = 1
if sliceexp <= now:
# FIXME: Maybe if the slice doesn't exist, create it?
# omniargs = ["createslice", self.slicename]
# try:
# (slicename, message) = omni.call(omniargs, self.opts)
# except:
# pass
raise StitchingError("Slice %s expired at %s" % (sliceurn, sliceexp))
elif sliceexp - datetime.timedelta(hours=shorthours) <= now:
self.logger.warn('Slice %s expires in <= %d hours on %s UTC' % (sliceurn, shorthours, sliceexp))
self.logger.debug('It is now %s UTC' % (datetime.datetime.utcnow()))
elif sliceexp - datetime.timedelta(days=middays) <= now:
self.logger.info('Slice %s expires within %d day on %s UTC' % (sliceurn, middays, sliceexp))
else:
self.logger.info('Slice %s expires on %s UTC' % (sliceurn, sliceexp))
# return the slice urn, slice expiration (datetime)
return (sliceurn, sliceexp)
# End of confirmSliceOK
# Ensure the link has well formed property elements for cross-AM links each with a capacity
# Really there could be multiple AMs on the link, and each cross-AM link could have different properties,
# and properties are unidirectional so capacities could differ in different directions
# For now, the first 2 different AMs get properties
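# For reference, each LinkProperty handled below corresponds to a <property> element roughly like
# (values illustrative; attributes match the LinkProperty fields used here):
#   <property source_id="node0:if0" dest_id="node1:if0" capacity="20000"/>
# Two such properties, one per direction, are created when none are supplied.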
def addCapacityOneLink(self, link):
# look for property elements
if len(link.properties) > 2:
# raise StitchingError("Your request RSpec is malformed: include either 2 or 0 property elements on link '%s'" % link.id)
self.logger.debug("Request RSpec has %d property elements on link '%s'", len(link.properties), link.id)
# Get the 2 node IDs
ifcs = link.interfaces
if len(ifcs) < 2:
self.logger.debug("Link '%s' doesn't have at least 2 interfaces? Has %d", link.id, len(ifcs))
# If there is a stitching extension path for this, then this is a stitched link.
# Theoretically that means we want a property so SCS can put this in the stitching extension,
# but the stitching extension already exists
return
if len(ifcs) > 2:
self.logger.debug("Link '%s' has more than 2 interfaces (%d). Picking source and dest from the first 2 on different AMs.", link.id, len(ifcs))
# FIXME: Create a list of AM pairs, so I can look for 1 or 2 properties for each pair, and ensure
# each has a capacity. AM pairs means 2 interface_refs whose nodes are at different AMs
# Create a mapping of AM -> interface_id. Then can find the pairs of AMs and ensure there's a property for each,
# and use that interface_id for the property.
amToIfc = {}
for ifc in ifcs:
cid = ifc.client_id
idam = None
for node in self.parsedUserRequest.nodes:
if cid in node.interface_ids:
idam = node.amURN
break
if idam and idam not in amToIfc:
amToIfc[idam] = cid
self.logger.debug("Link '%s' has interfaces on %d AMs", link.id, len(amToIfc.keys()))
if len(amToIfc.keys()) > 0:
node1AM = amToIfc.keys()[0]
node1ID = amToIfc[node1AM]
# Now find a 2nd interface on a different AM
node2ID = None
node2AM = None
if len(amToIfc.keys()) > 1:
keys = amToIfc.keys()
node2AM = keys[1]
if node2AM == node1AM:
node2AM = keys[0]
node2ID = amToIfc[node2AM]
if node2AM is None:
# No 2nd interface on different AM found
self.logger.debug("Link '%s' doesn't have interfaces on more than 1 AM ('%s')?" % (link.id, node1AM))
# Even if this is a stitched link, the stitching extension would already have capacity
return
else:
# FIXME: Eventually want all the pairs to have properties
self.logger.debug("Link '%s' properties will be from '%s' to '%s'", link.id, node1ID, node2ID)
# If we get here, the link crosses 2+ AMs
# FIXME: Really I want properties between every pair of AMs (not nodes), and not
# just the first 2 different AMs
# If there are no property elements
if len(link.properties) == 0:
self.logger.debug("Link '%s' had no properties - must add them", link.id)
# Then add them
s_id = node1ID
d_id = node2ID
s_p = LinkProperty(s_id, d_id, None, None, self.opts.defaultCapacity)
s_p.link = link
d_p = LinkProperty(d_id, s_id, None, None, self.opts.defaultCapacity)
d_p.link = link
link.properties = [s_p, d_p]
return
# Error check properties:
for prop in link.properties:
if prop.source_id is None or prop.source_id == "":
raise StitchingError("Malformed property on link '%s' missing source_id attribute" % link.id)
if prop.dest_id is None or prop.dest_id == "":
raise StitchingError("Malformed property on link '%s' missing dest_id attribute" % link.id)
if prop.dest_id == prop.source_id:
raise StitchingError("Malformed property on link '%s' has matching source and dest_id: '%s'" % (link.id, prop.dest_id))
# If the elements are there, error check them, adding property if necessary
# FIXME: Generalize this to find any pair of properties that is reciprocal to ensure that if 1 has a capacity, the other has same
if len(link.properties) == 2:
props = link.properties
prop1S = props[0].source_id
prop1D = props[0].dest_id
prop2S = props[1].source_id
prop2D = props[1].dest_id
# FIXME: Compare to the interface_refs
if prop1S != prop2D or prop1D != prop2S:
# raise StitchingError("Malformed properties on link '%s': source and dest tags are not reversed" % link.id)
# This could happen if >2 ifcs and 2 asymmetric props
# But it could also mean a single property is duplicated
self.logger.debug("On link '%s': source and dest tags are not reversed" % link.id)
else:
if props[0].capacity and not props[1].capacity:
props[1].capacity = props[0].capacity
if props[1].capacity and not props[0].capacity:
props[0].capacity = props[1].capacity
# FIXME: Warn about really small or big capacities?
return
# End of handling have 2 current properties
for prop in link.properties:
# If this is a cross AM property, then it should have an explicit capacity
sourceAM = None
destAM = None
for node in self.parsedUserRequest.nodes:
if prop.source_id in node.interface_ids:
sourceAM = node.amURN
if prop.dest_id in node.interface_ids:
destAM = node.amURN
if sourceAM and destAM:
break
if sourceAM and destAM and sourceAM != destAM:
if prop.capacity is None or prop.capacity == "":
prop.capacity = self.opts.defaultCapacity
# FIXME: Warn about really small or big capacities?
# FIXME: Do we need the reciprocal property?
# # Create the 2nd property with the source and dest reversed
# prop2 = LinkProperty(prop.dest_id, prop.source_id, prop.latency, prop.packet_loss, prop.capacity)
# link.properties = [prop, prop2]
# self.logger.debug("Link '%s' added missing reverse property", link.id)
# End of addCapacityOneLink
# Ensure all implicit AMs (from interface_ref->node->component_manager_id) are explicit on the link
def ensureLinkListsAMs(self, link, requestRSpecObject):
if not link:
return
ams = []
for ifc in link.interfaces:
found = False
for node in requestRSpecObject.nodes:
if ifc.client_id in node.interface_ids:
if node.amURN is not None and node.amURN not in ams:
ams.append(node.amURN)
found = True
self.logger.debug("Link '%s' interface '%s' found on node '%s'", link.id, ifc.client_id, node.id)
break
if not found:
self.logger.debug("Link '%s' interface '%s' not found on any node", link.id, ifc.client_id)
# FIXME: What would this mean?
for amURN in ams:
am = Aggregate.find(amURN)
if am not in link.aggregates:
self.logger.debug("Adding missing AM %s to link '%s'", amURN, link.id)
link.aggregates.append(am)
# End of ensureLinkListsAMs
def hasGRELink(self, requestRSpecObject):
# Does the given RSpec have a GRE link
# Side effect: ensure all links list all known component_managers
# Return boolean
if not requestRSpecObject:
return False
isGRE = False
for link in requestRSpecObject.links:
# Make sure links explicitly lists all its aggregates, so this test is valid
self.ensureLinkListsAMs(link, requestRSpecObject)
# has a link that has 2 interface_refs and has a link type of *gre_tunnel and endpoint nodes are PG
if not (link.typeName == link.GRE_LINK_TYPE or link.typeName == link.EGRE_LINK_TYPE):
# Not GRE
# self.logger.debug("Link %s not GRE but %s", link.id, link.typeName)
continue
if len(link.aggregates) != 2:
self.logger.warn("Link '%s' is a GRE link with %d AMs?", link.id, len(link.aggregates))
continue
if len(link.interfaces) != 2:
self.logger.warn("Link '%s' is a GRE link with %d interfaces?", link.id, len(link.interfaces))
continue
isGRE = True
for ifc in link.interfaces:
found = False
for node in requestRSpecObject.nodes:
if ifc.client_id in node.interface_ids:
found = True
# This is the node
# I'd like to ensure the node is a PG node.
# But at this point we haven't called getversion yet
# So we don't really know if this is a PG node
# am = Aggregate.find(node.amURN)
# if not am.isPG:
# self.logger.warn("Bad GRE link %s: interface_ref %s is on a non PG node: %s", link.id, ifc.client_id, am)
# isGRE = False
# We do not currently parse sliver-type off of nodes to validate that
break
if not found:
self.logger.warn("GRE link '%s' has unknown interface_ref '%s' - assuming it is OK", link.id, ifc.client_id)
if isGRE:
self.logger.debug("Link '%s' is GRE", link.id)
# Extra: ensure endpoints are xen for link type egre, openvz or rawpc for gre
# End of loop over links
return isGRE
# End of hasGRELink
def mustCallSCS(self, requestRSpecObject):
'''Does this request actually require stitching?
Check: >=1 link in main body with >= 2 diff component_manager
names and no shared_vlan extension and no non-VLAN link_type
'''
# side effects
# - links list known component_managers
# - links have 2 well formed property elements with explicit capacities
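# In short: a link needs the SCS only if it spans 2+ aggregates, is a plain VLAN link without the
# shared_vlan extension, and is not ExoGENI-only (unless the GENI/EG stitching options say otherwise).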
if not requestRSpecObject:
return False
needSCS = False
for link in requestRSpecObject.links:
# Make sure links explicitly lists all its aggregates, so this test is valid
self.ensureLinkListsAMs(link, requestRSpecObject)
if len(link.aggregates) > 1 and not link.hasSharedVlan and link.typeName == link.VLAN_LINK_TYPE:
# Ensure this link has 2 well formed property elements with explicit capacities
self.addCapacityOneLink(link)
self.logger.debug("Requested link '%s' is stitching", link.id)
# Links that are ExoGENI only use ExoGENI stitching, not the SCS
# So only if the link includes anything non-ExoGENI, we use the SCS
egOnly = True
for am in link.aggregates:
# I wish I could do am.isEG but we don't get that info until later.
# Hack!
if 'exogeni' not in am.urn:
needSCS = True
egOnly = False
break
if egOnly:
self.logger.debug("Link '%s' is only ExoGENI, so can use ExoGENI stitching.", link.id)
if needSCS:
self.logger.debug("But we already decided we need the SCS.")
elif self.opts.noEGStitching and not needSCS:
self.logger.info("Requested to use GENI stitching instead of ExoGENI stitching")
needSCS = True
elif self.opts.noEGStitchingOnLink and link.id in self.opts.noEGStitchingOnLink and not needSCS:
self.logger.info("Requested to use GENI stitching on link %s instead of ExoGENI stitching", link.id)
needSCS = True
# FIXME: If the link includes the openflow rspec extension marking a desire to make the link
# be OF controlled, then use the SCS and GENI stitching?
# End of block to handle likely stitching link
# FIXME: Can we be robust to malformed requests, and stop and warn the user?
# EG the link has 2+ interface_ref elements that are on 2+ nodes belonging to 2+ AMs?
# Currently the parser only saves the IRefs on Links - no attempt to link to Nodes
# And for Nodes, we don't even look at the Interface sub-elements
# End of loop over links
return needSCS
def callSCS(self, sliceurn, requestDOM, existingAggs):
'''Construct SCS args, call the SCS service'''
# - Construct the args
# - Call ComputePath
# - raise an informative error if necessary
# - if --debug, save scs-result.json
# - return scsResponse
requestString, scsOptions = self.constructSCSArgs(requestDOM, existingAggs)
existingAggs = None # Clear to note we are done
self.scsService.result = None # Avoid any unexpected issues
self.logger.debug("Calling SCS with options %s", scsOptions)
if self.opts.savedSCSResults:
self.logger.debug("** Not actually calling SCS, using results from '%s'", self.opts.savedSCSResults)
try:
scsResponse = self.scsService.ComputePath(sliceurn, requestString, scsOptions, self.opts.savedSCSResults)
except StitchingError as e:
self.logger.debug("Error from slice computation service: %s", e)
raise
except Exception as e:
# FIXME: If SCS used dossl then that might handle many of these errors.
# Alternatively, the SCS could handle these itself.
excName = e.__class__.__name__
strE = str(e)
if strE == '':
strE = excName
elif strE == "''":
strE = "%s: %s" % (excName, strE)
if strE.startswith('BadStatusLine'):
# Did you call scs with http when https was expected?
url = self.opts.scsURL.lower()
if '8443' in url and not url.startswith('https'):
strE = "Bad SCS URL: Use https for a SCS requiring SSL (running on port 8443). (%s)" % strE
elif 'unknown protocol' in strE:
url = self.opts.scsURL.lower()
if url.startswith('https'):
strE = "Bad SCS URL: Try using http not https. (%s)" % strE
elif '404 Not Found' in strE:
strE = 'Bad SCS URL (%s): %s' % (self.opts.scsURL, strE)
elif 'Name or service not known' in strE:
strE = 'Bad SCS host (%s): %s' % (self.opts.scsURL, strE)
elif 'alert unknown ca' in strE:
try:
certObj = gid.GID(filename=self.framework.cert)
certiss = certObj.get_issuer()
certsubj = certObj.get_urn()
self.logger.debug("SCS gave exception: %s", strE)
strE = "SCS does not trust the CA (%s) that signed your (%s) user certificate! Use an account at another clearinghouse or find another SCS server." % (certiss, certsubj)
except:
strE = 'SCS does not trust your certificate. (%s)' % strE
self.logger.error("Exception from slice computation service: %s", strE)
import traceback
self.logger.debug("%s", traceback.format_exc())
raise StitchingError("SCS gave error: %s" % strE)
# Done SCS call error handling
self.logger.debug("SCS successfully returned.");
if self.opts.debug:
scsresfile = prependFilePrefix(self.opts.fileDir, "scs-result.json")
self.logger.debug("Writing SCS result JSON to %s" % scsresfile)
with open (scsresfile, 'w') as file:
file.write(stripBlankLines(str(json.dumps(self.scsService.result, encoding='ascii', cls=DateTimeAwareJSONEncoder))))
self.scsService.result = None # Clear memory/state
return scsResponse
# Done callSCS
def constructSCSArgs(self, requestDOM, existingAggs=None):
'''Build and return the string rspec request and options arguments for calling the SCS.'''
# return requestString and options
# Handles --noEGStitching, --includeHop, --excludeHop, --noEGStitchingOnLink, --includeHopOnPath
# Also handles requesting to avoid any VLAN tags found to be unavailable on the hops
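# Illustrative shape of the options struct assembled below (keys are the scs module constants used
# in this method; the path name and hop URN are hypothetical):
#   {scs.GENI_PATHS_MERGED_TAG: True,
#    scs.GENI_PROFILE_TAG: {"link-am1-am2": {scs.HOP_EXCLUSION_TAG: ["urn:publicid:IDN+...+interface+...=3747-3748"],
#                                            scs.HOP_INCLUSION_TAG: ["urn:publicid:IDN+...+interface+..."]}}}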
options = {}
# options is a struct
# Supply the SCS option that requests the
# '##all_paths_merged##' path in the workflow.
# Doing so forces SCS to detect cross path workflow loops for
# us.
# Note that in omnilib/stitch/workflow we ignore that "path"
# currently, and construct our own workflow
options[scs.GENI_PATHS_MERGED_TAG] = True
if self.opts.noEGStitching:
# User requested no EG stitching. So ask SCS to find a GENI path
# for all EG links
options[scs.ATTEMPT_PATH_FINDING_TAG] = True
# To exclude a hop, add a geni_routing_profile struct
# This in turn should have a struct per path whose name is the path name
# Each should have a hop_exclusion_list array, containing the names of hops
# If you append '=<VLANRange>' to the hop URN, that means to exclude
# that set of VLANs from consideration on that hop, but don't entirely exclude
# the hop.
# exclude = "urn:publicid:IDN+instageni.gpolab.bbn.com+interface+procurve2:5.24=3747-3748"
# path = "link-pg-utah1-ig-gpo1"
# exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.atla:ge-7/1/6:protogeni"
# excludes = []
# excludes.append(exclude)
# exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.hous:ge-9/1/4:protogeni"
# excludes.append(exclude)
# exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.losa:ge-7/1/3:protogeni"
# excludes.append(exclude)
# exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.salt:ge-7/1/2:*"
## excludes.append(exclude)
# exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.wash:ge-7/1/3:protogeni"
# excludes.append(exclude)
# profile = {}
# pathStruct = {}
# pathStruct["hop_exclusion_list"]=excludes
# profile[path] = pathStruct
# options["geni_routing_profile"]=profile
profile = {}
# If we have existing AMs,
# Add the options to tell the SCS to exclude any hops marked for exclusion, or any VLANs
# marked unavailable
if existingAggs and len(existingAggs) > 0:
for agg in existingAggs:
for hop in agg.hops:
if hop.excludeFromSCS or (hop.vlans_unavailable and len(hop.vlans_unavailable) > 0):
# get path and ensure a pathStruct object
path = hop._path.id
if profile.has_key(path):
pathStruct = profile[path]
else:
pathStruct = {}
# Get hop_exclusion_list
if pathStruct.has_key(scs.HOP_EXCLUSION_TAG):
excludes = pathStruct[scs.HOP_EXCLUSION_TAG]
else:
excludes = []
# get hop URN
urn = hop.urn
# Add to the excludes list
if hop.excludeFromSCS:
excludes.append(urn)
elif hop.vlans_unavailable and len(hop.vlans_unavailable) > 0:
excludes.append(urn + "=" + str(hop.vlans_unavailable))
# Put the new objects in the struct
pathStruct[scs.HOP_EXCLUSION_TAG] = excludes
profile[path] = pathStruct
# Done loop over hops
# Done loop over AMs
# Done block to handle existing AMs
# Handle the commandline options to modify how links are processed.
# IE, Exclude any hops given as an option from _all_ hops
# And add the right include hops and force GENI Stitching options
links = None
if (self.opts.excludehop and len(self.opts.excludehop) > 0) or (self.opts.includehop and len(self.opts.includehop) > 0) or \
(self.opts.includehoponpath and len(self.opts.includehoponpath) > 0) or \
(self.opts.noEGStitchingOnLink and len(self.opts.noEGStitchingOnLink) > 0):
links = requestDOM.getElementsByTagName(defs.LINK_TAG)
if links and len(links) > 0:
if not self.opts.excludehop:
self.opts.excludehop = []
if not self.opts.includehop:
self.opts.includehop = []
if not self.opts.includehoponpath:
self.opts.includehoponpath = []
if not self.opts.noEGStitchingOnLink:
self.opts.noEGStitchingOnLink = []
self.logger.debug("Got links and option to exclude hops: %s, include hops: %s, include hops on paths: %s, force GENI stitching on paths: %s", self.opts.excludehop, self.opts.includehop, self.opts.includehoponpath, self.opts.noEGStitchingOnLink)
# Handle any --excludeHop
for exclude in self.opts.excludehop:
# For each path
for link in links:
path = link.getAttribute(Link.CLIENT_ID_TAG)
path = str(path).strip()
if profile.has_key(path):
pathStruct = profile[path]
else:
pathStruct = {}
# Get hop_exclusion_list
if pathStruct.has_key(scs.HOP_EXCLUSION_TAG):
excludes = pathStruct[scs.HOP_EXCLUSION_TAG]
else:
excludes = []
excludes.append(exclude)
self.logger.debug("Excluding %s from path %s", exclude, path)
# Put the new objects in the struct
pathStruct[scs.HOP_EXCLUSION_TAG] = excludes
profile[path] = pathStruct
# Handle any --includeHop
for include in self.opts.includehop:
# For each path
for link in links:
path = link.getAttribute(Link.CLIENT_ID_TAG)
path = str(path).strip()
if profile.has_key(path):
pathStruct = profile[path]
else:
pathStruct = {}
# Get hop_inclusion_list
if pathStruct.has_key(scs.HOP_INCLUSION_TAG):
includes = pathStruct[scs.HOP_INCLUSION_TAG]
else:
includes = []
includes.append(include)
self.logger.debug("Including %s on path %s", include, path)
# Put the new objects in the struct
pathStruct[scs.HOP_INCLUSION_TAG] = includes
profile[path] = pathStruct
# Handle any --includeHopOnPath
for (includehop, includepath) in self.opts.includehoponpath:
# For each path
for link in links:
path = link.getAttribute(Link.CLIENT_ID_TAG)
path = str(path).strip()
if not path.lower() == includepath.lower():
continue
if profile.has_key(path):
pathStruct = profile[path]
else:
pathStruct = {}
# Get hop_inclusion_list
if pathStruct.has_key(scs.HOP_INCLUSION_TAG):
includes = pathStruct[scs.HOP_INCLUSION_TAG]
else:
includes = []
includes.append(includehop)
self.logger.debug("Including %s on path %s", includehop, path)
# Put the new objects in the struct
pathStruct[scs.HOP_INCLUSION_TAG] = includes
profile[path] = pathStruct
# Handle any --noEGStitchingOnLink
for noeglink in self.opts.noEGStitchingOnLink:
for link in links:
path = link.getAttribute(Link.CLIENT_ID_TAG)
path = str(path).strip()
if not path.lower() == noeglink.lower():
continue
if profile.has_key(path):
pathStruct = profile[path]
else:
pathStruct = {}
pathStruct[scs.ATTEMPT_PATH_FINDING_TAG] = True
self.logger.debug("Force SCS to find a GENI stitching path for link %s", noeglink)
profile[path] = pathStruct
# Done block to handle commandline per link arguments
if profile != {}:
options[scs.GENI_PROFILE_TAG] = profile
self.logger.debug("Sending SCS options %s", options)
try:
xmlreq = requestDOM.toprettyxml(encoding="utf-8")
except Exception, xe:
self.logger.debug("Failed to XMLify requestDOM for sending to SCS: %s", xe)
self._raise_omni_error("Malformed request RSpec: %s" % xe)
return xmlreq, options
# Done constructSCSArgs
def parseSCSResponse(self, scsResponse):
# Parse the response from the SCS
# - print / save SCS expanded RSpec in debug mode
# - print SCS picked VLAN tags in debug mode
# - parse the RSpec, creating objects
# - parse the workflow, creating dependencies
# return the parsed RSpec object and the workflow parser
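# Returns (parsed_rspec, workflow_parser): the parsed expanded RSpec object plus a WorkflowParser
# whose parse() call below builds the Path/Hop/Aggregate dependency objects from the workflow data.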
expandedRSpec = scsResponse.rspec()
if self.opts.debug or self.opts.fakeModeDir or self.logger.isEnabledFor(logging.DEBUG):
if isRSpecStitchingSchemaV2(expandedRSpec):
self.logger.debug("SCS RSpec uses v2 stitching schema")
# Write the RSpec the SCS gave us to a file
header = "<!-- SCS expanded stitching request for:\n\tSlice: %s\n -->" % (self.slicename)
if expandedRSpec and is_rspec_string( expandedRSpec, None, None, logger=self.logger ):
content = stripBlankLines(string.replace(expandedRSpec, "\\n", '\n'))
else:
content = "<!-- No valid RSpec returned. -->"
if expandedRSpec is not None:
content += "\n<!-- \n" + expandedRSpec + "\n -->"
if self.opts.debug or self.opts.fakeModeDir:
# Set -o to ensure this goes to a file, not logger or stdout
opts_copy = copy.deepcopy(self.opts)
opts_copy.output = True
scsreplfile = prependFilePrefix(self.opts.fileDir, Aggregate.FAKEMODESCSFILENAME)
handler_utils._printResults(opts_copy, self.logger, header, \
content, \
scsreplfile)
# In debug mode, keep copies of old SCS expanded requests
if self.opts.debug:
handler_utils._printResults(opts_copy, self.logger, header, content, scsreplfile + str(self.scsCalls))
self.logger.debug("Wrote SCS expanded RSpec to %s", \
scsreplfile)
# A debugging block: print out the VLAN tag the SCS picked for each hop, independent of objects
if self.logger.isEnabledFor(logging.DEBUG):
start = 0
path = None
while True:
if not content.find("<link id=", start) >= start:
break
hopIdStart = content.find('<link id=', start) + len('<link id=') + 1
hopIdEnd = content.find(">", hopIdStart)-1
# Get the link ID
hop = content[hopIdStart:hopIdEnd]
# Look for the name of the path for this hop before the name of the hop
if content.find('<path id=', start, hopIdStart) > 0:
pathIdStart = content.find('<path id=', start) + len('<path id=') + 1
pathIdEnd = content.find(">", pathIdStart)-1
self.logger.debug("Found path from %d to %d", pathIdStart, pathIdEnd)
path = content[pathIdStart:pathIdEnd]
# find suggestedVLANRange
suggestedStart = content.find("suggestedVLANRange>", hopIdEnd) + len("suggestedVLANRange>")
suggestedEnd = content.find("</suggested", suggestedStart)
suggested = content[suggestedStart:suggestedEnd]
# find vlanRangeAvailability
availStart = content.find("vlanRangeAvailability>", hopIdEnd) + len("vlanRangeAvailability>")
availEnd = content.find("</vlanRange", availStart)
avail = content[availStart:availEnd]
# print that all
self.logger.debug("SCS gave hop %s on path %s suggested VLAN %s, avail: '%s'", hop, path, suggested, avail)
start = suggestedEnd
# parseRequest
parsed_rspec = self.rspecParser.parse(expandedRSpec)
# self.logger.debug("Parsed SCS expanded RSpec of type %r",
# type(parsed_rspec))
# parseWorkflow
workflow = scsResponse.workflow_data()
scsResponse = None # once workflow extracted, done with that object
# Dump the formatted workflow at debug level
import pprint
pp = pprint.PrettyPrinter(indent=2)
self.logger.debug("SCS workflow:\n" + pp.pformat(workflow))
workflow_parser = WorkflowParser(self.logger)
# Parse the workflow, creating Path/Hop/etc objects
# In the process, fill in a tree of which hops depend on which,
# and which AMs depend on which
# Also mark each hop with what hop it imports VLANs from,
# And check for AM dependency loops
workflow_parser.parse(workflow, parsed_rspec)
# FIXME: Check SCS output consistency in a subroutine:
# In each path: An AM with 1 hop must either _have_ dependencies or _be_ a dependency
# All AMs must be listed in workflow data at least once per path they are in
return parsed_rspec, workflow_parser
# End of parseSCSResponse
def ensureOneExoSM(self):
'''If 2 AMs in ams_to_process are ExoGENI and share a path and no noEGStitching specified,
then ensure we use the ExoSM. If 2 AMs use the ExoSM URL, combine them into a single AM.'''
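# Note: the ExoSM (defs.EXOSM_URL) is, roughly, ExoGENI's central slice manager endpoint; routing
# EG-to-EG stitched links through it lets ExoGENI perform its own internal stitching.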
if len(self.ams_to_process) < 2:
return
exoSMCount = 0
exoSMs = []
nonExoSMs = []
egAMCount = 0
egAMs = []
for am in self.ams_to_process:
if am.isExoSM:
egAMCount += 1
exoSMCount += 1
exoSMs.append(am)
self.logger.debug("%s is ExoSM", am)
else:
nonExoSMs.append(am)
if am.isEG:
egAMs.append(am)
egAMCount += 1
if egAMCount == 0:
return
if egAMCount > 1:
self.logger.debug("Request includes more than one ExoGENI AM.")
# If there is a stitched link between 2 EG AMs and no noEGStitching, then we
# must change each to be the ExoSM so we use EG stitching for those AMs / links.
# If there is no stitched link between the 2 EG AMs or the user specified noEGStitching,
# then we do not change them to be the ExoSM.
# Note that earlier useExoSM changed EG AMs into the ExoSM
if self.opts.noEGStitching:
# SCS will have tried to provide a GENI path and errored if not possible
self.logger.debug("Requested no EG stitching. Will edit requests to let this work later")
# And do not force the AMs to be the ExoSM
elif exoSMCount == egAMCount:
self.logger.debug("All EG AMs are already the ExoSM")
else:
# Now see if each EG AM should be made into the ExoSM or not.
for anEGAM in egAMs:
if self.opts.useExoSM:
# Should not happen I believe.
self.logger.debug("Asked to use the ExoSM for all EG AMs. So change this one.")
elif self.parsedSCSRSpec:
self.logger.debug("Will use EG stitching where applicable. Must go through the ExoSM for EG only links.")
# Does this AM participate in an EG only link? If so, convert it.
# If not, continue
# EG only links will not be in the stitching extension, so use the main body elements
hasEGLink = False
for link in self.parsedSCSRSpec.links:
# If this link was explicitly marked for no EG stitching
# via a commandline option, then log at debug and continue to next link
if self.opts.noEGStitchingOnLink and link.id in self.opts.noEGStitchingOnLink:
self.logger.debug("Requested no EG stitching on link %s, so this link cannot force this AM to be the ExoSM", link.id)
continue
hasThisAgg = False
hasOtherEGAgg = False
hasNonEGAgg = False
for agg in link.aggregates:
if anEGAM == agg:
hasThisAgg=True
elif agg.isEG:
hasOtherEGAgg = True
else:
hasNonEGAgg = True
if hasThisAgg and hasOtherEGAgg:
# then this AM has an EG link
# Or, FIXME: must it also be that hasNonEGAgg is False?
self.logger.debug("Looking at links, %s uses this %s and also another EG AM", link.id, anEGAM)
if hasNonEGAgg:
self.logger.debug("FIXME: Also has a non EG AM. Should this case avoid setting hasEGLink to true and use GENI stitching? Assuming so...")
else:
hasEGLink = True
break # out of loop over links
# End of loop over links in the RSpec
if not hasEGLink:
self.logger.debug("%s is EG but has no links to other EG AMs, so no need to make it the ExoSM", anEGAM)
continue # to next EG AM
self.logger.debug("%s has a link that to another EG AM. To use EG stitching between them, make this the ExoSM.", anEGAM)
# At this point, we're going to make a non ExoSM EG AM into the ExoSM so the ExoSM
# can handle the stitching.
# Make anEGAM the ExoSM
self.logger.debug("Making %s the ExoSM", anEGAM)
anEGAM.alt_url = anEGAM.url
anEGAM.url = defs.EXOSM_URL
anEGAM.isExoSM = True
anEGAM.nick = handler_utils._lookupAggNick(self, anEGAM.url)
exoSMCount += 1
exoSMs.append(anEGAM)
nonExoSMs.remove(anEGAM)
# End of block where didn't specify useExoSM
# End of loop over EG AMs
# End of else to see if each EG AM must be changed into the ExoSM
# End of block handling EG AM count > 1
if exoSMCount == 0:
self.logger.debug("Not using ExoSM")
return
exoSM = None
# First ExoSM will be _the_ ExoSM
if exoSMCount > 0:
exoSM = exoSMs[0]
exoSMURN = handler_utils._lookupAggURNFromURLInNicknames(self.logger, self.config, defs.EXOSM_URL)
# Ensure standard ExoSM URN is the URN and old URN is in urn_syns
if exoSM.urn not in exoSM.urn_syns:
exoSM.urn_syns.append(exoSM.urn)
if exoSMURN != exoSM.urn:
exoSM.urn = exoSMURN
if exoSMURN not in exoSM.urn_syns:
exoSM.urn_syns += Aggregate.urn_syns(exoSMURN)
if exoSMCount < 2:
self.logger.debug("Only %d ExoSMs", exoSMCount)
return
# Now merge other ExoSMs into _the_ ExoSM
for am in exoSMs:
if am == exoSM:
continue
self.logger.debug("Merge AM %s (%s, %s) into %s (%s, %s)", am.urn, am.url, am.alt_url, exoSM, exoSM.url, exoSM.alt_url)
# Merge urn_syns
if exoSM.urn != am.urn and am.urn not in exoSM.urn_syns:
exoSM.urn_syns.append(am.urn)
for urn in am.urn_syns:
if urn not in exoSM.urn_syns:
exoSM.urn_syns.append(urn)
# Merge _dependsOn
if am in exoSM.dependsOn:
exoSM._dependsOn.discard(am)
if exoSM in am.dependsOn:
am._dependsOn.discard(exoSM)
exoSM._dependsOn.update(am._dependsOn)
# If both am and exoSM are in dependsOn or isDependencyFor for some other AM, then remove am
for am2 in self.ams_to_process:
if am2 in exoSMs:
continue
if am2 == am:
continue
if am2 == exoSM:
continue
if am in am2.dependsOn:
self.logger.debug("Removing dup ExoSM %s from %s.dependsOn", am, am2)
am2._dependsOn.discard(am)
if not exoSM in am2.dependsOn:
self.logger.debug("Adding real ExoSM %s to %s.dependsOn", exoSM, am2)
am2._dependsOn.add(exoSM)
if am in am2.isDependencyFor:
self.logger.debug("Removing dup ExoSM %s from %s.isDependencyFor", am, am2)
am2.isDependencyFor.discard(am)
if not exoSM in am2.isDependencyFor:
self.logger.debug("Adding real ExoSM %s to %s.isDependencyFor", exoSM, am2)
am2.isDependencyFor.add(exoSM)
# End of loop over AMs to merge dependsOn and isDependencyFor
# merge isDependencyFor
if am in exoSM.isDependencyFor:
exoSM.isDependencyFor.discard(am)
if exoSM in am.isDependencyFor:
am.isDependencyFor.discard(exoSM)
exoSM.isDependencyFor.update(am.isDependencyFor)
# merge _paths
# Path has hops and aggregates
# Fix the list of aggregates to drop the aggregate being merged away
# What happens when a path has same aggregate at 2 discontiguous hops?
for path in am.paths:
path._aggregates.remove(am)
if not exoSM in path.aggregates:
path._aggregates.add(exoSM)
if not path in exoSM.paths:
self.logger.debug("Merging in path %s", path)
exoSM._paths.add(path)
# FIXME: What does it mean for the same path to be on both aggregates? What has to be merged?
# merge _hops
# Hop points back to aggregate. Presumably these pointers must be reset
for hop in am.hops:
hop._aggregate = exoSM
if not hop in exoSM.hops:
self.logger.debug("Merging in hop %s", hop)
exoSM._hops.add(hop)
# merge userRequested
# - If 1 was user requested and 1 was not, whole thing is user requested
if am.userRequested:
exoSM.userRequested = True
# merge alt_url
if exoSM.alt_url and handler_utils._extractURL(self.logger, exoSM.alt_url) == handler_utils._extractURL(self.logger, exoSM.url):
if handler_utils._extractURL(self.logger, exoSM.alt_url) != handler_utils._extractURL(self.logger, am.url):
exoSM.alt_url = am.alt_url
# End of loop over exoSMs, doing merge
# ensure only one in cls.aggs
newaggs = dict()
for (key, agg) in Aggregate.aggs.items():
if not (agg.isExoSM and agg != exoSM):
newaggs[key] = agg
Aggregate.aggs = newaggs
nonExoSMs.append(exoSM)
self.ams_to_process = nonExoSMs
def add_am_info(self, aggs):
'''Add extra information about the AMs to the Aggregate objects, like the API version'''
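# Summary of the GetVersion fields consulted below (all accessed defensively):
#   geni_am_type                -> sets the dcn / isEG / isPG / isGRAM / isFOAM / isOESS flags
#   geni_api_versions           -> picks agg.api_version and possibly swaps agg.url
#   GRAM_version, foam_version  -> extra AM type hints
#   geni_request_rspec_versions -> detects stitching schema v1/v2 support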
options_copy = copy.deepcopy(self.opts)
options_copy.debug = False
options_copy.info = False
options_copy.aggregate = []
aggsc = copy.copy(aggs)
for agg in aggsc:
# Don't do an aggregate twice
if agg.urn in self.amURNsAddedInfo:
continue
# self.logger.debug("add_am_info looking at %s", agg)
# Note which AMs were user requested
if self.parsedUserRequest and agg.urn in self.parsedUserRequest.amURNs:
agg.userRequested = True
elif self.parsedUserRequest:
for urn2 in agg.urn_syns:
if urn2 in self.parsedUserRequest.amURNs:
agg.userRequested = True
# FIXME: Better way to detect this?
if handler_utils._extractURL(self.logger, agg.url) in defs.EXOSM_URL:
agg.isExoSM = True
# self.logger.debug("%s is the ExoSM cause URL is %s", agg, agg.url)
# EG AMs in particular have 2 URLs in some sense - ExoSM and local
# So note the other one, since VMs are split between the 2
for (amURN, amURL) in self.config['aggregate_nicknames'].values():
if amURN.strip() in agg.urn_syns:
hadURL = handler_utils._extractURL(self.logger, agg.url)
newURL = handler_utils._extractURL(self.logger, amURL)
if hadURL != newURL and not hadURL in newURL and not newURL in hadURL and not newURL.strip() == '':
agg.alt_url = amURL.strip()
break
# else:
# self.logger.debug("Not setting alt_url for %s. URL is %s, alt candidate was %s with URN %s", agg, hadURL, newURL, amURN)
# elif "exogeni" in amURN and "exogeni" in agg.urn:
# self.logger.debug("Config had URN %s URL %s, but that URN didn't match our URN synonyms for %s", amURN, newURL, agg)
if "exogeni" in agg.urn and not agg.alt_url:
# self.logger.debug("No alt url for Orca AM %s (URL %s) with URN synonyms:", agg, agg.url)
# for urn in agg.urn_syns:
# self.logger.debug("\t%s", urn)
if not agg.isExoSM:
agg.alt_url = defs.EXOSM_URL
# Try to get a URL from the CH? Do we want/need this
# expense? This is a call to the CH....
# Comment this out - takes too long, not clear
# it is needed.
# if not agg.alt_url:
# fw_ams = dict()
# try:
# fw_ams = self.framework.list_aggregates()
# for fw_am_urn in fw_ams.keys():
# if fw_am_urn and fw_am_urn.strip() in am.urn_syns and fw_ams[fw_am_urn].strip() != '':
# cand_url = fw_ams[fw_am_urn]
# if cand_url != am.url and not am.url in cand_url and not cand_url in am.url:
# am.alt_url = cand_url
# self.logger.debug("Found AM %s alternate URL from CH ListAggs: %s", am.urn, am.alt_url)
# break
# except:
# pass
# If --noExoSM then ensure this is not the ExoSM
if agg.isExoSM and agg.alt_url and self.opts.noExoSM:
self.logger.warn("%s used ExoSM URL. Changing to %s", agg, agg.alt_url)
amURL = agg.url
agg.url = agg.alt_url
agg.alt_url = amURL
agg.isExoSM = False
# For using the test ION AM
# if 'alpha.dragon' in agg.url:
# agg.url = 'http://alpha.dragon.maxgigapop.net:12346/'
# Use GetVersion to determine AM type, AM API versions spoken, etc
# Hack: Here we hard-code using APIv2 always to call getversion, assuming that v2 is the AM default
# and so the URLs are v2 URLs.
if options_copy.warn:
omniargs = ['--ForceUseGetVersionCache', '-V2', '-a', agg.url, 'getversion']
else:
omniargs = ['--ForceUseGetVersionCache', '-o', '--warn', '-V2', '-a', agg.url, 'getversion']
try:
self.logger.debug("Getting extra AM info from Omni for AM %s", agg)
(text, version) = omni.call(omniargs, options_copy)
aggurl = agg.url
if isinstance (version, dict) and version.has_key(aggurl) and isinstance(version[aggurl], dict) \
and version[aggurl].has_key('value') and isinstance(version[aggurl]['value'], dict):
# First parse geni_am_type
if version[aggurl]['value'].has_key('geni_am_type') and isinstance(version[aggurl]['value']['geni_am_type'], list):
if DCN_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is DCN", agg)
agg.dcn = True
elif ORCA_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is Orca", agg)
agg.isEG = True
elif PG_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is ProtoGENI", agg)
agg.isPG = True
elif GRAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is GRAM", agg)
agg.isGRAM = True
elif FOAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is FOAM", agg)
agg.isFOAM = True
elif OESS_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is OESS", agg)
agg.isOESS = True
elif version[aggurl]['value'].has_key('geni_am_type') and ORCA_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is Orca", agg)
agg.isEG = True
elif version[aggurl]['value'].has_key('geni_am_type') and DCN_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is DCN", agg)
agg.dcn = True
elif version[aggurl]['value'].has_key('geni_am_type') and PG_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is ProtoGENI", agg)
agg.isPG = True
elif version[aggurl]['value'].has_key('geni_am_type') and GRAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is GRAM", agg)
agg.isGRAM = True
elif version[aggurl]['value'].has_key('geni_am_type') and FOAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is FOAM", agg)
agg.isFOAM = True
elif version[aggurl]['value'].has_key('geni_am_type') and OESS_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is OESS", agg)
agg.isOESS = True
# This code block looks nice but doesn't work - the version object is not the full triple
# elif version[aggurl].has_key['code'] and isinstance(version[aggurl]['code'], dict) and \
# version[aggurl]['code'].has_key('am_type') and str(version[aggurl]['code']['am_type']).strip() != "":
# if version[aggurl]['code']['am_type'] == PG_AM_TYPE:
# self.logger.debug("AM %s is ProtoGENI", agg)
# agg.isPG = True
# elif version[aggurl]['code']['am_type'] == ORCA_AM_TYPE:
# self.logger.debug("AM %s is Orca", agg)
# agg.isEG = True
# elif version[aggurl]['code']['am_type'] == DCN_AM_TYPE:
# self.logger.debug("AM %s is DCN", agg)
# agg.dcn = True
# Now parse geni_api_versions
if version[aggurl]['value'].has_key('geni_api_versions') and isinstance(version[aggurl]['value']['geni_api_versions'], dict):
maxVer = 1
hasV2 = False
v2url = None
maxVerUrl = None
reqVerUrl = None
for key in version[aggurl]['value']['geni_api_versions'].keys():
if int(key) == 2:
hasV2 = True
v2url = version[aggurl]['value']['geni_api_versions'][key]
# Ugh. Why was I changing the URL based on the Ad? Not needed, Omni does this.
# And if the AM says the current URL is the current opts.api_version OR the AM only lists
# one URL, then changing the URL makes no sense. So if I later decide I need this
# for some reason, only do it if len(keys) > 1 and [value][geni_api] != opts.api_version
# Or was I trying to change to the 'canonical' URL for some reason?
# # Change the stored URL for this Agg to the URL the AM advertises if necessary
# if agg.url != version[aggurl]['value']['geni_api_versions'][key]:
# agg.url = version[aggurl]['value']['geni_api_versions'][key]
# The reason to do this would be to
# avoid errors like:
#16:46:34 WARNING : Requested API version 2, but AM https://clemson-clemson-control-1.clemson.edu:5001 uses version 3. Same aggregate talks API v2 at a different URL: https://clemson-clemson-control-1.clemson.edu:5002
# if len(version[aggurl]['value']['geni_api_versions'].keys()) > 1 and \
# agg.url != version[aggurl]['value']['geni_api_versions'][key]:
# agg.url = version[aggurl]['value']['geni_api_versions'][key]
if int(key) > maxVer:
maxVer = int(key)
maxVerUrl = version[aggurl]['value']['geni_api_versions'][key]
if int(key) == self.opts.api_version:
reqVerUrl = version[aggurl]['value']['geni_api_versions'][key]
# Done loop over api versions
# This code is just to avoid ugly WARNs from Omni about changing URL to get the right API version.
# Added it for GRAM. But GRAM is manually fixed at the SCS now, so no need.
# if self.opts.api_version == 2 and hasV2 and agg.url != v2url:
# if agg.isEG and "orca/xmlrpc" in agg.url and "orca/geni" in v2url:
# # EGs ad lists the wrong v2 URL
# #self.logger.debug("Don't swap at EG with the wrong URL")
# pass
# else:
# self.logger.debug("%s: Swapping URL to v2 URL. Change from %s to %s", agg, agg.url, v2url)
# if agg.alt_url is None:
# agg.alt_url = agg.url
# agg.url = v2url
# Stitcher doesn't really know how to parse
# APIv1 return structs
if maxVer == 1:
msg = "%s speaks only AM API v1 - not supported!" % agg
#self.logger.error(msg)
raise StitchingError(msg)
# Hack alert: v3 AM implementations don't work even if they exist
if not hasV2:
msg = "%s does not speak AM API v2 (max is V%d). APIv2 required!" % (agg, maxVer)
#self.logger.error(msg)
raise StitchingError(msg)
agg.api_version = self.opts.api_version
if self.opts.api_version > maxVer:
self.logger.debug("Asked for APIv%d but %s only supports v%d", self.opts.api_version, agg, maxVer)
agg.api_version = maxVer
# if maxVer != 2:
# self.logger.debug("%s speaks AM API v%d, but sticking with v2", agg, maxVer)
# if self.opts.fakeModeDir:
# self.logger.warn("Testing v3 support")
# agg.api_version = 3
# agg.api_version = maxVer
# Change the URL for the AM so that later calls to this AM don't get complaints from Omni
# Here we hard-code knowledge that APIv2 is the default in Omni, the agg_nick_cache, and at AMs
if agg.api_version != 2:
if agg.api_version == maxVer and maxVerUrl is not None and maxVerUrl != agg.url:
self.logger.debug("%s: Swapping URL to v%d URL. Change from %s to %s", agg, agg.api_version, agg.url, maxVerUrl)
if agg.alt_url is None:
agg.alt_url = agg.url
agg.url = maxVerUrl
elif agg.api_version == self.opts.api_version and reqVerUrl is not None and reqVerUrl != agg.url:
self.logger.debug("%s: Swapping URL to v%d URL. Change from %s to %s", agg, agg.api_version, agg.url, reqVerUrl)
if agg.alt_url is None:
agg.alt_url = agg.url
agg.url = reqVerUrl
# Done handling geni_api_versions
if version[aggurl]['value'].has_key('GRAM_version'):
agg.isGRAM = True
self.logger.debug("AM %s is GRAM", agg)
if version[aggurl]['value'].has_key('foam_version') and ('oess' in agg.url or 'al2s' in agg.url):
agg.isOESS = True
self.logger.debug("AM %s is OESS", agg)
if version[aggurl]['value'].has_key('geni_request_rspec_versions') and \
isinstance(version[aggurl]['value']['geni_request_rspec_versions'], list):
for rVer in version[aggurl]['value']['geni_request_rspec_versions']:
if isinstance(rVer, dict) and rVer.has_key('type') and rVer.has_key('version') and \
rVer.has_key('extensions') and rVer['type'].lower() == 'geni' and str(rVer['version']) == '3' and \
isinstance(rVer['extensions'], list):
v2 = False
v1 = False
for ext in rVer['extensions']:
if defs.STITCH_V1_BASE in ext:
v1 = True
if defs.STITCH_V2_BASE in ext:
v2 = True
if v2:
self.logger.debug("%s supports stitch schema v2", agg)
agg.doesSchemaV2 = True
if not v1:
self.logger.debug("%s does NOT say it supports stitch schema v1", agg)
agg.doesSchemaV1 = False
# End of if block
# Done with loop over versions
if not agg.doesSchemaV2 and not agg.doesSchemaV1:
self.logger.debug("%s doesn't say whether it supports either stitching schema, so assume v1", agg)
agg.doesSchemaV1 = True
except StitchingError, se:
# FIXME: Return anything different for stitching error?
# Do we want to return a geni triple struct?
raise
except Exception, e:
self.logger.debug("Got error extracting extra AM info: %s", e)
import traceback
self.logger.debug(traceback.format_exc())
pass
# finally:
# logging.disable(logging.NOTSET)
# Done with call to GetVersion
# If this is an EG AM and we said useExoSM, make this the ExoSM
# Later we'll use ensureOneExoSM to dedupe
if agg.isEG and self.opts.useExoSM and not agg.isExoSM:
agg.alt_url = defs.EXOSM_URL
self.logger.info("%s is an EG AM and user asked for ExoSM. Changing to %s", agg, agg.alt_url)
amURL = agg.url
agg.url = agg.alt_url
agg.alt_url = amURL
agg.isExoSM = True
aggsc.append(agg)
continue
# else:
# self.logger.debug("%s is EG: %s, alt_url: %s, isExo: %s", agg, agg.isEG, agg.alt_url, agg.isExoSM)
# Save off the aggregate nickname if possible
agg.nick = handler_utils._lookupAggNick(self, agg.url)
if not agg.isEG and not agg.isGRAM and not agg.dcn and not agg.isOESS and "protogeni/xmlrpc" in agg.url:
agg.isPG = True
# self.logger.debug("Remembering done getting extra info for %s", agg)
# Remember we got the extra info for this AM
self.amURNsAddedInfo.append(agg.urn)
# Done loop over aggs
# End add_am_info
def dump_objects(self, rspec, aggs):
'''Print out the hops, aggregates, and dependencies'''
if rspec and rspec.stitching:
stitching = rspec.stitching
self.logger.debug( "\n===== Hops =====")
for path in stitching.paths:
self.logger.debug( "Path %s" % (path.id))
for hop in path.hops:
self.logger.debug( " Hop %s" % (hop))
if hop.globalId:
self.logger.debug( " GlobalId: %s" % hop.globalId)
if hop._hop_link.isOF:
self.logger.debug( " An Openflow controlled hop")
if hop._hop_link.controllerUrl:
self.logger.debug( " Controller: %s", hop._hop_link.controllerUrl)
if hop._hop_link.ofAMUrl:
self.logger.debug( " Openflow AM URL: %s", hop._hop_link.ofAMUrl)
if len(hop._hop_link.capabilities) > 0:
self.logger.debug( " Capabilities: %s", hop._hop_link.capabilities)
# FIXME: don't use the private variable
self.logger.debug( " VLAN Suggested (requested): %s" % (hop._hop_link.vlan_suggested_request))
self.logger.debug( " VLAN Available Range (requested): %s" % (hop._hop_link.vlan_range_request))
if hop._hop_link.vlan_suggested_manifest:
self.logger.debug( " VLAN Suggested (manifest): %s" % (hop._hop_link.vlan_suggested_manifest))
if hop._hop_link.vlan_range_manifest:
self.logger.debug( " VLAN Available Range (manifest): %s" % (hop._hop_link.vlan_range_manifest))
if hop.vlans_unavailable and len(hop.vlans_unavailable) > 0:
self.logger.debug( " VLANs found UN Available: %s" % hop.vlans_unavailable)
self.logger.debug( " Import VLANs From: %s" % (hop.import_vlans_from))
deps = hop.dependsOn
if deps:
self.logger.debug( " Dependencies:")
for h in deps:
self.logger.debug( " Hop %s" % (h))
# End of loop over hops
# End of loop over paths
# End of block to print hops if possible
if aggs and len(aggs) > 0:
self.logger.debug( "\n===== Aggregates =====")
for agg in aggs:
self.logger.debug( "\nAggregate %s" % (agg))
if agg.userRequested:
self.logger.debug(" (User requested)")
else:
self.logger.debug(" (SCS added)")
if agg.dcn:
self.logger.debug(" A DCN Aggregate")
if agg.isPG:
self.logger.debug(" A ProtoGENI Aggregate")
if agg.isGRAM:
self.logger.debug(" A GRAM Aggregate")
if agg.isOESS:
self.logger.debug(" An OESS Aggregate")
if agg.isFOAM:
self.logger.debug(" A FOAM Aggregate")
if agg.isEG:
self.logger.debug(" An Orca Aggregate")
if agg.isExoSM:
self.logger.debug(" The ExoSM Aggregate")
self.logger.debug(" URN synonyms: %s", agg.urn_syns)
if agg.alt_url:
self.logger.debug(" Alternate URL: %s", agg.alt_url)
self.logger.debug(" Using AM API version %d", agg.api_version)
if agg.manifestDom:
if agg.api_version > 2:
self.logger.debug(" Have a temporary reservation here (%s)! \n*** You must manually call `omni -a %s -V3 provision %s` and then `omni -a %s -V3 poa %s geni_start`", agg.url, agg.url, self.slicename, agg.url, self.slicename)
else:
self.logger.debug(" Have a reservation here (%s)!", agg.url)
if not agg.doesSchemaV1:
self.logger.debug(" Does NOT support Stitch Schema V1")
if agg.doesSchemaV2:
self.logger.debug(" Supports Stitch Schema V2")
if agg.lastError:
self.logger.debug(" Last Error: %s", agg.lastError)
if agg.pgLogUrl:
self.logger.debug(" PG Log URL %s", agg.pgLogUrl)
if agg.sliverExpirations is not None:
if len(agg.sliverExpirations) > 1:
# More than 1 distinct sliver expiration found
# Sort and take first
outputstr = agg.sliverExpirations[0].isoformat()
self.logger.debug(" Resources here expire at %d different times. First expiration is %s UTC" % (len(agg.sliverExpirations), outputstr))
elif len(agg.sliverExpirations) == 1:
outputstr = agg.sliverExpirations[0].isoformat()
self.logger.debug(" Resources here expire at %s UTC" % (outputstr))
for h in agg.hops:
self.logger.debug( " Hop %s" % (h))
for ad in agg.dependsOn:
self.logger.debug( " Depends on %s" % (ad))
# End of loop over aggregates
# End of block to print aggregates
# End of dump_objects
def _raise_omni_error( self, msg, err=OmniError, triple=None ):
msg2 = msg
if triple is not None:
msg2 += " "
msg2 += str(triple)
self.logger.error( msg2 )
if triple is None:
raise err, msg
else:
raise err, (msg, triple)
def combineManifests(self, ams, lastAM):
'''Produce a single combined manifest string from the reservation results at each aggregate.
lastAM is the last reservation that completed, for use as a template.'''
# Nodes and hops come from the AM that owns those
# interface_ref elements on link elements also come from the responsible AM
# Top level link element is effectively arbitrary, but with comments on what other AMs said
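# Sketch of the approach: start from lastAM's manifest DOM (or, if none, the SCS-expanded request
# rewritten as a manifest), then merge in each AM's own nodes, hops and interface_refs.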
lastDom = None
if lastAM is None or lastAM.manifestDom is None:
self.logger.debug("Combined manifest will start from expanded request RSpec")
lastDom = self.parsedSCSRSpec.dom
# Change that dom to be a manifest RSpec
# for each attribute on the dom root node, change "request" to "manifest"
doc_root = lastDom.documentElement
for i in range(doc_root.attributes.length):
attr = doc_root.attributes.item(i)
doingChange = False
ind = attr.value.find('request')
if ind > -1:
doingChange = True
while ind > -1:
attr.value = attr.value[:ind] + 'manifest' + attr.value[ind+len('request'):]
ind = attr.value.find('request', ind+len('request'))
if doingChange:
self.logger.debug("Reset original request rspec attr %s='%s'", attr.name, attr.value)
# self.logger.debug(stripBlankLines(lastDom.toprettyxml(encoding="utf-8")))
else:
lastDom = lastAM.manifestDom
if lastAM:
self.logger.debug("Template for combining will be from %s", lastAM)
combinedManifestDom = combineManifestRSpecs(ams, lastDom)
try:
manString = combinedManifestDom.toprettyxml(encoding="utf-8")
except Exception, xe:
self.logger.debug("Failed to XMLify combined Manifest RSpec: %s", xe)
self._raise_omni_error("Malformed combined manifest RSpec: %s" % xe)
# set rspec to be UTF-8
if isinstance(manString, unicode):
manString = manString.encode('utf-8')
self.logger.debug("Combined manifest RSpec was unicode")
# FIXME
# For fake mode this is really a request, but should be treating it as a manifest
# For now, SCS gives us stitchSchemaV2 stuff, so rspeclint fails
try:
if self.opts.fakeModeDir:
self.confirmGoodRSpec(manString, rspec_schema.REQUEST, False)
else:
self.confirmGoodRSpec(manString, rspec_schema.MANIFEST, False)
except OmniError, oe:
# If there is an EG AM in the mix, then we expect an error
# like:
#Manifest RSpec file did not contain a Manifest RSpec (wrong type or schema)
hasEG = False
for am in ams:
if am.isEG:
hasEG = True
break
if hasEG and "Manifest RSpec file did not contain a Manifest RSpec (wrong type or schema)" in str(oe):
self.logger.debug("EG AM meant manifest does not validate: %s", oe)
except Exception, e:
self.logger.error(e)
return stripBlankLines(manString)
# End of combineManifest
def saveAggregateList(self, sliceurn):
'''Save a file with the list of aggregates used. Used as input
to later stitcher calls, e.g. to delete from all AMs.'''
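# The file written below looks like this (URL/URN values illustrative):
#   # AM List for multi-AM slice urn:publicid:IDN+ch.example.org:proj+slice+myslice
#   # Slice allocated at 2016-01-31T23:00:00
#   https://am.example.net/xmlrpc/am,urn:publicid:IDN+am.example.net+authority+cm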
# URN to hrn
(slicehrn, stype) = urn_to_clean_hrn(sliceurn)
if not slicehrn or slicehrn.strip() == '' or not stype=='slice':
self.logger.warn("Couldn't parse slice HRN from URN %s",
sliceurn)
return
# ./$slicehrn-amlist.txt
fname = prependFilePrefix(self.opts.fileDir, "~/.gcf/%s-amlist.txt" % slicehrn)
if not self.ams_to_process or len(self.ams_to_process) == 0:
self.logger.debug("No AMs in AM list to process, so not creating amlist file")
return
listdir = os.path.abspath(os.path.expanduser(os.path.dirname(fname)))
if not os.path.exists(listdir):
try:
os.makedirs(listdir)
except Exception, e:
self.logger.warn("Failed to create dir '%s' to save list of used AMs: %s", listdir, e)
# URL,URN
with open (fname, 'w') as file:
file.write("# AM List for multi-AM slice %s\n" % sliceurn)
file.write("# Slice allocated at %s\n" % datetime.datetime.utcnow().isoformat())
for am in self.ams_to_process:
file.write("%s,%s\n" % (am.url, am.urn) )
# Include am.userRequested? am.api_version? len(am._hops)?
# file.write("%s,%s,%s,%d,%d\n" % (am.url, am.urn, am.userRequested,
# am.api_version, len(am._hops)))
# Done writing to file
# End of saveAggregateList
def addAggregateOptions(self, args):
'''Read a file with a list of aggregates, adding those as -a
options. Allows stitcher to delete from all AMs. Note that
extra aggregate options are added only if no -a options are
already supplied.'''
# Find slice name from args[1]
if not args or len(args) < 2:
self.logger.debug("Cannot find slice name")
return
slicename = args[1]
# get slice URN
# Get slice URN from name
try:
sliceurn = self.framework.slice_name_to_urn(slicename)
except Exception, e:
self.logger.warn("Could not determine slice URN from name %s: %s", slicename, e)
return
if not sliceurn or sliceurn.strip() == '':
self.logger.warn("Could not determine slice URN from name %s", slicename)
return
# get slice HRN
(slicehrn, stype) = urn_to_clean_hrn(sliceurn)
if not slicehrn or slicehrn.strip() == '' or not stype=='slice':
self.logger.warn("Couldn't parse slice HRN from URN %s",
sliceurn)
return
# ~/.gcf/$slicehrn-amlist.txt
fname = prependFilePrefix(self.opts.fileDir, "~/.gcf/%s-amlist.txt" % slicehrn)
# look to see if $slicehrn-amlist.txt exists
if not os.path.exists(fname) or not os.path.getsize(fname) > 0:
self.logger.debug("File of AMs for slice %s not found or empty: %s", slicename, fname)
return
self.logger.info("Reading slice %s aggregates from file %s", slicename, fname)
self.opts.ensure_value('aggregate', [])
addOptions = True
if len(self.opts.aggregate) > 0:
addOptions = False
with open(fname, 'r') as file:
# For each line:
for line in file:
line = line.strip()
# Skip if starts with # or is empty
if line == '' or line.startswith('#'):
continue
# split on ,
(url,urn) = line.split(',')
# (url,urn,userRequested,api_version,numHops) = line.split(',')
url = url.strip()
# If first looks like a URL, log
if not url == '':
# add -a option
# Note this next doesn't avoid the dup of a nickname
if not url in self.opts.aggregate:
if addOptions:
self.logger.debug("Adding aggregate option %s (%s)", url, urn)
self.opts.aggregate.append(url)
else:
self.logger.debug("NOTE not adding aggregate %s", url)
# Non-empty URL
# End of loop over lines
# End of block to read the file
# End of addAggregateOptions
def addExpiresAttribute(self, rspecDOM, sliceexp):
'''Set the expires attribute on the RSpec to the slice
expiration. DCN AMs formerly did not support renew, which made this
necessary; that is no longer true, so it should not be needed. Additionally,
some AMs treat this as a strict requirement, and if it
exceeds local policy for maximum sliver lifetime, the request will fail.'''
if not rspecDOM:
return
if not sliceexp or str(sliceexp).strip() == "":
return
rspecs = rspecDOM.getElementsByTagName(defs.RSPEC_TAG)
if not rspecs or len(rspecs) < 1:
return
if rspecs[0].hasAttribute(defs.EXPIRES_ATTRIBUTE):
self.logger.debug("Not over-riding expires %s", rspecs[0].getAttribute(defs.EXPIRES_ATTRIBUTE))
return
# Some PG based AMs cannot handle fractional seconds, and
# erroneously treat expires as in local time. So (a) avoid
# microseconds, and (b) explicitly note this is in UTC.
# So this is sliceexp.isoformat() except without the
# microseconds and with the Z. Note that PG requires exactly
# this format.
rspecs[0].setAttribute(defs.EXPIRES_ATTRIBUTE, sliceexp.strftime('%Y-%m-%dT%H:%M:%SZ'))
self.logger.debug("Added expires %s", rspecs[0].getAttribute(defs.EXPIRES_ATTRIBUTE))
def getUnboundNode(self):
'''Set self.isMultiAM by looking at Node component_manager_id fields. Also return at most 1 node without such a field.'''
# If any node is unbound, then all AMs will try to allocate it.
amURNs = []
unboundNode = None
for node in self.parsedUserRequest.nodes:
if node.amURN is None:
if self.opts.devmode:
# Note that SCS likely will fail with something like:
# code 65535: std::exception
self.logger.warn("Node %s is unbound in request", node.id)
else:
self.logger.debug("Node %s is unbound in request", node.id)
unboundNode = node.id
else:
# self.logger.debug("Node %s is on AM %s", node.id, node.amURN)
if node.amURN not in amURNs:
amURNs.append(node.amURN)
self.logger.debug("Request RSpec binds nodes to %d AMs", len(amURNs))
if len(amURNs) > 1:
self.isMultiAM = True
return unboundNode
def confirmSafeRequest(self):
'''Confirm this request is not asking for a loop. Requests that cannot
work should be rejected; risky ones should get a warning.'''
# Currently, this method is a no-op
# FIXME FIXME - what other checks go here?
# Ticket #570: to stitch multiple VMs at same PG AM on same VLAN, ensure component IDs are eth0-3 on interfaces
# to force it to go through hardware
# for link in self.parsedUserRequest.links:
# Only care about stitching links with more than 2 interfaces
# if len(link.aggregates) > 1 and not link.hasSharedVlan and link.typeName == link.VLAN_LINK_TYPE and len(link.interfaces) > 2:
# ifcsByNode = {}
# for ifc in link.interfaces:
# theNode = None
# for node in self.parsedUserRequest.nodes:
# if ifc in node.interface_ids:
# theNode = node
# break
# if theNode is None:
# error
# ifcsByNode[theNode] = [ifc]
# for node in ifcsByNode.keys():
# if len(ifcsByNode[node] < 2:
# continue
# agg = Aggregate.find(theNode.amURN)
# if not agg.isPG:
# self.logger.warn("%s is not a PG AM and may not support stitching multiple Nodes on same link", agg)
# continue
# # Now we have a PG node with >2 interfaces on the same stitched link
# # Find the node in the rspec XML
# # find the interface
# # Add the component_id if it is not already there
# # FIXME: If some ifc on the node has the component_id, then I need to avoid using the same ones!
# # Maybe for now, if any ifc has a component_id in the original rspec, skip this node?
# FIXME: we call rspec.getLinkEditedDom() to build what we send to the SCS. So the object / dom there needs to put the
# component_id in in the right way. So probably I need to do this via objects.
# So: objects.py class Node: store the interface_ref as an object that has both client_id (the id) and component_id.
# Make that class have a toDOM method that writes in the correct interface_ref sub-elements as needed, and call that method
# from class RSpec.getLinkEditedDom
# ethcnt = 0
# For each ifc
# If ifc in the current link, then add component_id attribute using ethcnt, and then increment
pass
def saveAggregateState(self, oldAggs, newAggs):
'''Save state from old aggregates for use with new aggregates from later SCS call'''
for agg in newAggs:
for oldAgg in oldAggs:
# Is this oldAgg the same as the new 'agg' by URN? If so, copy from old to new
# FIXME: Correct to compare urn_syns too?
if not (agg.urn == oldAgg.urn or agg.urn in oldAgg.urn_syns or oldAgg.urn in agg.urn_syns):
# Not a match
continue
for hop in agg.hops:
for oldHop in oldAgg.hops:
if hop.urn == oldHop.urn:
if oldHop.excludeFromSCS:
self.logger.warn("%s had been marked to exclude from SCS, but we got it again", oldHop)
hop.vlans_unavailable = hop.vlans_unavailable.union(oldHop.vlans_unavailable)
break
# End of loop over hops
# FIXME: agg.allocateTries?
agg.dcn = oldAgg.dcn
agg.isOESS = oldAgg.isOESS
agg.isFOAM = oldAgg.isFOAM
agg.isGRAM = oldAgg.isGRAM
agg.isPG = oldAgg.isPG
agg.isEG = oldAgg.isEG
agg.isExoSM = oldAgg.isExoSM
agg.userRequested = oldAgg.userRequested
agg.alt_url = oldAgg.alt_url
agg.api_version = oldAgg.api_version
agg.nick = oldAgg.nick
agg.doesSchemaV1 = oldAgg.doesSchemaV1
agg.doesSchemaV2 = oldAgg.doesSchemaV2
agg.slicecred = oldAgg.slicecred
# Since we're restarting, clear out any old error, so don't do this copy
# agg.lastError = oldAgg.lastError
# FIXME: correct?
agg.url = oldAgg.url
agg.urn_syns = copy.deepcopy(oldAgg.urn_syns)
break # out of loop over oldAggs, cause we found the new 'agg'
# Loop over oldAggs
# Loop over newAggs
# End of saveAggregateState
def ensureSliverType(self):
# DCN AMs seem to insist that there is at least one sliver_type specified on one node
# So if we have a DCN AM, add one if needed
haveDCN = False
for am in self.ams_to_process:
if am.dcn:
haveDCN = True
break
if not haveDCN:
# Only have a problem if there is a DCN AM. Nothing to do.
return
# Do we have a sliver type?
slivtypes = self.parsedSCSRSpec.dom.getElementsByTagName(defs.SLIVER_TYPE_TAG)
if slivtypes and len(slivtypes) > 0:
# have at least one sliver type element. Nothing to do
return
slivTypeNode = self.parsedSCSRSpec.dom.createElement(defs.SLIVER_TYPE_TAG)
slivTypeNode.setAttribute("name", "default-vm")
# Find the rspec element from parsedSCSRSpec.dom
rspecs = self.parsedSCSRSpec.dom.getElementsByTagName(defs.RSPEC_TAG)
if rspecs and len(rspecs):
rspec = rspecs[0]
# Find a node and add a sliver type
for child in rspec.childNodes:
if child.localName == defs.NODE_TAG:
id = child.getAttribute(Node.CLIENT_ID_TAG)
child.appendChild(slivTypeNode)
self.logger.debug("To keep DCN AMs happy, adding a default-vm sliver type to node %s", id)
return
# End of ensureSliverType
# If we said this rspec needs a fake endpoint, add it here - so the SCS and other stuff
# doesn't try to do anything with it. Useful with Links from IG AMs to fixed interfaces
# on ION or AL2S.
def addFakeNode(self):
fakeNode = self.parsedSCSRSpec.dom.createElement(defs.NODE_TAG)
fakeInterface = self.parsedSCSRSpec.dom.createElement("interface")
fakeInterface.setAttribute(Node.CLIENT_ID_TAG, "fake:if0")
fakeNode.setAttribute(Node.CLIENT_ID_TAG, "fake")
fakeNode.setAttribute(Node.COMPONENT_MANAGER_ID_TAG, "urn:publicid:IDN+fake+authority+am")
fakeCM = self.parsedSCSRSpec.dom.createElement(Link.COMPONENT_MANAGER_TAG)
fakeCM.setAttribute(Link.NAME_TAG, "urn:publicid:IDN+fake+authority+am")
fakeNode.appendChild(fakeInterface)
fakeiRef = self.parsedSCSRSpec.dom.createElement(Link.INTERFACE_REF_TAG)
fakeiRef.setAttribute(Node.CLIENT_ID_TAG, "fake:if0")
# Find the rspec element from parsedSCSRSpec.dom
rspecs = self.parsedSCSRSpec.dom.getElementsByTagName(defs.RSPEC_TAG)
if not rspecs or len(rspecs) < 1:
self.logger.debug("Failed to find <rspec> element")
return
rspec = rspecs[0]
# Add a node to the dom
# FIXME: Check that there is no node with the fake component_manager_id already?
self.logger.info("Adding fake Node endpoint")
rspec.appendChild(fakeNode)
# Also find all links for which there is a stitching path and add an interface_ref to any with only 1 interface_ref
for child in rspec.childNodes:
if child.localName == defs.LINK_TAG:
linkName = child.getAttribute(Node.CLIENT_ID_TAG)
stitchPath = self.parsedSCSRSpec.find_path(linkName)
if not stitchPath:
# The link has no matching stitching path
# This could be a link all within 1 AM, or a link on a shared VLAN, or an ExoGENI stitched link
self.logger.debug("For fakeEndpoint, skipping main body link %s with no stitching path", linkName)
continue
ifcCount = 0
ifcAMCount = 0 # Num AMs the interfaces are at
propCount = 0
ifc1Name = None
ifcAuths = []
for c2 in child.childNodes:
if c2.localName == Link.INTERFACE_REF_TAG:
ifcCount += 1
ifc1Name = c2.getAttribute(Node.CLIENT_ID_TAG)
for node in self.parsedSCSRSpec.nodes:
if ifc1Name in node.interface_ids:
ifcAuth = node.amURN
if not ifcAuth in ifcAuths:
ifcAuths.append(ifcAuth)
ifcAMCount += 1
break
if c2.localName == Link.PROPERTY_TAG:
propCount += 1
# End of loop over link sub-elements counting interface_refs
if ifcAMCount == 1:
self.logger.info("Adding fake interface_ref endpoint on link %s", linkName)
child.appendChild(fakeiRef)
child.appendChild(fakeCM)
if propCount == 0:
# Add the 2 property elements
self.logger.debug("Adding property tags to link %s to fake node", linkName)
sP = self.parsedSCSRSpec.dom.createElement(Link.PROPERTY_TAG)
sP.setAttribute(LinkProperty.SOURCE_TAG, ifc1Name)
sP.setAttribute(LinkProperty.DEST_TAG, "fake:if0")
sP.setAttribute(LinkProperty.CAPACITY_TAG, str(self.opts.defaultCapacity))
dP = self.parsedSCSRSpec.dom.createElement(Link.PROPERTY_TAG)
dP.setAttribute(LinkProperty.DEST_TAG, ifc1Name)
dP.setAttribute(LinkProperty.SOURCE_TAG, "fake:if0")
dP.setAttribute(LinkProperty.CAPACITY_TAG, str(self.opts.defaultCapacity))
child.appendChild(sP)
child.appendChild(dP)
else:
self.logger.debug("Link %s had only interfaces at 1 am (%d interfaces total), so added the fake interface - but it has %d properties already?", linkName, ifcCount, propCount)
else:
self.logger.debug("Not adding fake endpoint to link %s with %d interfaces at %d AMs", linkName, ifcCount, ifcAMCount)
# Got a link
# End of loop over top level elements in the RSpec XML to find links and add the fake interface_ref
# self.logger.debug("\n" + self.parsedSCSRSpec.dom.toxml())
# End of addFakeNode
def endPartiallyReserved(self, exception=None, aggs=[], timeout=False):
# End the run with things only partially reserved
# This could be due to --noDeleteAtEnd and a fatal failure or Ctrl-C, or it could be due to --noTransitAMs and only transit AMs remain
# exception would be an indication of why we are quitting to include in xml comments
# 1) Print where you have reservations and where you do not. Also print where there were failures if possible.
# 2) Output a combined manifest for what you do have
# - ideally with comments indicating what this is a manifest for and what AMs need reservations
# - Include the VLANs unavailable for failed AMs and any other available error information
# - Ideally comments also indicate which AMs / hops depend on which others, so experimenter can manually do what stitcher does
# 3) Output a combined request for what you do not have
# - ideally with comments indicating where this must be submitted and what AMs that are part of this topology have reservations
# - Include the VLANs unavailable for failed AMs and any other available error information
# - Ideally comments also indicate which AMs / hops depend on which others, so experimenter can manually do what stitcher does
# This method does not exit. It constructs a message suitable for logging at the end and returns it
retMsg = ""
# Note that caller has already noted we are not deleting existing reservations, and caller will log the stuff in 'msg'
aggsRes = []
aggsNoRes = []
aggsFailed = []
for agg in aggs:
if agg.manifestDom:
# FIXME: If the Ctrl-C happened during allocate, then we fake set the manifestDom so it looks like we have a reservation there,
# because the AM may think we do. In fact, we may not. Perhaps detect this case and log something here? Perhaps with agg.completed?
aggsRes.append(agg)
if agg.api_version > 2:
self.logger.debug(" Have a temporary reservation here (%s)! \n*** You must manually call `omni -a %s -V3 provision %s` and then `omni -a %s -V3 poa %s geni_start`", agg.url, agg.url, self.slicename, agg.url, self.slicename)
else:
self.logger.debug(" Have a reservation here (%s)!", agg.url)
else:
aggsNoRes.append(agg)
self.logger.debug("%s has no reservation", agg)
# Can we tell where we tried & failed?
if agg.inProcess or agg.allocateTries > 0 or agg.triedRes or agg.lastError:
aggsFailed.append(agg)
self.logger.debug("%s was a failed attempt. inProcess=%s, allocateTries=%d, triedRes=%s, lastError=%s", agg, agg.inProcess, agg.allocateTries, agg.triedRes, agg.lastError)
if len(aggsRes) + len(aggsNoRes) != len(aggs):
self.logger.debug("Ack! aggsRes=%d, aggsNoRes=%d, but total aggs is %d", len(aggsRes), len(aggsNoRes), len(aggs))
retMsg = "Stitcher interrupted"
if len(aggsRes) > 0:
retMsg += " with reservations at %d aggregate(s)" % len(aggsRes)
retMsg += ". "
if len(aggsNoRes) > 0:
retMsg += "Reservation must be completed at %d aggregate(s). " % len(aggsNoRes)
if len(aggsFailed) > 0:
retMsg += "Reservation failed at: %s." % aggsFailed
retMsg += "\n"
if len(aggsRes) > 0:
lastSuccAM = aggsRes[0]
# Note this will include the AMs where we have no reservation
combinedManifest, filename, retVal = self.getAndSaveCombinedManifest(lastSuccAM)
# Print something about sliver expiration times
msg = self.getExpirationMessage()
if msg:
retMsg += msg + '\n'
if filename:
msg = "Saved combined reservation RSpec at %d AM(s) to file '%s'\n" % (len(aggsRes), os.path.abspath(filename))
retMsg += msg
if len(aggsNoRes) > 0:
# For the DOM to start from, start with one I've edited if it exists
dom = self.parsedSCSRSpec.dom
for am in aggsNoRes:
if am.requestDom:
dom = am.requestDom
break
# Generate / save the expanded request using the full list of AMs. Note this means
# we'll include things that are technically for manifests only.
# To avoid that, call with aggsNoRes instead.
msg = self.writeExpandedRequest(aggs, dom)
retMsg += msg
self.logger.debug(retMsg)
return retMsg
# End of endPartiallyReserved
|
plantigrade/geni-tools
|
src/gcf/omnilib/stitchhandler.py
|
Python
|
mit
| 207,229
|
[
"ORCA"
] |
23ff9b738196fbee58a5600607c4e26a9cf72b3b178ef9f3a06c0625afff3201
|
""" ConanFile user tools, as download, etc
"""
from __future__ import print_function
import logging
import multiprocessing
import os
import platform
import re
import subprocess
import sys
from contextlib import contextmanager
import requests
from patch import fromfile, fromstring
from conans.client.output import ConanOutput
from conans.client.rest.uploader_downloader import Downloader
from conans.client.runner import ConanRunner
from conans.errors import ConanException
from conans.model.version import Version
# noinspection PyUnresolvedReferences
from conans.util.files import _generic_algorithm_sum, load, save
from conans.util.log import logger
def unix_path(path):
""""Used to translate windows paths to MSYS unix paths like
c/users/path/to/file"""
pattern = re.compile(r'([a-z]):\\', re.IGNORECASE)
return pattern.sub('/\\1/', path).replace('\\', '/').lower()
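# Usage sketch (illustrative; the path below is hypothetical):
#   unix_path(r'C:\Users\name\project')  # -> 'c/users/name/project'
# This is the form that MSYS/MinGW shells expect.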
def escape_windows_cmd(command):
""" To use in a regular windows cmd.exe
1. Adds escapes so the argument can be unpacked by CommandLineToArgvW()
2. Adds escapes for cmd.exe so the argument survives cmd.exe's substitutions.
Useful to escape commands to be executed in a windows bash (msys2, cygwin etc)
"""
quoted_arg = subprocess.list2cmdline([command])
return "".join(["^%s" % arg if arg in r'()%!^"<>&|' else arg for arg in quoted_arg])
def run_in_windows_bash(conanfile, bashcmd, cwd=None):
""" Will run a unix command inside the msys2 environment
It requires to have MSYS2 in the path and MinGW
"""
if platform.system() != "Windows":
raise ConanException("Command only for Windows operating system")
# This needs to be set so that msys2 bash profile will set up the environment correctly.
try:
arch = conanfile.settings.arch # Maybe arch doesn't exist
except:
arch = None
env_vars = {"MSYSTEM": "MINGW32" if arch == "x86" else "MINGW64",
"MSYS2_PATH_TYPE": "inherit"}
with environment_append(env_vars):
curdir = unix_path(cwd or os.path.abspath(os.path.curdir))
# Needed to change to that dir inside the bash shell
to_run = 'cd "%s" && %s ' % (curdir, bashcmd)
wincmd = 'bash --login -c %s' % escape_windows_cmd(to_run)
conanfile.output.info('run_in_windows_bash: %s' % wincmd)
conanfile.run(wincmd)
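# Usage sketch (illustrative; the command string is hypothetical). From a recipe's
# build() step one could run a unix-style command through the MSYS2 bash, e.g.:
#   run_in_windows_bash(self, 'cd src && ./configure && make')
# The conanfile argument supplies settings.arch, output and run().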
@contextmanager
def chdir(newdir):
old_path = os.getcwd()
os.chdir(newdir)
try:
yield
finally:
os.chdir(old_path)
@contextmanager
def pythonpath(conanfile):
old_path = sys.path[:]
python_path = conanfile.env.get("PYTHONPATH", None)
if python_path:
if isinstance(python_path, list):
sys.path.extend(python_path)
else:
sys.path.append(python_path)
yield
sys.path = old_path
@contextmanager
def environment_append(env_vars):
"""
:param env_vars: Dict of simple environment vars. {name: value, name2: value2} => e.g.: MYVAR=1
The values can also be lists of appendable environment vars. {name: [value, value2]}
=> e.g. PATH=/path/1:/path/2
:return: None
"""
old_env = dict(os.environ)
for name, value in env_vars.items():
if isinstance(value, list):
env_vars[name] = os.pathsep.join(value)
if name in old_env:
env_vars[name] += os.pathsep + old_env[name]
os.environ.update(env_vars)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
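# Minimal usage sketch (illustrative; the variable names and values are assumptions):
#   with environment_append({"CXXFLAGS": "-O2", "PATH": ["/opt/mytool/bin"]}):
#       pass  # list values are joined with os.pathsep and prepended to any existing value
# On exit, the previous os.environ contents are restored.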
def build_sln_command(settings, sln_path, targets=None, upgrade_project=True):
"""
Use example:
build_command = build_sln_command(self.settings, "myfile.sln", targets=["SDL2_image"])
command = "%s && %s" % (tools.vcvars_command(self.settings), build_command)
self.run(command)
"""
targets = targets or []
command = "devenv %s /upgrade && " % sln_path if upgrade_project else ""
command += "msbuild %s /p:Configuration=%s" % (sln_path, settings.build_type)
if str(settings.arch) in ["x86_64", "x86"]:
command += ' /p:Platform='
command += '"x64"' if settings.arch == "x86_64" else '"x86"'
elif "ARM" in str(settings.arch).upper():
command += ' /p:Platform="ARM"'
if targets:
command += " /target:%s" % ";".join(targets)
return command
def vcvars_command(settings):
param = "x86" if settings.arch == "x86" else "amd64"
existing_version = os.environ.get("VisualStudioVersion")
if existing_version:
command = ""
existing_version = existing_version.split(".")[0]
if existing_version != settings.compiler.version:
raise ConanException("Error, Visual environment already set to %s\n"
"Current settings visual version: %s"
% (existing_version, settings.compiler.version))
else:
env_var = "vs%s0comntools" % settings.compiler.version
try:
vs_path = os.environ[env_var]
except KeyError:
raise ConanException("VS '%s' variable not defined. Please install VS or define "
"the variable (VS2017)" % env_var)
if settings.compiler.version != "15":
command = ('call "%s../../VC/vcvarsall.bat" %s' % (vs_path, param))
else:
command = ('call "%s../../VC/Auxiliary/Build/vcvarsall.bat" %s' % (vs_path, param))
return command
def cpu_count():
try:
return multiprocessing.cpu_count()
except NotImplementedError:
print("WARN: multiprocessing.cpu_count() not implemented. Defaulting to 1 cpu")
return 1 # Safe guess
def human_size(size_bytes):
"""
format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have
greater precision. e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
"""
if size_bytes == 1:
return "1 byte"
suffixes_table = [('bytes', 0), ('KB', 0), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if num < 1024.0:
break
num /= 1024.0
if precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s %s" % (formatted_size, suffix)
def unzip(filename, destination=".", keep_permissions=False):
"""
Unzip a zipped file
:param filename: Path to the zip file
:param destination: Destination folder
:param keep_permissions: Keep the zip permissions. WARNING: this can be dangerous if the zip was not created
on a *nix system; the permission bits could produce an undefined permission scheme. Use this option only if
you are sure the zip was created correctly.
:return:
"""
if (filename.endswith(".tar.gz") or filename.endswith(".tgz") or
filename.endswith(".tbz2") or filename.endswith(".tar.bz2") or
filename.endswith(".tar")):
return untargz(filename, destination)
import zipfile
full_path = os.path.normpath(os.path.join(os.getcwd(), destination))
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
def print_progress(extracted_size, uncompress_size):
txt_msg = "Unzipping %.0f %%\r" % (extracted_size * 100.0 / uncompress_size)
print(txt_msg, end='')
else:
def print_progress(extracted_size, uncompress_size):
pass
with zipfile.ZipFile(filename, "r") as z:
uncompress_size = sum((file_.file_size for file_ in z.infolist()))
print("Unzipping %s, this can take a while" % human_size(uncompress_size))
extracted_size = 0
if platform.system() == "Windows":
for file_ in z.infolist():
extracted_size += file_.file_size
print_progress(extracted_size, uncompress_size)
try:
# Win path limit is 260 chars
if len(file_.filename) + len(full_path) >= 260:
raise ValueError("Filename too long")
z.extract(file_, full_path)
except Exception as e:
print("Error extract %s\n%s" % (file_.filename, str(e)))
else: # duplicated for, to avoid a platform check for each zipped file
for file_ in z.infolist():
extracted_size += file_.file_size
print_progress(extracted_size, uncompress_size)
try:
z.extract(file_, full_path)
if keep_permissions:
# Could be dangerous if the ZIP has been created on a non-*nix system
# https://bugs.python.org/issue15795
perm = file_.external_attr >> 16 & 0xFFF
os.chmod(os.path.join(full_path, file_.filename), perm)
except Exception as e:
print("Error extract %s\n%s" % (file_.filename, str(e)))
def untargz(filename, destination="."):
import tarfile
with tarfile.TarFile.open(filename, 'r:*') as tarredgzippedFile:
tarredgzippedFile.extractall(destination)
def get(url):
""" high level downloader + unziper + delete temporary zip
"""
filename = os.path.basename(url)
download(url, filename)
unzip(filename)
os.unlink(filename)
def download(url, filename, verify=True, out=None, retry=2, retry_wait=5):
out = out or ConanOutput(sys.stdout, True)
if verify:
# We check the certificate using a list of known verifiers
import conans.client.rest.cacert as cacert
verify = cacert.file_path
downloader = Downloader(requests, out, verify=verify)
downloader.download(url, filename, retry=retry, retry_wait=retry_wait)
out.writeln("")
# save(filename, content)
def replace_in_file(file_path, search, replace):
content = load(file_path)
content = content.replace(search, replace)
content = content.encode("utf-8")
with open(file_path, "wb") as handle:
handle.write(content)
def check_with_algorithm_sum(algorithm_name, file_path, signature):
real_signature = _generic_algorithm_sum(file_path, algorithm_name)
if real_signature != signature:
raise ConanException("%s signature failed for '%s' file."
" Computed signature: %s" % (algorithm_name,
os.path.basename(file_path),
real_signature))
def check_sha1(file_path, signature):
check_with_algorithm_sum("sha1", file_path, signature)
def check_md5(file_path, signature):
check_with_algorithm_sum("md5", file_path, signature)
def check_sha256(file_path, signature):
check_with_algorithm_sum("sha256", file_path, signature)
def patch(base_path=None, patch_file=None, patch_string=None, strip=0, output=None):
"""Applies a diff from file (patch_file) or string (patch_string)
in base_path directory or current dir if None"""
class PatchLogHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self, logging.DEBUG)
self.output = output or ConanOutput(sys.stdout, True)
self.patchname = patch_file if patch_file else "patch"
def emit(self, record):
logstr = self.format(record)
if record.levelno == logging.WARN:
self.output.warn("%s: %s" % (self.patchname, logstr))
else:
self.output.info("%s: %s" % (self.patchname, logstr))
patchlog = logging.getLogger("patch")
if patchlog:
patchlog.handlers = []
patchlog.addHandler(PatchLogHandler())
if not patch_file and not patch_string:
return
if patch_file:
patchset = fromfile(patch_file)
else:
patchset = fromstring(patch_string.encode())
if not patchset:
raise ConanException("Failed to parse patch: %s" % (patch_file if patch_file else "string"))
if not patchset.apply(root=base_path, strip=strip):
raise ConanException("Failed to apply patch: %s" % patch_file)
# DETECT OS, VERSION AND DISTRIBUTIONS
class OSInfo(object):
''' Usage:
print(os_info.is_linux) # True/False
print(os_info.is_windows) # True/False
print(os_info.is_macos) # True/False
print(os_info.is_freebsd) # True/False
print(os_info.is_solaris) # True/False
print(os_info.linux_distro) # debian, ubuntu, fedora, centos...
print(os_info.os_version) # 5.1
print(os_info.os_version_name) # Windows 7, El Capitan
if os_info.os_version > "10.1":
pass
if os_info.os_version == "10.1.0":
pass
'''
def __init__(self):
self.os_version = None
self.os_version_name = None
self.is_linux = platform.system() == "Linux"
self.linux_distro = None
self.is_windows = platform.system() == "Windows"
self.is_macos = platform.system() == "Darwin"
self.is_freebsd = platform.system() == "FreeBSD"
self.is_solaris = platform.system() == "SunOS"
if self.is_linux:
import distro
self.linux_distro = distro.id()
self.os_version = Version(distro.version())
version_name = distro.codename()
self.os_version_name = version_name if version_name != "n/a" else ""
if not self.os_version_name and self.linux_distro == "debian":
self.os_version_name = self.get_debian_version_name(self.os_version)
elif self.is_windows:
self.os_version = self.get_win_os_version()
self.os_version_name = self.get_win_version_name(self.os_version)
elif self.is_macos:
self.os_version = Version(platform.mac_ver()[0])
self.os_version_name = self.get_osx_version_name(self.os_version)
elif self.is_freebsd:
self.os_version = self.get_freebsd_version()
self.os_version_name = "FreeBSD %s" % self.os_version
elif self.is_solaris:
self.os_version = Version(platform.release())
self.os_version_name = self.get_solaris_version_name(self.os_version)
@property
def with_apt(self):
return self.is_linux and self.linux_distro in \
("debian", "ubuntu", "knoppix", "linuxmint", "raspbian")
@property
def with_yum(self):
return self.is_linux and self.linux_distro in \
("centos", "redhat", "fedora", "pidora", "scientific",
"xenserver", "amazon", "oracle")
def get_win_os_version(self):
"""
Gets the OS major and minor versions. Returns a Version built as
"MAJOR.MINOR" (or None on failure).
"""
import ctypes
class _OSVERSIONINFOEXW(ctypes.Structure):
_fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
('dwMajorVersion', ctypes.c_ulong),
('dwMinorVersion', ctypes.c_ulong),
('dwBuildNumber', ctypes.c_ulong),
('dwPlatformId', ctypes.c_ulong),
('szCSDVersion', ctypes.c_wchar * 128),
('wServicePackMajor', ctypes.c_ushort),
('wServicePackMinor', ctypes.c_ushort),
('wSuiteMask', ctypes.c_ushort),
('wProductType', ctypes.c_byte),
('wReserved', ctypes.c_byte)]
os_version = _OSVERSIONINFOEXW()
os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
if retcode != 0:
return None
return Version("%d.%d" % (os_version.dwMajorVersion, os_version.dwMinorVersion))
def get_debian_version_name(self, version):
if not version:
return None
elif version.major() == "8.Y.Z":
return "jessie"
elif version.major() == "7.Y.Z":
return "wheezy"
elif version.major() == "6.Y.Z":
return "squeeze"
elif version.major() == "5.Y.Z":
return "lenny"
elif version.major() == "4.Y.Z":
return "etch"
elif version.minor() == "3.1.Z":
return "sarge"
elif version.minor() == "3.0.Z":
return "woody"
def get_win_version_name(self, version):
if not version:
return None
elif version.major() == "5.Y.Z":
return "Windows XP"
elif version.minor() == "6.0.Z":
return "Windows Vista"
elif version.minor() == "6.1.Z":
return "Windows 7"
elif version.minor() == "6.2.Z":
return "Windows 8"
elif version.minor() == "6.3.Z":
return "Windows 8.1"
elif version.minor() == "10.0.Z":
return "Windows 10"
def get_osx_version_name(self, version):
if not version:
return None
elif version.minor() == "10.12.Z":
return "Sierra"
elif version.minor() == "10.11.Z":
return "El Capitan"
elif version.minor() == "10.10.Z":
return "Yosemite"
elif version.minor() == "10.9.Z":
return "Mavericks"
elif version.minor() == "10.8.Z":
return "Mountain Lion"
elif version.minor() == "10.7.Z":
return "Lion"
elif version.minor() == "10.6.Z":
return "Snow Leopard"
elif version.minor() == "10.5.Z":
return "Leopard"
elif version.minor() == "10.4.Z":
return "Tiger"
elif version.minor() == "10.3.Z":
return "Panther"
elif version.minor() == "10.2.Z":
return "Jaguar"
elif version.minor() == "10.1.Z":
return "Puma"
elif version.minor() == "10.0.Z":
return "Cheetha"
def get_freebsd_version(self):
return platform.release().split("-")[0]
def get_solaris_version_name(self, version):
if not version:
return None
elif version.minor() == "5.10":
return "Solaris 10"
elif version.minor() == "5.11":
return "Solaris 11"
try:
os_info = OSInfo()
except Exception as exc:
logger.error(exc)
print("Error detecting os_info")
class SystemPackageTool(object):
def __init__(self, runner=None, os_info=None, tool=None):
env_sudo = os.environ.get("CONAN_SYSREQUIRES_SUDO", None)
self._sudo = (env_sudo != "False" and env_sudo != "0")
os_info = os_info or OSInfo()
self._is_up_to_date = False
self._tool = tool or self._create_tool(os_info)
self._tool._sudo_str = "sudo " if self._sudo else ""
self._tool._runner = runner or ConanRunner()
def _create_tool(self, os_info):
if os_info.with_apt:
return AptTool()
elif os_info.with_yum:
return YumTool()
elif os_info.is_macos:
return BrewTool()
else:
return NullTool()
def update(self):
"""
Run the system package tool's update command
"""
self._is_up_to_date = True
self._tool.update()
def install(self, packages, update=True, force=False):
'''
Run the system package tool's install command.
'''
packages = [packages] if isinstance(packages, str) else list(packages)
if not force and self._installed(packages):
return
if update and not self._is_up_to_date:
self.update()
self._install_any(packages)
def _installed(self, packages):
for pkg in packages:
if self._tool.installed(pkg):
print("Package already installed: %s" % pkg)
return True
return False
def _install_any(self, packages):
if len(packages) == 1:
return self._tool.install(packages[0])
for pkg in packages:
try:
return self._tool.install(pkg)
except ConanException:
pass
raise ConanException("Could not install any of %s" % packages)
class NullTool(object):
def update(self):
pass
def install(self, package_name):
print("Warn: Only available for linux with apt-get or yum or OSx with brew")
def installed(self, package_name):
return False
class AptTool(object):
def update(self):
_run(self._runner, "%sapt-get update" % self._sudo_str)
def install(self, package_name):
_run(self._runner, "%sapt-get install -y %s" % (self._sudo_str, package_name))
def installed(self, package_name):
exit_code = self._runner("dpkg -s %s" % package_name, None)
return exit_code == 0
class YumTool(object):
def update(self):
_run(self._runner, "%syum check-update" % self._sudo_str)
def install(self, package_name):
_run(self._runner, "%syum install -y %s" % (self._sudo_str, package_name))
def installed(self, package_name):
exit_code = self._runner("rpm -q %s" % package_name, None)
return exit_code == 0
class BrewTool(object):
def update(self):
_run(self._runner, "brew update")
def install(self, package_name):
_run(self._runner, "brew install %s" % package_name)
def installed(self, package_name):
exit_code = self._runner('test -n "$(brew ls --versions %s)"' % package_name, None)
return exit_code == 0
def _run(runner, command):
print("Running: %s" % command)
if runner(command, True) != 0:
raise ConanException("Command '%s' failed" % command)
|
mropert/conan
|
conans/tools.py
|
Python
|
mit
| 22,036
|
[
"Jaguar"
] |
027bfb0c8a1ecfcde74f95bf7a7836b196de5cf813501ef0cf6ead81e0dcc867
|
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getAgentSection
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.JDL import loadJDLAsCFG, dumpCFGAsJDL
class JobManifest( object ):
def __init__( self, manifest = "" ):
self.__manifest = CFG()
self.__dirty = False
self.__ops = False
if manifest:
result = self.load( manifest )
if not result[ 'OK' ]:
raise Exception( result[ 'Message' ] )
def clone( self ):
manifest = JobManifest()
manifest.__manifest = self.__manifest.clone()
manifest.__dirty = self.__dirty
manifest.__ops = self.__ops
return manifest
def isDirty( self ):
return self.__dirty
def setDirty( self ):
self.__dirty = True
def clearDirty( self ):
self.__dirty = False
def load( self, dataString ):
"""
Auto-discover the format: data enclosed in [ ... ] is treated as JDL, otherwise as CFG
"""
dataString = dataString.strip()
if dataString[0] == "[" and dataString[-1] == "]":
return self.loadJDL( dataString )
else:
return self.loadCFG( dataString )
def loadJDL( self, jdlString ):
"""
Load job manifest from JDL format
"""
result = loadJDLAsCFG( jdlString.strip() )
if not result[ 'OK' ]:
self.__manifest = CFG()
return result
self.__manifest = result[ 'Value' ][0]
return S_OK()
def loadCFG( self, cfgString ):
"""
Load job manifest from CFG format
"""
try:
self.__manifest.loadFromBuffer( cfgString )
except Exception, e:
return S_ERROR( "Can't load manifest from cfg: %s" % str( e ) )
return S_OK()
def dumpAsCFG( self ):
return str( self.__manifest )
def getAsCFG( self ):
return self.__manifest.clone()
def dumpAsJDL( self ):
return dumpCFGAsJDL( self.__manifest )
def __getCSValue( self, varName, defaultVal = None ):
if not self.__ops:
self.__ops = Operations( group = self.__manifest[ 'OwnerGroup' ], setup = self.__manifest[ 'DIRACSetup' ] )
if varName[0] != "/":
varName = "JobDescription/%s" % varName
return self.__ops.getValue( varName, defaultVal )
def __checkNumericalVar( self, varName, defaultVal, minVal, maxVal ):
"""
Check a numerical var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue( "Default%s" % varName , defaultVal )
else:
varValue = self.__manifest[ varName ]
initialVal = varValue
try:
varValue = long( varValue )
except:
return S_ERROR( "%s must be a number" % varName )
minVal = self.__getCSValue( "Min%s" % varName, minVal )
maxVal = self.__getCSValue( "Max%s" % varName, maxVal )
varValue = max( minVal, min( varValue, maxVal ) )
if initialVal != varValue:
self.__manifest.setOption( varName, varValue )
return S_OK( varValue )
def __checkChoiceVar( self, varName, defaultVal, choices ):
"""
Check a choice var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue( "Default%s" % varName , defaultVal )
else:
varValue = self.__manifest[ varName ]
initialVal = varValue
if varValue not in self.__getCSValue( "Choices%s" % varName , choices ):
return S_ERROR( "%s is not a valid value for %s" % ( varValue, varName ) )
if initialVal != varValue:
self.__manifest.setOption( varName, varValue )
return S_OK( varValue )
def __checkMultiChoice( self, varName, choices ):
"""
Check a multi choice var
"""
initialVal = False
if varName not in self.__manifest:
return S_OK()
else:
varValue = self.__manifest[ varName ]
initialVal = varValue
choices = self.__getCSValue( "Choices%s" % varName , choices )
for v in List.fromChar( varValue ):
if v not in choices:
return S_ERROR( "%s is not a valid value for %s" % ( v, varName ) )
if initialVal != varValue:
self.__manifest.setOption( varName, varValue )
return S_OK( varValue )
def __checkMaxInputData( self, maxNumber ):
"""
Check Maximum Number of Input Data files allowed
"""
initialVal = False
varName = "InputData"
if varName not in self.__manifest:
return S_OK()
varValue = self.__manifest[ varName ]
if len( List.fromChar( varValue ) ) > maxNumber:
return S_ERROR( 'Number of Input Data Files (%s) greater than current limit: %s' % ( len( List.fromChar( varValue ) ) , maxNumber ) )
return S_OK()
def __contains__( self, key ):
""" Check if the manifest has the required key
"""
return key in self.__manifest
def setOptionsFromDict( self, varDict ):
for k in sorted( varDict ):
self.setOption( k, varDict[ k ] )
def check( self ):
"""
Check that the manifest is OK
"""
for k in [ 'OwnerName', 'OwnerDN', 'OwnerGroup', 'DIRACSetup' ]:
if k not in self.__manifest:
return S_ERROR( "Missing var %s in manifest" % k )
#Check CPUTime
result = self.__checkNumericalVar( "CPUTime", 86400, 100, 500000 )
if not result[ 'OK' ]:
return result
result = self.__checkNumericalVar( "Priority", 1, 0, 10 )
if not result[ 'OK' ]:
return result
allowedSubmitPools = []
for option in [ "DefaultSubmitPools", "SubmitPools", "AllowedSubmitPools" ]:
allowedSubmitPools += gConfig.getValue( "%s/%s" % ( getAgentSection( "WorkloadManagement/TaskQueueDirector" ),
option ),
[] )
result = self.__checkMultiChoice( "SubmitPools", list( set( allowedSubmitPools ) ) )
if not result[ 'OK' ]:
return result
result = self.__checkMultiChoice( "PilotTypes", [ 'private' ] )
if not result[ 'OK' ]:
return result
result = self.__checkMaxInputData( 500 )
if not result[ 'OK' ]:
return result
transformationTypes = Operations().getValue( "Transformations/DataProcessing", [] )
result = self.__checkMultiChoice( "JobType", ['User', 'Test', 'Hospital'] + transformationTypes )
if not result[ 'OK' ]:
return result
return S_OK()
def createSection( self, secName, contents = False ):
if secName not in self.__manifest:
if contents and not isinstance( contents, CFG ):
return S_ERROR( "Contents for section %s is not a cfg object" % secName )
self.__dirty = True
return S_OK( self.__manifest.createNewSection( secName, contents = contents ) )
return S_ERROR( "Section %s already exists" % secName )
def getSection( self, secName ):
self.__dirty = True
sec = self.__manifest[ secName ]
if not sec:
return S_ERROR( "%s does not exist" )
return S_OK( sec )
def setSectionContents( self, secName, contents ):
if contents and not isinstance( contents, CFG ):
return S_ERROR( "Contents for section %s is not a cfg object" % secName )
self.__dirty = True
if secName in self.__manifest:
self.__manifest[ secName ].reset()
self.__manifest[ secName ].mergeWith( contents )
else:
self.__manifest.createNewSection( secName, contents = contents )
def setOption( self, varName, varValue ):
"""
Set a var in job manifest
"""
self.__dirty = True
levels = List.fromChar( varName, "/" )
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
cfg.createNewSection( l )
cfg = cfg[ l ]
cfg.setOption( levels[-1], varValue )
def remove( self, opName ):
levels = List.fromChar( opName, "/" )
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
return S_ERROR( "%s does not exist" % opName )
cfg = cfg[ l ]
if cfg.deleteKey( levels[ -1 ] ):
self.__dirty = True
return S_OK()
return S_ERROR( "%s does not exist" % opName )
def getOption( self, varName, defaultValue = None ):
"""
Get a variable from the job manifest
"""
cfg = self.__manifest
return cfg.getOption( varName, defaultValue )
def getOptionList( self, section = "" ):
"""
Get a list of variables in a section of the job manifest
"""
cfg = self.__manifest.getRecursive( section )
if not cfg or 'value' not in cfg:
return []
cfg = cfg[ 'value' ]
return cfg.listOptions()
def isOption( self, opName ):
"""
Check if it is a valid option
"""
return self.__manifest.isOption( opName )
def getSectionList( self, section = "" ):
"""
Get a list of sections in the job manifest
"""
cfg = self.__manifest.getRecursive( section )
if not cfg or 'value' not in cfg:
return []
cfg = cfg[ 'value' ]
return cfg.listSections()
def expand( self ):
"""
Expand all options into themselves
"""
self.__manifest.expand()
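# Usage sketch (illustrative; the JDL text and option values are hypothetical):
#   manifest = JobManifest()
#   manifest.loadJDL('[ Executable = "my.sh"; CPUTime = 3600; ]')
#   manifest.setOption("Priority", 5)
#   result = manifest.check()   # S_OK()/S_ERROR as usual in DIRAC
#   jdl = manifest.dumpAsJDL()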
|
Sbalbp/DIRAC
|
WorkloadManagementSystem/Client/JobState/JobManifest.py
|
Python
|
gpl-3.0
| 8,989
|
[
"DIRAC"
] |
ee019c5c2e20746db7c902368f30011cc43c1bb16dbe759f76cdb985c1cb5911
|
'''
Created on 2012-09-29
A simple script that reads a WRF netcdf-4 file and displays a 2D field in a proper geographic projection;
application here is plotting precipitation in the inner WRF domain.
@author: Andre R. Erler
'''
## includes
# matplotlib config: size etc.
import numpy as np
import matplotlib.pylab as pyl
import matplotlib as mpl
mpl.rc('lines', linewidth=1.)
mpl.rc('font', size=10)
# pygeode stuff
from myDatasets.loadWRF import openWRF
from myPlots.plots import surfacePlot
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm, maskoceans
#from pygeode.plot import plot_v1 as pl
#from pygeode.plot import basemap as bm
## settings
nax = 2 # number of panels
ndom = 2
sf = dict(dpi=150) # print properties
folder = '/home/me/Research/Dynamical Downscaling/figures/' # figure directory
if __name__ == '__main__':
## read data
data = openWRF('ctrl-1',[1982],list(range(11,12)))
print(data[ndom-1])
## compute data
precip = []; ndays = []
for n in range(ndom):
nrec = data[n].time.values[-1]+1
ndays = data[n].xtime(time=nrec-1).get() /24/60 # xtime is in minutes, need days
dailyrain = data[n].rain(time=nrec-1).get() / ndays
# ndays = ( data[n].xtime(time=nrec-1).get() - data[n].xtime(time=0).get() )/24/60 # xtime is in minutes, need days
# dailyrain = ( data[n].rain(time=nrec-1).get() - data[n].rain(time=0).get() ) / ndays
precip.append(dailyrain.squeeze())
## setup projection
f = pyl.figure(facecolor='white', figsize = (6.25,4.25))
ax = []
for n in range(nax):
ax.append(f.add_subplot(1,2,n+1))
f.subplots_adjust(bottom=0.12, left=0.06, right=.97, top=.94, hspace=0.05, wspace=0.05) # hspace, wspace
# setup lambert conformal basemap.
# lat_1 is first standard parallel.
# lat_2 is second standard parallel (defaults to lat_1).
# lon_0,lat_0 is central point.
# rsphere=(6378137.00,6356752.3142) specifies the WGS84 ellipsoid
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
lcc = dict(projection='lcc', lat_0=59, lon_0=-123, lat_1=53, rsphere=(6378137.00,6356752.3142),#
width=310*10e3, height=315*10e3, area_thresh = 1000., resolution='l')
# map projection boundaries for inner WRF domain
map = []
for n in range(nax):
map.append(Basemap(ax=ax[n],**lcc)) # one map for each panel!!
## Plot data
grid = 10; res = 'l'
clevs = np.linspace(0,25,51)
norm = mpl.colors.Normalize(vmin=min(clevs),vmax=max(clevs),clip=True)
cmap = mpl.cm.gist_ncar #s3pcpn
cmap.set_over('purple'); cmap.set_under('blue')
# coordinates
lat = []; lon = []; x = []; y = []
for n in range(ndom):
lat.append(data[n].lat.get())
lon.append(data[n].lon.get())
xx, yy = map[0](lon[n],lat[n]) # convert to map-native coordinates
x.append(xx); y.append(yy)
# draw boundaries of inner and outer domains
bdy2 = np.ones_like(lat[1]); bdy2[0,:]=0; bdy2[-1,:]=0; bdy2[:,0]=0; bdy2[:,-1]=0
for n in range(nax):
# N.B.: bdy2 depends on inner domain coordinates x[1],y[1]
map[n].contour(x[1],y[1],bdy2,[0],ax=ax[n], colors='k') # draw boundary of inner domain
# # terrain data: mask out ocean
# zs = []
# for n in xrange(ndom):
# zs.append(maskoceans(lon[n],lat[n],data[n].zs.get(),resolution=res,grid=grid))
# draw data
cd = []
for n in range(nax): # only plot first domain in first panel
for m in range(n+1): # but also plot first domain in second panel (as background)
print('panel %i / domain %i'%(n,m))
print('precip: min %f / max %f / mean %f'%(precip[m].min(),precip[m].max(),precip[m].mean()))
cd.append(map[n].contourf(x[m],y[m],precip[m],clevs,ax=ax[n],cmap=cmap, norm=norm,extend='both'))
# add colorbar
cax = f.add_axes([0.1, 0.06, 0.8, 0.03])
for cn in cd: # [c1d1, c1d2, c2d2]:
cn.set_clim(vmin=min(clevs),vmax=max(clevs))
cbar = f.colorbar(cax=cax,mappable=cd[0],orientation='h',extend='both') # ,size='3%',pad='2%'
cbl = np.linspace(min(clevs),max(clevs),6)
cbar.set_ticks(cbl); cbar.set_ticklabels(['%02.1f mm'%(lev) for lev in cbl])
## Annotation
# add labels
f.suptitle('Average Daily Precipitation',fontsize=12)
ax[0].set_title('Outer Domain (30 km)',fontsize=11)
ax[1].set_title('Inner Domain (10 km)',fontsize=11)
# ax.set_xlabel('Longitude'); ax.set_ylabel('Latitude')
map[0].drawmapscale(-135, 49, -137, 57, 800, barstyle='fancy', yoffset=0.01*(map[n].ymax-map[n].ymin))
for n in range(nax):
if n == 0 or n == 1: Bottom = True
else: Bottom = False
if n == 0: Left = True
else: Left = False
# land/sea mask
map[n].drawlsmask(ocean_color='blue', land_color='green',resolution=res,grid=grid)
# add map stuff
map[n].drawcoastlines(linewidth=0.5)
map[n].drawcountries(linewidth=0.5)
# map[n].drawrivers(linewidth=0.5)
# map[n].fillcontinents(color = 'coral')
map[n].drawmapboundary(fill_color='k',linewidth=2)
# labels = [left,right,top,bottom]
map[n].drawparallels([45,65],linewidth=1, labels=[Left,False,False,False])
map[n].drawparallels([55,75],linewidth=0.5, labels=[Left,False,False,False])
map[n].drawmeridians([-140,-120,-100],linewidth=1, labels=[False,False,False,Bottom])
map[n].drawmeridians([-150,-130,-110],linewidth=0.5, labels=[False,False,False,Bottom])
# save figure to disk
f.savefig(folder+'AnnualPrecip.pdf', **sf) # save figure to pdf
print(('\nSaved figure in '+folder+'AnnualPrecip.pdf'))
# show plots
pyl.show()
## more projections
# setup lambert azimuthal equal area basemap.
# lat_ts is latitude of true scale.
# lon_0,lat_0 is central point.
# laea = dict(projection='laea', lat_0=57, lon_0=-137, lat_ts=53, resolution='l', #
# width=259*30e3, height=179*30e3, rsphere=(6378137.00,6356752.3142), area_thresh = 1000.)
# lon_0, lat_0 are the center point of the projection.
# resolution = 'l' means use low resolution coastlines.
# ortho = dict(projection='ortho', lat_0 = 57, lon_0 = -137, resolution = 'l', area_thresh = 1000.)
# 'parallels':[30,50,70], 'meridians':[-180,-150,-120,-90], 'labels':[1,0,0,1]}
|
aerler/WRF-Projects
|
src/archive/plotInnerPrecip.py
|
Python
|
gpl-3.0
| 6,163
|
[
"NetCDF"
] |
4ff9c453eb3862d034264f14557b96d2c55d8b62be166c195d51c9ddd59475ad
|
#!/usr/bin/env python
"""
Print list of users with proxies.
Example:
$ dirac-admin-users-with-proxy
* vhamar
DN : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
group : dirac_admin
not after : 2011-06-29 12:04:25
persistent : False
-
DN : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
group : dirac_pilot
not after : 2011-06-29 12:04:27
persistent : False
-
DN : /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar
group : dirac_user
not after : 2011-06-29 12:04:30
persistent : True
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import DIRAC
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
__RCSID__ = "$Id$"
class Params(object):
limited = False
proxyPath = False
proxyLifeTime = 3600
def setProxyLifeTime(self, arg):
try:
fields = [f.strip() for f in arg.split(":")]
self.proxyLifeTime = int(fields[0]) * 3600 + int(fields[1]) * 60
except Exception:
print("Can't parse %s time! Is it a HH:MM?" % arg)
return DIRAC.S_ERROR("Can't parse time argument")
return DIRAC.S_OK()
def registerCLISwitches(self):
Script.registerSwitch("v:", "valid=", "Required HH:MM for the users", self.setProxyLifeTime)
@Script()
def main():
params = Params()
params.registerCLISwitches()
Script.parseCommandLine(ignoreErrors=True)
result = gProxyManager.getDBContents()
if not result["OK"]:
print("Can't retrieve list of users: %s" % result["Message"])
DIRAC.exit(1)
keys = result["Value"]["ParameterNames"]
records = result["Value"]["Records"]
dataDict = {}
now = Time.dateTime()
for record in records:
expirationDate = record[3]
dt = expirationDate - now
secsLeft = dt.days * 86400 + dt.seconds
if secsLeft > params.proxyLifeTime:
userName, userDN, userGroup, _, persistent = record
if userName not in dataDict:
dataDict[userName] = []
dataDict[userName].append((userDN, userGroup, expirationDate, persistent))
for userName in dataDict:
print("* %s" % userName)
for iP in range(len(dataDict[userName])):
data = dataDict[userName][iP]
print(" DN : %s" % data[0])
print(" group : %s" % data[1])
print(" not after : %s" % Time.toString(data[2]))
print(" persistent : %s" % data[3])
if iP < len(dataDict[userName]) - 1:
print(" -")
DIRAC.exit(0)
if __name__ == "__main__":
main()
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_admin_users_with_proxy.py
|
Python
|
gpl-3.0
| 2,813
|
[
"DIRAC"
] |
1ff71a2faf284a118c3836b703eaf348ee59907d389d01c3e481ba228f3d9a17
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007,2008,2012,2014,2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Test single point time-dependent logfiles in cclib"""
import os
import unittest
import numpy
from skip import skipForParser
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericTDTest(unittest.TestCase):
"""Generic time-dependent HF/DFT unittest"""
number = 5
expected_l_max = 41000
@skipForParser('DALTON', 'etoscs are not parsed')
def testenergies(self):
"""Is the l_max reasonable?"""
self.assertEqual(len(self.data.etenergies), self.number)
# Note that if all oscillator strengths are zero (like for triplets)
# then this will simply pick out the first energy.
idx_lambdamax = [i for i, x in enumerate(self.data.etoscs)
if x == max(self.data.etoscs)][0]
self.assertAlmostEqual(self.data.etenergies[idx_lambdamax], self.expected_l_max, delta=5000)
@skipForParser('DALTON', 'Oscillator strengths will have to be calculated, not just parsed.')
def testoscs(self):
"""Is the maximum of etoscs in the right range?"""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertAlmostEqual(max(self.data.etoscs), 0.67, delta=0.1)
@skipForParser('DALTON', '???')
def testsecs(self):
"""Is the sum of etsecs close to 1?"""
self.assertEqual(len(self.data.etsecs), self.number)
lowestEtrans = self.data.etsecs[1]
sumofsec = sum([z*z for (x, y, z) in lowestEtrans])
self.assertAlmostEqual(sumofsec, 1.0, delta=0.16)
@skipForParser('DALTON', '???')
def testsecs_transition(self):
"""Is the lowest E transition from the HOMO or to the LUMO?"""
idx_minenergy = [i for i, x in enumerate(self.data.etenergies)
if x == min(self.data.etenergies)][0]
sec = self.data.etsecs[idx_minenergy]
t = [(c*c, s, e) for (s, e, c) in sec]
t.sort()
t.reverse()
self.assert_(t[0][1][0] == self.data.homos[0] or
t[0][2][0] == self.data.homos[0]+1, t[0])
def testsymsnumber(self):
"""Is the length of etsyms correct?"""
self.assertEqual(len(self.data.etsyms), self.number)
class ADFTDDFTTest(GenericTDTest):
"""Customized time-dependent DFT unittest"""
number = 5
def testsecs(self):
"""Is the sum of etsecs close to 1?"""
self.assertEqual(len(self.data.etsecs), self.number)
lowestEtrans = self.data.etsecs[1]
#ADF squares the etsecs
sumofsec = sum([z for (x, y, z) in lowestEtrans])
self.assertAlmostEqual(sumofsec, 1.0, delta=0.16)
class DALTONTDTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
number = 20
class GaussianTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
expected_l_max = 48000
def testrotatsnumber(self):
"""Is the length of etrotats correct?"""
self.assertEqual(len(self.data.etrotats), self.number)
class GAMESSUSTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
number = 10
class JaguarTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
expected_l_max = 48000
def testoscs(self):
"""Is the maximum of etoscs in the right range?"""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertAlmostEqual(max(self.data.etoscs), 1.0, delta=0.2)
class OrcaTDDFTTest(GenericTDTest):
"""Customized time-dependent HF/DFT unittest"""
number = 10
expected_l_max = 48000
def testoscs(self):
"""Is the maximum of etoscs in the right range?"""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertAlmostEqual(max(self.data.etoscs), 1.0, delta=0.1)
class GenericTDDFTtrpTest(GenericTDTest):
"""Generic time-dependent HF/DFT (triplet) unittest"""
number = 5
expected_l_max = 24500
def testoscs(self):
"""Triplet excitations should be disallowed."""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertAlmostEqual(max(self.data.etoscs), 0.0, delta=0.01)
if __name__=="__main__":
import sys
sys.path.append(os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['TD'])
suite.testall()
|
ghutchis/cclib
|
test/data/testTD.py
|
Python
|
lgpl-2.1
| 4,812
|
[
"ADF",
"Dalton",
"cclib"
] |
9d5c3ec774f0a2fddf118fcda15ee9192e65eea5db63c2dda117f073fb7ebbfc
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
*****************************************
**units** - Real_Units
*****************************************
ESPResSo++ returns temperature, energy, pressure, box length, etc. in dimensionless
units; the user is normally responsible for converting them to real length, energy,
mass and charge units. This Python class is a helper that simplifies the conversion,
based on the chosen basic units.
It should still be used with care for complicated systems.
Currently it is implemented for SI-style units. Make sure that you are using
length in [nm]
energy in [kJ/mol]
mass in [amu]
q in [e]
and it will return
pressure in [bar]
temperature in [K]
time in [ps]
density in [kg/m^3]
Example:
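A minimal sketch (illustrative only; the argon-like base units below are an
assumption, not prescribed by this module):

units = Real_Units(0.34, 0.996, 39.948, 1.0)  # length [nm], energy [kJ/mol], mass [amu], charge [e]
T_in_K = units.temperature(1.0)   # dimensionless temperature -> K
P_in_bar = units.pressure(0.1)    # dimensionless pressure -> bar
rho = units.density(0.8)          # dimensionless density -> kg/m^3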
'''
import espressopp
import math
kB = 1.3806488 * pow(10,-23) # m^2 * kg * s^-2 * K^-1
Na = 6.0221413 * pow(10, 23) # mol^-1
amu = 1.6605389 #* pow(10,-27)
class Real_Units:
def __init__(self, _length, _energy, _mass, _charge):
self.length_factor = _length
self.energy_factor = _energy
self.mass_factor = _mass
self.charge_factor = _charge
self.pressure_factor = self.energy_factor / pow(self.length_factor, 3)
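        # Energy in kJ/mol divided by the molar gas constant R = kB*Na [J/(mol K)];
        # the factor 1000 converts kJ to J, so dimensionless temperatures map to Kelvin.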
self.temperature_factor = self.energy_factor / (kB * Na) * 1000
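        # Characteristic time = length * sqrt(mass / energy); with nm, amu and
        # kJ/mol as basic units the metric prefixes cancel so the result is in ps.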
self.time_factor = self.length_factor * math.sqrt( self.mass_factor / self.energy_factor)
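        # Mass in amu over volume in nm^3: the 1e-27 kg of one amu (commented out
        # in the constant above) cancels against nm^3 = 1e-27 m^3, giving kg/m^3.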
self.density_factor = self.mass_factor * amu / pow(self.length_factor, 3)
def length(self, dl_length):
return dl_length * self.length_factor
def energy(self, dl_energy):
return dl_energy * self.energy_factor
def mass(self, dl_mass):
return dl_mass * self.mass_factor
def charge(self, dl_charge):
return dl_charge * self.charge_factor
def pressure(self, dl_pressure):
return dl_pressure * self.pressure_factor
def temperature(self, dl_temperature):
return dl_temperature * self.temperature_factor
def time(self, dl_time):
return dl_time * self.time_factor
def density(self, dl_density):
return dl_density * self.density_factor
    # the other way around: convert quantities in real units back to dimensionless ones
    def dl_length(self, length):
        return length / self.length_factor
def dl_energy(self, energy):
return energy / self.energy_factor
def dl_mass(self, mass):
return mass / self.mass_factor
def dl_charge(self, charge):
return charge / self.charge_factor
def dl_pressure(self, pressure):
return pressure / self.pressure_factor
def dl_temperature(self, temperature):
return temperature / self.temperature_factor
def dl_time(self, time):
return time / self.time_factor
def dl_density(self, density):
return density / self.density_factor
|
capoe/espressopp.soap
|
src/tools/convert/units.py
|
Python
|
gpl-3.0
| 3,557
|
[
"ESPResSo"
] |
45a8d56067357b3933eb51ca2c973d29307470c7faf5adb70cdfaefc613439fa
|
# Copyright 2008-2009 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# This module is for reading and writing PIR or NBRF format files as
# SeqRecord objects. The code is based on Bio.SeqIO.FastaIO
"""Bio.SeqIO support for the "pir" (aka PIR or NBRF) file format.
You are expected to use this module via the Bio.SeqIO functions, or if
the file contains a sequence alignment, optionally via Bio.AlignIO instead.
This format was introduced for the Protein Information Resource (PIR), a
project of the National Biomedical Research Foundation (NBRF). The PIR
database itself is now part of UniProt.
The file format is described online at:
http://www.ebi.ac.uk/help/pir_frame.html
http://www.cmbi.kun.nl/bioinf/tools/crab_pir.html (currently down)
An example file in this format would be::
>P1;CRAB_ANAPL
ALPHA CRYSTALLIN B CHAIN (ALPHA(B)-CRYSTALLIN).
MDITIHNPLI RRPLFSWLAP SRIFDQIFGE HLQESELLPA SPSLSPFLMR
SPIFRMPSWL ETGLSEMRLE KDKFSVNLDV KHFSPEELKV KVLGDMVEIH
GKHEERQDEH GFIAREFNRK YRIPADVDPL TITSSLSLDG VLTVSAPRKQ
SDVPERSIPI TREEKPAIAG AQRK*
>P1;CRAB_BOVIN
ALPHA CRYSTALLIN B CHAIN (ALPHA(B)-CRYSTALLIN).
MDIAIHHPWI RRPFFPFHSP SRLFDQFFGE HLLESDLFPA STSLSPFYLR
PPSFLRAPSW IDTGLSEMRL EKDRFSVNLD VKHFSPEELK VKVLGDVIEV
HGKHEERQDE HGFISREFHR KYRIPADVDP LAITSSLSSD GVLTVNGPRK
QASGPERTIP ITREEKPAVT AAPKK*
Or, an example of a multiple sequence alignment::
>P1;S27231
rhodopsin - northern leopard frog
MNGTEGPNFY IPMSNKTGVV RSPFDYPQYY LAEPWKYSVL AAYMFLLILL GLPINFMTLY
VTIQHKKLRT PLNYILLNLG VCNHFMVLCG FTITMYTSLH GYFVFGQTGC YFEGFFATLG
GEIALWSLVV LAIERYIVVC KPMSNFRFGE NHAMMGVAFT WIMALACAVP PLFGWSRYIP
EGMQCSCGVD YYTLKPEVNN ESFVIYMFVV HFLIPLIIIS FCYGRLVCTV KEAAAQQQES
ATTQKAEKEV TRMVIIMVIF FLICWVPYAY VAFYIFTHQG SEFGPIFMTV PAFFAKSSAI
YNPVIYIMLN KQFRNCMITT LCCGKNPFGD DDASSAATSK TEATSVSTSQ VSPA*
>P1;I51200
rhodopsin - African clawed frog
MNGTEGPNFY VPMSNKTGVV RSPFDYPQYY LAEPWQYSAL AAYMFLLILL GLPINFMTLF
VTIQHKKLRT PLNYILLNLV FANHFMVLCG FTVTMYTSMH GYFIFGPTGC YIEGFFATLG
GEVALWSLVV LAVERYIVVC KPMANFRFGE NHAIMGVAFT WIMALSCAAP PLFGWSRYIP
EGMQCSCGVD YYTLKPEVNN ESFVIYMFIV HFTIPLIVIF FCYGRLLCTV KEAAAQQQES
LTTQKAEKEV TRMVVIMVVF FLICWVPYAY VAFYIFTHQG SNFGPVFMTV PAFFAKSSAI
YNPVIYIVLN KQFRNCLITT LCCGKNPFGD EDGSSAATSK TEASSVSSSQ VSPA*
>P1;JN0120
rhodopsin - Japanese lamprey
MNGTEGDNFY VPFSNKTGLA RSPYEYPQYY LAEPWKYSAL AAYMFFLILV GFPVNFLTLF
VTVQHKKLRT PLNYILLNLA MANLFMVLFG FTVTMYTSMN GYFVFGPTMC SIEGFFATLG
GEVALWSLVV LAIERYIVIC KPMGNFRFGN THAIMGVAFT WIMALACAAP PLVGWSRYIP
EGMQCSCGPD YYTLNPNFNN ESYVVYMFVV HFLVPFVIIF FCYGRLLCTV KEAAAAQQES
ASTQKAEKEV TRMVVLMVIG FLVCWVPYAS VAFYIFTHQG SDFGATFMTL PAFFAKSSAL
YNPVIYILMN KQFRNCMITT LCCGKNPLGD DE-SGASTSKT EVSSVSTSPV SPA*
As with the FASTA format, each record starts with a line beginning with a ">"
character. This is followed by a two letter sequence type (P1, F1, DL, DC, RL,
RC, or XX), a semicolon, and the identification code. The second line is a
free text description. The remaining lines contain the sequence itself,
terminating in an asterisk. Space separated blocks of ten letters as shown
above are typical.
Sequence codes and their meanings:
- P1 - Protein (complete)
- F1 - Protein (fragment)
- D1 - DNA (e.g. EMBOSS seqret output)
- DL - DNA (linear)
- DC - DNA (circular)
- RL - RNA (linear)
- RC - RNA (circular)
- N3 - tRNA
- N1 - Other functional RNA
- XX - Unknown
"""
from __future__ import print_function
from Bio.Alphabet import single_letter_alphabet, generic_protein, \
generic_dna, generic_rna
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
__docformat__ = "restructuredtext en"
_pir_alphabets = {"P1": generic_protein,
"F1": generic_protein,
"D1": generic_dna,
"DL": generic_dna,
"DC": generic_dna,
"RL": generic_rna,
"RC": generic_rna,
"N3": generic_rna,
"XX": single_letter_alphabet,
}
def PirIterator(handle):
"""Generator function to iterate over Fasta records (as SeqRecord objects).
handle - input file
alphabet - optional alphabet
title2ids - A function that, when given the title of the FASTA
file (without the beginning >), will return the id, name and
description (in that order) for the record as a tuple of strings.
If this is not given, then the entire title line will be used
as the description, and the first word as the id and name.
Note that use of title2ids matches that of Bio.Fasta.SequenceParser
but the defaults are slightly different.
"""
# Skip any text before the first record (e.g. blank lines, comments)
while True:
line = handle.readline()
if line == "":
return # Premature end of file, or just empty?
if line[0] == ">":
break
while True:
if line[0] != ">":
raise ValueError(
"Records in PIR files should start with '>' character")
pir_type = line[1:3]
if pir_type not in _pir_alphabets or line[3] != ";":
raise ValueError(
"Records should start with '>XX;' "
"where XX is a valid sequence type")
identifier = line[4:].strip()
description = handle.readline().strip()
lines = []
line = handle.readline()
while True:
if not line:
break
if line[0] == ">":
break
# Remove trailing whitespace, and any internal spaces
lines.append(line.rstrip().replace(" ", ""))
line = handle.readline()
seq = "".join(lines)
if seq[-1] != "*":
# Note the * terminator is present on nucleotide sequences too,
# it is not a stop codon!
raise ValueError(
"Sequences in PIR files should include a * terminator!")
# Return the record and then continue...
record = SeqRecord(Seq(seq[:-1], _pir_alphabets[pir_type]),
id=identifier, name=identifier,
description=description)
record.annotations["PIR-type"] = pir_type
yield record
if not line:
return # StopIteration
assert False, "Should not reach this line"
if __name__ == "__main__":
print("Running quick self test")
import os
for name in ["clustalw", "DMA_nuc", "DMB_prot", "B_nuc", "Cw_prot"]:
print(name)
filename = "../../Tests/NBRF/%s.pir" % name
if not os.path.isfile(filename):
print("Missing %s" % filename)
continue
records = list(PirIterator(open(filename)))
count = 0
for record in records:
count += 1
parts = record.description.split()
if "bases," in parts:
assert len(record) == int(parts[parts.index("bases,") - 1])
print("Could read %s (%i records)" % (name, count))
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/SeqIO/PirIO.py
|
Python
|
gpl-2.0
| 7,219
|
[
"Biopython"
] |
fa18fbf915e9ae07cda21c56296ae15197e122a8e870d4555afd8d19d77cc7a1
|